Merge branch 'storageareas' of github.com:ConnorTechnology/ZoneMinder into storageareas

Isaac Connor 2017-08-09 12:51:06 -04:00
commit 3bf4bf8c0e
6 changed files with 122 additions and 87 deletions

src/zm_ffmpeg_camera.cpp View File

@@ -63,6 +63,7 @@ FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::stri
mReopenThread = 0;
videoStore = NULL;
video_last_pts = 0;
have_video_keyframe = false;
#if HAVE_LIBSWSCALE
mConvertContext = NULL;
@@ -640,6 +641,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
delete videoStore;
videoStore = NULL;
have_video_keyframe = false;
monitor->SetVideoWriterEventId( 0 );
} // end if videoStore
@@ -692,6 +694,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
Debug(2, "Writing queued packet stream: %d KEY %d, remaining (%d)", avp->stream_index, avp->flags & AV_PKT_FLAG_KEY, packetqueue.size() );
if ( avp->stream_index == mVideoStreamId ) {
ret = videoStore->writeVideoFramePacket( avp );
have_video_keyframe = true;
} else if ( avp->stream_index == mAudioStreamId ) {
ret = videoStore->writeAudioFramePacket( avp );
} else {
@@ -712,6 +715,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
Info("Deleting videoStore instance");
delete videoStore;
videoStore = NULL;
have_video_keyframe = false;
monitor->SetVideoWriterEventId( 0 );
}
@@ -747,13 +751,15 @@ else if ( packet.pts && video_last_pts > packet.pts ) {
} // end if recording or not
if ( packet.stream_index == mVideoStreamId ) {
if ( videoStore ) {
if ( videoStore && ( have_video_keyframe || key_frame ) ) {
//Write the packet to our video store
int ret = videoStore->writeVideoFramePacket( &packet );
if ( ret < 0 ) { //Less than zero and we skipped a frame
zm_av_packet_unref( &packet );
return 0;
}
have_video_keyframe = true;
}
Debug(4, "about to decode video" );
@@ -817,6 +823,7 @@ else if ( packet.pts && video_last_pts > packet.pts ) {
} else if ( packet.stream_index == mAudioStreamId ) { //FIXME best way to copy all other streams
if ( videoStore ) {
if ( record_audio ) {
if ( have_video_keyframe ) {
Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)", mAudioStreamId, packet.stream_index );
//Write the packet to our video store
//FIXME no relevance of last key frame
@@ -826,6 +833,9 @@ else if ( packet.pts && video_last_pts > packet.pts ) {
zm_av_packet_unref( &packet );
return 0;
}
} else {
Debug(3, "Not recording audio because we don't have a bvideo keyframe yet");
}
} else {
Debug(4, "Not doing recording of audio packet" );
}
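
Taken together, the hunks above implement one rule: nothing is written to an event's video file until the first video keyframe arrives, and audio is held back until then as well, since audio that precedes the first decodable frame is useless to a player. Below is a minimal sketch of that gating; the helper name on_packet and the free-standing flag are hypothetical, standing in for the FfmpegCamera plumbing:

    // Sketch of the keyframe gate (assumed names, not ZoneMinder's actual code).
    extern "C" {
    #include <libavcodec/avcodec.h>
    }
    #include "zm_videostore.h"  // VideoStore::writeVideoFramePacket / writeAudioFramePacket

    static bool have_video_keyframe = false;  // reset whenever videoStore is recreated

    static int on_packet(VideoStore *store, AVPacket *pkt,
                         int video_stream_id, int audio_stream_id) {
      if ( !store ) return 0;
      if ( pkt->stream_index == video_stream_id ) {
        bool key_frame = pkt->flags & AV_PKT_FLAG_KEY;
        if ( have_video_keyframe || key_frame ) {
          if ( store->writeVideoFramePacket(pkt) < 0 )
            return -1;                 // frame skipped, caller unrefs the packet
          have_video_keyframe = true;  // latch: stays set until the store is deleted
        }
      } else if ( pkt->stream_index == audio_stream_id ) {
        if ( have_video_keyframe )     // drop audio until the first video keyframe
          store->writeAudioFramePacket(pkt);
      }
      return 0;
    }

The matching have_video_keyframe = false after each delete videoStore is what re-arms the latch for the next event.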

src/zm_ffmpeg_camera.h View File

@@ -78,6 +78,7 @@ class FfmpegCamera : public Camera {
char oldDirectory[4096];
unsigned int old_event_id;
zm_packetqueue packetqueue;
bool have_video_keyframe;
#if HAVE_LIBSWSCALE
struct SwsContext *mConvertContext;

src/zm_videostore.cpp View File

@@ -44,7 +44,7 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
video_input_context = avcodec_alloc_context3( NULL );
avcodec_parameters_to_context( video_input_context, video_input_stream->codecpar );
zm_dump_codecpar( video_input_stream->codecpar );
//zm_dump_codecpar( video_input_stream->codecpar );
#else
video_input_context = video_input_stream->codec;
#endif
@@ -285,7 +285,7 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
zm_dump_stream_format( oc, 1, 0, 1 );
AVDictionary * opts = NULL;
av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
//av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
//av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
//av_dict_set(&opts, "movflags", "frag_keyframe+empty_moov+default_base_moof", 0);
if ((ret = avformat_write_header( oc, &opts )) < 0) {
@@ -308,55 +308,58 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
video_last_dts = 0;
audio_last_pts = 0;
audio_last_dts = 0;
video_previous_pts = 0;
video_previous_dts = 0;
audio_previous_pts = 0;
audio_previous_dts = 0;
video_next_pts = 0;
video_next_dts = 0;
audio_next_pts = 0;
audio_next_dts = 0;
} // VideoStore::VideoStore
VideoStore::~VideoStore(){
if ( audio_output_codec ) {
// Do we need to flush the outputs? I have no idea.
// The codec queues data. We need to send a flush command and output whatever we get. Failures are not fatal.
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
int64_t size = 0;
while ( 1 ) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_send_frame( audio_output_context, NULL );
if ( ret < 0 ) {
Error("Error sending flush to encoder (%d) (%s)", ret, av_err2str( ret ));
break;
}
ret = avcodec_receive_packet( audio_output_context, &pkt );
if ( ret < 0 ) {
Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str( ret ) );
Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str( ret ));
break;
}
#else
int got_packet = 0;
ret = avcodec_encode_audio2( audio_output_context, &pkt, NULL, &got_packet );
if ( ret < 0 ) {
Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str( ret ) );
Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str( ret ));
break;
}
Debug(1, "Have audio encoder, need to flush it's output" );
Debug(1, "Have audio encoder, need to flush it's output");
if ( ! got_packet ) {
break;
}
#endif
size += pkt.size;
Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts, pkt.dts, pkt.duration );
if ( pkt.pts != AV_NOPTS_VALUE )
pkt.pts = av_rescale_q(pkt.pts, audio_output_context->time_base, audio_output_stream->time_base);
if ( pkt.dts != AV_NOPTS_VALUE )
pkt.dts = av_rescale_q(pkt.dts, audio_output_context->time_base, audio_output_stream->time_base);
Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts, pkt.dts, pkt.duration);
pkt.pts = audio_next_pts;
pkt.dts = audio_next_dts;
if ( pkt.duration > 0 )
pkt.duration = av_rescale_q(pkt.duration, audio_output_context->time_base, audio_output_stream->time_base);
Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts, pkt.dts, pkt.duration );
audio_next_pts += pkt.duration;
audio_next_dts += pkt.duration;
Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts, pkt.dts, pkt.duration);
pkt.stream_index = audio_output_stream->index;
av_interleaved_write_frame( oc, &pkt );
zm_av_packet_unref( &pkt );
} // while 1
} // while have buffered frames
} // end if audio_output_codec
// Flush Queues
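
For context on the destructor's flush loop above: with the newer libavcodec send/receive API, draining an encoder means sending a NULL frame once and then reading packets until the codec runs dry. A generic, self-contained sketch of that idiom (this is the standard FFmpeg pattern, not ZoneMinder code; the pts/dts overrides the destructor adds are omitted here):

    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    }

    // Drain frames still buffered in an encoder and mux the resulting packets.
    static void flush_encoder(AVCodecContext *ctx, AVFormatContext *oc, AVStream *st) {
      if ( avcodec_send_frame(ctx, NULL) < 0 )  // NULL frame enters draining mode
        return;
      AVPacket pkt;
      av_init_packet(&pkt);
      pkt.data = NULL;
      pkt.size = 0;
      while ( avcodec_receive_packet(ctx, &pkt) == 0 ) {
        // Encoder output uses the codec time base; the muxer expects the stream's.
        av_packet_rescale_ts(&pkt, ctx->time_base, st->time_base);
        pkt.stream_index = st->index;
        av_interleaved_write_frame(oc, &pkt);  // failures here are non-fatal
        av_packet_unref(&pkt);
      }  // avcodec_receive_packet() returns AVERROR_EOF once fully drained
    }

The destructor instead stamps pkt.pts/pkt.dts from audio_next_pts/audio_next_dts, so the flushed tail continues the file's synthesized timeline rather than trusting rescaled encoder timestamps.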
@@ -513,6 +516,15 @@ bool VideoStore::setup_resampler() {
return false;
}
Debug(1, "Audio output bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) layout(%d) frame_size(%d)",
audio_output_context->bit_rate,
audio_output_context->sample_rate,
audio_output_context->channels,
audio_output_context->sample_fmt,
audio_output_context->channel_layout,
audio_output_context->frame_size
);
/** Create a new frame to store the audio samples. */
if ( !(input_frame = zm_av_frame_alloc()) ) {
Error("Could not allocate input frame");
@@ -635,8 +647,22 @@ void VideoStore::dumpPacket( AVPacket *pkt ){
int VideoStore::writeVideoFramePacket( AVPacket *ipkt ) {
av_init_packet(&opkt);
int duration;
opkt.pts = video_next_pts;
opkt.dts = video_next_dts;
int duration;
if ( video_last_pts && ( ipkt->duration == AV_NOPTS_VALUE || ! ipkt->duration ) ) {
duration = av_rescale_q( ipkt->pts - video_last_pts, video_input_stream->time_base, video_output_stream->time_base);
// Video packets don't really have a duration. Audio does.
//opkt.duration = av_rescale_q(duration, video_input_stream->time_base, video_output_stream->time_base);
opkt.duration = 0;
} else {
duration = opkt.duration = av_rescale_q(ipkt->duration, video_input_stream->time_base, video_output_stream->time_base);
}
video_last_pts = ipkt->pts;
video_last_dts = ipkt->dts;
#if 0
//Scale the PTS of the outgoing packet to be the correct time base
if ( ipkt->pts != AV_NOPTS_VALUE ) {
@@ -646,63 +672,58 @@ int VideoStore::writeVideoFramePacket( AVPacket *ipkt ) {
Debug(2, "Starting video video_last_pts will become (%d)", ipkt->pts );
} else {
if ( ipkt->pts < video_last_pts ) {
Debug(1, "Resetting video_last_pts from (%d) to (%d)", video_last_pts, ipkt->pts );
Debug(1, "Resetting video_last_pts from (%d) to (%d)", video_last_pts, ipkt->pts);
// wrap around, need to figure out the distance FIXME having this wrong should cause a jump, but then play ok?
opkt.pts = video_previous_pts + av_rescale_q( ipkt->pts, video_input_stream->time_base, video_output_stream->time_base);
opkt.pts = video_next_pts + av_rescale_q( ipkt->pts, video_input_stream->time_base, video_output_stream->time_base);
} else {
opkt.pts = video_previous_pts + av_rescale_q( ipkt->pts - video_last_pts, video_input_stream->time_base, video_output_stream->time_base);
opkt.pts = video_next_pts + av_rescale_q( ipkt->pts - video_last_pts, video_input_stream->time_base, video_output_stream->time_base);
}
}
Debug(3, "opkt.pts = %d from ipkt->pts(%d) - last_pts(%d)", opkt.pts, ipkt->pts, video_last_pts );
duration = ipkt->pts - video_last_pts;
Debug(3, "opkt.pts = %d from ipkt->pts(%d) - last_pts(%d)", opkt.pts, ipkt->pts, video_last_pts);
video_last_pts = ipkt->pts;
} else {
Debug(3, "opkt.pts = undef");
opkt.pts = AV_NOPTS_VALUE;
}
//Scale the DTS of the outgoing packet to be the correct time base
// Just because the input stream wraps doesn't mean the output needs to. Really, if we are limiting ourselves to 10min segments I can't imagine ever wrapping in the output. So we need to handle input wrap without causing output wrap.
if ( ! video_last_dts ) {
// This is the first packet.
opkt.dts = 0;
Debug(1, "Starting video video_last_dts will become (%d)", ipkt->dts );
Debug(1, "Starting video video_last_dts will become (%lu)", ipkt->dts );
video_last_dts = ipkt->dts;
} else {
//Scale the DTS of the outgoing packet to be the correct time base
if ( ipkt->dts == AV_NOPTS_VALUE ) {
// why are we using cur_dts instead of packet.dts? I think cur_dts is in AV_TIME_BASE_Q, but ipkt.dts is in video_input_stream->time_base
if ( video_input_stream->cur_dts < video_last_dts ) {
Debug(1, "Resetting video_last_dts from (%d) to (%d) p.dts was (%d)", video_last_dts, video_input_stream->cur_dts, ipkt->dts );
opkt.dts = video_previous_dts + av_rescale_q(video_input_stream->cur_dts, AV_TIME_BASE_Q, video_output_stream->time_base);
opkt.dts = video_next_dts + av_rescale_q(video_input_stream->cur_dts, AV_TIME_BASE_Q, video_output_stream->time_base);
} else {
opkt.dts = video_previous_dts + av_rescale_q(video_input_stream->cur_dts - video_last_dts, AV_TIME_BASE_Q, video_output_stream->time_base);
opkt.dts = video_next_dts + av_rescale_q(video_input_stream->cur_dts - video_last_dts, AV_TIME_BASE_Q, video_output_stream->time_base);
}
Debug(3, "opkt.dts = %d from video_input_stream->cur_dts(%d) - previus_dts(%d)", opkt.dts, video_input_stream->cur_dts, video_last_dts );
video_last_dts = video_input_stream->cur_dts;
} else {
if ( ipkt->dts < video_last_dts ) {
Debug(1, "Resetting video_last_dts from (%d) to (%d)", video_last_dts, ipkt->dts );
opkt.dts = video_previous_dts + av_rescale_q( ipkt->dts, video_input_stream->time_base, video_output_stream->time_base);
opkt.dts = video_next_dts + av_rescale_q( ipkt->dts, video_input_stream->time_base, video_output_stream->time_base);
} else {
opkt.dts = video_previous_dts + av_rescale_q( ipkt->dts - video_last_dts, video_input_stream->time_base, video_output_stream->time_base);
opkt.dts = video_next_dts + av_rescale_q( ipkt->dts - video_last_dts, video_input_stream->time_base, video_output_stream->time_base);
}
Debug(3, "opkt.dts = %d from ipkt.dts(%d) - previus_dts(%d)", opkt.dts, ipkt->dts, video_last_dts );
video_last_dts = ipkt->dts;
}
}
#endif
if ( opkt.dts > opkt.pts ) {
Debug( 1, "opkt.dts(%d) must be <= opkt.pts(%d). Decompression must happen before presentation.", opkt.dts, opkt.pts );
opkt.dts = opkt.pts;
}
if ( ipkt->duration == AV_NOPTS_VALUE ) {
opkt.duration = av_rescale_q( duration, video_input_stream->time_base, video_output_stream->time_base);
} else {
opkt.duration = av_rescale_q(ipkt->duration, video_input_stream->time_base, video_output_stream->time_base);
}
opkt.flags = ipkt->flags;
opkt.pos=-1;
int keyframe = opkt.flags & AV_PKT_FLAG_KEY;
opkt.pos = -1;
opkt.data = ipkt->data;
opkt.size = ipkt->size;
@@ -712,21 +733,21 @@ int VideoStore::writeVideoFramePacket( AVPacket *ipkt ) {
AVPacket safepkt;
memcpy( &safepkt, &opkt, sizeof(AVPacket) );
Debug(1, "writing video packet pts(%d) dts(%d) duration(%d)", opkt.pts, opkt.dts, opkt.duration );
if ((opkt.data == NULL)||(opkt.size < 1)) {
Debug(1, "writing video packet keyframe(%d) pts(%d) dts(%d) duration(%d) ipkt.duration(%d)", keyframe, opkt.pts, opkt.dts, duration, ipkt->duration);
if ( (opkt.data == NULL) || (opkt.size < 1) ) {
Warning("%s:%d: Mangled AVPacket: discarding frame", __FILE__, __LINE__ );
dumpPacket( ipkt);
dumpPacket(ipkt);
dumpPacket(&opkt);
} else if ((video_previous_dts > 0) && (video_previous_dts > opkt.dts)) {
Warning("%s:%d: DTS out of order: %lld \u226E %lld; discarding frame", __FILE__, __LINE__, video_previous_dts, opkt.dts);
video_previous_dts = opkt.dts;
} else if ( (video_next_dts > 0) && (video_next_dts > opkt.dts) ) {
Warning("%s:%d: DTS out of order: %lld \u226E %lld; discarding frame", __FILE__, __LINE__, video_next_dts, opkt.dts);
video_next_dts = opkt.dts;
dumpPacket(&opkt);
} else {
video_previous_dts = opkt.dts; // Unsure if av_interleaved_write_frame() clobbers opkt.dts when out of order, so storing in advance
video_previous_pts = opkt.pts;
video_next_dts = opkt.dts + duration; // Unsure if av_interleaved_write_frame() clobbers opkt.dts when out of order, so storing in advance
video_next_pts = opkt.pts + duration;
ret = av_interleaved_write_frame(oc, &opkt);
if ( ret < 0 ) {
// There's nothing we can really do if the frame is rejected, just drop it and get on with the next
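
The video_previous_* to video_next_* rename in this hunk is a semantic change, not just a cleanup: the store now tracks the timestamps the next packet should carry, stamps them onto opkt before writing, and advances them by the packet's rescaled duration. Reduced to its core, and assuming a single video stream with a usable input duration (a sketch, not the full function):

    extern "C" {
    #include <libavutil/mathematics.h>   // av_rescale_q
    #include <libavformat/avformat.h>
    }

    static int64_t video_next_pts = 0, video_next_dts = 0;

    // Hypothetical condensation of writeVideoFramePacket's timestamp handling.
    static int write_video(AVFormatContext *oc, AVRational in_tb, AVRational out_tb,
                           AVPacket *opkt, int64_t ipkt_duration) {
      opkt->pts = video_next_pts;
      opkt->dts = video_next_dts;
      int64_t duration = av_rescale_q(ipkt_duration, in_tb, out_tb);
      // Advance before writing, since av_interleaved_write_frame() may alter opkt.
      video_next_pts += duration;
      video_next_dts += duration;
      return av_interleaved_write_frame(oc, opkt);
    }

When the input packet carries no duration, the hunk at the top of writeVideoFramePacket falls back to the pts delta (ipkt->pts - video_last_pts, rescaled), which is why video_last_pts and video_last_dts are still recorded.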
@@ -873,18 +894,20 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
}
// PTS is difficult, because of the buffering of the audio packets in the resampler. So we have to do it once we actually have a packet...
// audio_last_pts is the pts of ipkt, audio_next_pts is the last pts of the output
//Scale the PTS of the outgoing packet to be the correct time base
#if 0
if ( ipkt->pts != AV_NOPTS_VALUE ) {
if ( ! audio_last_pts ) {
opkt.pts = 0;
Debug(1, "No audio_last_pts");
} else {
if ( audio_last_pts > ipkt->pts ) {
Debug(1, "Resetting audeo_start_pts from (%d) to (%d)", audio_last_pts, ipkt->pts );
opkt.pts = audio_previous_pts + av_rescale_q(ipkt->pts, audio_input_stream->time_base, audio_output_stream->time_base);
Debug(1, "Resetting audio_start_pts from (%d) to (%d)", audio_last_pts, ipkt->pts );
opkt.pts = audio_next_pts + av_rescale_q(ipkt->pts, audio_input_stream->time_base, audio_output_stream->time_base);
} else {
opkt.pts = audio_previous_pts + av_rescale_q(ipkt->pts - audio_last_pts, audio_input_stream->time_base, audio_output_stream->time_base);
opkt.pts = audio_next_pts + av_rescale_q(ipkt->pts - audio_last_pts, audio_input_stream->time_base, audio_output_stream->time_base);
}
Debug(2, "audio opkt.pts = %d from ipkt->pts(%d) - last_pts(%d)", opkt.pts, ipkt->pts, audio_last_pts );
}
@@ -893,33 +916,35 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
Debug(2, "opkt.pts = undef");
opkt.pts = AV_NOPTS_VALUE;
}
#else
opkt.pts = audio_next_pts;
#endif
//Scale the DTS of the outgoing packet to be the correct time base
if ( ! audio_last_dts ) {
opkt.dts = 0;
} else {
if( ipkt->dts == AV_NOPTS_VALUE ) {
opkt.dts = audio_next_dts;
#if 0
if ( ipkt->dts == AV_NOPTS_VALUE ) {
// So if the input has no dts assigned... still need an output dts... so we use cur_dts?
if ( audio_last_dts > audio_input_stream->cur_dts ) {
if ( audio_last_dts >= audio_input_stream->cur_dts ) {
Debug(1, "Resetting audio_last_dts from (%d) to cur_dts (%d)", audio_last_dts, audio_input_stream->cur_dts );
opkt.dts = audio_previous_dts + av_rescale_q( audio_input_stream->cur_dts, AV_TIME_BASE_Q, audio_output_stream->time_base);
opkt.dts = audio_next_dts + av_rescale_q( audio_input_stream->cur_dts, AV_TIME_BASE_Q, audio_output_stream->time_base);
} else {
opkt.dts = audio_previous_dts + av_rescale_q( audio_input_stream->cur_dts - audio_last_dts, AV_TIME_BASE_Q, audio_output_stream->time_base);
opkt.dts = audio_next_dts + av_rescale_q( audio_input_stream->cur_dts - audio_last_dts, AV_TIME_BASE_Q, audio_output_stream->time_base);
}
audio_last_dts = audio_input_stream->cur_dts;
Debug(2, "opkt.dts = %d from video_input_stream->cur_dts(%d) - last_dts(%d)", opkt.dts, audio_input_stream->cur_dts, audio_last_dts );
} else {
if ( audio_last_dts > ipkt->dts ) {
if ( audio_last_dts >= ipkt->dts ) {
Debug(1, "Resetting audio_last_dts from (%d) to (%d)", audio_last_dts, ipkt->dts );
opkt.dts = audio_previous_dts + av_rescale_q(ipkt->dts, audio_input_stream->time_base, audio_output_stream->time_base);
opkt.dts = audio_next_dts + av_rescale_q(ipkt->dts, audio_input_stream->time_base, audio_output_stream->time_base);
} else {
opkt.dts = audio_previous_dts + av_rescale_q(ipkt->dts - audio_last_dts, audio_input_stream->time_base, audio_output_stream->time_base);
opkt.dts = audio_next_dts + av_rescale_q(ipkt->dts - audio_last_dts, audio_input_stream->time_base, audio_output_stream->time_base);
Debug(2, "opkt.dts = %d from previous(%d) + ( ipkt->dts(%d) - last_dts(%d) )", opkt.dts, audio_next_dts, ipkt->dts, audio_last_dts );
}
Debug(2, "opkt.dts = %d from ipkt->dts(%d) - last_dts(%d)", opkt.dts, ipkt->dts, audio_last_dts );
}
}
#endif
audio_last_dts = ipkt->dts;
if ( opkt.dts > opkt.pts ) {
Debug(1,"opkt.dts(%d) must be <= opkt.pts(%d). Decompression must happen before presentation.", opkt.dts, opkt.pts );
@@ -933,13 +958,13 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
// pkt.pos: byte position in stream, -1 if unknown
opkt.pos = -1;
opkt.stream_index = audio_output_stream->index;//ipkt->stream_index;
audio_next_dts = opkt.dts + opkt.duration;
audio_next_pts = opkt.pts + opkt.duration;
AVPacket safepkt;
memcpy(&safepkt, &opkt, sizeof(AVPacket));
audio_previous_dts = opkt.dts; // Unsure if av_interleaved_write_frame() clobbers opkt.dts when out of order, so storing in advance
audio_previous_pts = opkt.pts;
ret = av_interleaved_write_frame(oc, &opkt);
if(ret!=0){
if ( ret != 0 ) {
Error("Error writing audio frame packet: %s\n", av_make_error_string(ret).c_str());
dumpPacket(&safepkt);
} else {
@@ -948,4 +973,3 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {
zm_av_packet_unref(&opkt);
return 0;
} // end int VideoStore::writeAudioFramePacket( AVPacket *ipkt )
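
The audio path now mirrors the video path: the input pts/dts are ignored (the resampler's buffering makes them unreliable, as the comment above notes) and the output timeline is synthesized from audio_next_pts/audio_next_dts. A compressed sketch, assuming opkt.duration is already expressed in the output stream's time base:

    extern "C" {
    #include <libavformat/avformat.h>
    }

    static int64_t audio_next_pts = 0, audio_next_dts = 0;

    // Hypothetical condensation of writeAudioFramePacket's timestamp handling.
    static int write_audio(AVFormatContext *oc, AVStream *out, AVPacket *opkt) {
      opkt->pts = audio_next_pts;   // synthesized, monotonic by construction
      opkt->dts = audio_next_dts;
      opkt->pos = -1;
      opkt->stream_index = out->index;
      audio_next_dts = opkt->dts + opkt->duration;  // stored before the write, in
      audio_next_pts = opkt->pts + opkt->duration;  // case the muxer clobbers opkt
      return av_interleaved_write_frame(oc, opkt);
    }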

src/zm_videostore.h View File

@@ -61,10 +61,10 @@ AVAudioResampleContext* resample_context;
int64_t audio_last_dts;
// These are for output, should start at zero. We assume they do not wrap because we just aren't going to save files that big.
int64_t video_previous_pts;
int64_t video_previous_dts;
int64_t audio_previous_pts;
int64_t audio_previous_dts;
int64_t video_next_pts;
int64_t video_next_dts;
int64_t audio_next_pts;
int64_t audio_next_dts;
int64_t filter_in_rescale_delta_last;

web/includes/actions.php View File

@@ -541,6 +541,18 @@ Warning("Addterm");
zmcControl( $monitor, 'stop' );
}
// If fast deletes are on, then zmaudit will clean everything else up later
// If fast deletes are off and there are lots of events then this step may
// well time out before completing, in which case zmaudit will still tidy up
if ( !ZM_OPT_FAST_DELETE ) {
$markEids = dbFetchAll( 'SELECT Id FROM Events WHERE MonitorId=?', NULL, array($markMid) );
foreach( $markEids as $markEid )
deleteEvent( $markEid );
deletePath( ZM_DIR_EVENTS.'/'.basename($monitor['Name']) );
deletePath( ZM_DIR_EVENTS.'/'.$monitor['Id'] ); // I'm trusting the Id.
} // end if ZM_OPT_FAST_DELETE
// This is the important stuff
dbQuery( 'DELETE FROM Monitors WHERE Id = ?', array($markMid) );
dbQuery( 'DELETE FROM Zones WHERE MonitorId = ?', array($markMid) );
@@ -549,18 +561,6 @@ Warning("Addterm");
fixSequences();
// If fast deletes are on, then zmaudit will clean everything else up later
// If fast deletes are off and there are lots of events then this step may
// well time out before completing, in which case zmaudit will still tidy up
if ( !ZM_OPT_FAST_DELETE ) {
// Slight hack, we maybe should load *, but we happen to know that the deleteEvent function uses Id and StartTime.
$markEids = dbFetchAll( 'SELECT Id,StartTime FROM Events WHERE MonitorId=?', NULL, array($markMid) );
foreach( $markEids as $markEid )
deleteEvent( $markEid, $markMid );
deletePath( ZM_DIR_EVENTS.'/'.basename($monitor['Name']) );
deletePath( ZM_DIR_EVENTS.'/'.$monitor['Id'] ); // I'm trusting the Id.
} // end if ZM_OPT_FAST_DELETE
} // end if found the monitor in the db
} // end if canedit this monitor
} // end foreach monitor in MarkMid

web/index.php View File

@@ -194,7 +194,7 @@ isset($view) || $view = NULL;
isset($request) || $request = NULL;
isset($action) || $action = NULL;
if ( ZM_ENABLE_CSRF_MAGIC && $action != 'login' && $view != 'view_video' && $request != 'control' ) {
if ( ZM_ENABLE_CSRF_MAGIC && $action != 'login' && $view != 'view_video' && $view != 'video' && $request != 'control' ) {
require_once( 'includes/csrf/csrf-magic.php' );
Logger::Debug("Calling csrf_check with the following values: \$request = \"$request\", \$view = \"$view\", \$action = \"$action\"");
csrf_check();