Merge branch 'fix_zms_from_mp4' into h265
commit 0ec391354c
@@ -22,7 +22,7 @@ if [ "$1" = "configure" ]; then
if [ "$ZM_DB_HOST" = "localhost" ]; then
if [ -e "/lib/systemd/system/mysql.service" ] || [ -e "/lib/systemd/system/mariadb.service" ]; then
if [ -e "/lib/systemd/system/mysql.service" ] || [ -e "/lib/systemd/system/mariadb.service" ] || [ -e "/etc/init.d/mysql" ]; then
# Ensure zoneminder is stopped
deb-systemd-invoke stop zoneminder.service || exit $?
@@ -68,6 +68,7 @@ if [ "$1" = "configure" ]; then
# Add any new PTZ control configurations to the database (will not overwrite)
zmcamtool.pl --import >/dev/null 2>&1
echo "Done Updating; starting ZoneMinder."
else
echo 'NOTE: MySQL/MariaDB not running; please start mysql and run dpkg-reconfigure zoneminder when it is running.'
fi
@@ -78,7 +79,6 @@ if [ "$1" = "configure" ]; then
else
echo "Not doing database upgrade due to remote db server ($ZM_DB_HOST)."
fi
echo "Done Updating; starting ZoneMinder."
deb-systemd-invoke restart zoneminder.service
fi
@@ -67,6 +67,7 @@ my $level = 1;
my $monitor_id = 0;
my $version;
my $force = 0;
my $server_id = undef;
my $storage_id = undef;
logInit();
@@ -78,6 +79,7 @@ GetOptions(
level =>\$level,
'monitor_id=i' =>\$monitor_id,
report =>\$report,
'server_id=i' =>\$server_id,
'storage_id=i' =>\$storage_id,
version =>\$version
) or pod2usage(-exitstatus => -1);
@@ -181,13 +183,15 @@ MAIN: while( $loop ) {
Term();
}
Info("Auditing Storage Area $Storage_Areas[0]{Id} $Storage_Areas[0]{Name} at $Storage_Areas[0]{Path}");
} elsif ( $Config{ZM_SERVER_ID} ) {
@Storage_Areas = ZoneMinder::Storage->find( ServerId => $Config{ZM_SERVER_ID} );
} elsif ( $server_id ) {
@Storage_Areas = ZoneMinder::Storage->find( ServerId => $server_id );
if ( ! @Storage_Areas ) {
Error("No Storage Area found with ServerId =" . $Config{ZM_SERVER_ID});
Error("No Storage Area found with ServerId =" . $server_id);
Term();
}
Info("Auditing All Storage Areas on Server " . $Storage_Areas[0]->Server()->Name());
foreach my $Storage ( @Storage_Areas ) {
Info('Auditing ' . $Storage->Name() . ' at ' . $Storage->Path() . ' on ' . $Storage->Server()->Name() );
}
} else {
@Storage_Areas = ZoneMinder::Storage->find();
Info("Auditing All Storage Areas");
@@ -382,7 +386,7 @@ MAIN: while( $loop ) {
} # if USE_DEEP_STORAGE
Debug( 'Got '.int(keys(%$fs_events))." filesystem events for monitor $monitor_dir\n" );
delete_empty_directories($monitor_dir);
delete_empty_directories($$Storage{Path}.'/'.$monitor_dir);
} # end foreach monitor
if ( $cleaned ) {
@@ -713,13 +713,17 @@ sub substituteTags {
}
} # end if $first_alarm_frame
if ( $attachments_ref && $Config{ZM_OPT_FFMPEG} ) {
if ( $attachments_ref ) {
if ( $text =~ s/%EV%//g ) {
my ( $format, $path ) = generateVideo($filter, $Event);
if ( !$format ) {
return undef;
if ( $$Event{DefaultVideo} ) {
push @$attachments_ref, { type=>'video/mp4', path=>join('/',$Event->Path(), $Event->DefaultVideo()) };
} elsif ( $Config{ZM_OPT_FFMPEG} ) {
my ( $format, $path ) = generateVideo($filter, $Event);
if ( !$format ) {
return undef;
}
push( @$attachments_ref, { type=>"video/$format", path=>$path } );
}
push( @$attachments_ref, { type=>"video/$format", path=>$path } );
}
if ( $text =~ s/%EVM%//g ) {
my ( $format, $path ) = generateVideo($filter, $Event, 1);
@@ -539,7 +539,7 @@ Debug(3, "Writing video");
if ( score < 0 )
score = 0;
bool db_frame = ( frame_type != BULK ) || ((frames%config.bulk_frame_interval)==0) || !frames;
bool db_frame = ( frame_type != BULK ) || (!frames) || ((frames%config.bulk_frame_interval)==0) ;
if ( db_frame ) {
Debug( 1, "Adding frame %d of type \"%s\" to DB", frames, Event::frame_type_names[frame_type] );
@@ -109,7 +109,10 @@ bool EventStream::loadInitialEventData( uint64_t init_event_id, unsigned int ini
bool EventStream::loadEventData(uint64_t event_id) {
static char sql[ZM_SQL_MED_BUFSIZ];
snprintf(sql, sizeof(sql), "SELECT MonitorId, StorageId, Frames, unix_timestamp( StartTime ) AS StartTimestamp, (SELECT max(Delta)-min(Delta) FROM Frames WHERE EventId=Events.Id) AS Duration, DefaultVideo, Scheme FROM Events WHERE Id = %" PRIu64, event_id);
snprintf(sql, sizeof(sql), "SELECT MonitorId, StorageId, Frames,"
" unix_timestamp( StartTime ) AS StartTimestamp,"
" (SELECT max(Delta)-min(Delta) FROM Frames WHERE EventId=Events.Id) AS Duration, DefaultVideo, Scheme"
" FROM Events WHERE Id = %" PRIu64, event_id);
if ( mysql_query(&dbconn, sql) ) {
Error("Can't run query: %s", mysql_error(&dbconn));
@@ -161,18 +164,25 @@ bool EventStream::loadEventData(uint64_t event_id) {
if ( storage_path[0] == '/' )
snprintf( event_data->path, sizeof(event_data->path), "%s/%ld/%02d/%02d/%02d/%02d/%02d/%02d",
storage_path, event_data->monitor_id, event_time->tm_year-100, event_time->tm_mon+1, event_time->tm_mday, event_time->tm_hour, event_time->tm_min, event_time->tm_sec );
storage_path, event_data->monitor_id,
event_time->tm_year-100, event_time->tm_mon+1, event_time->tm_mday,
event_time->tm_hour, event_time->tm_min, event_time->tm_sec );
else
snprintf( event_data->path, sizeof(event_data->path), "%s/%s/%ld/%02d/%02d/%02d/%02d/%02d/%02d",
staticConfig.PATH_WEB.c_str(), storage_path, event_data->monitor_id, event_time->tm_year-100, event_time->tm_mon+1, event_time->tm_mday, event_time->tm_hour, event_time->tm_min, event_time->tm_sec );
staticConfig.PATH_WEB.c_str(), storage_path, event_data->monitor_id,
event_time->tm_year-100, event_time->tm_mon+1, event_time->tm_mday,
event_time->tm_hour, event_time->tm_min, event_time->tm_sec );
} else if ( event_data->scheme == Storage::MEDIUM ) {
struct tm *event_time = localtime( &event_data->start_time );
if ( storage_path[0] == '/' )
snprintf( event_data->path, sizeof(event_data->path), "%s/%ld/%04d-%02d-%02d/%" PRIu64,
storage_path, event_data->monitor_id, event_time->tm_year+1900, event_time->tm_mon+1, event_time->tm_mday, event_data->event_id );
storage_path, event_data->monitor_id,
event_time->tm_year+1900, event_time->tm_mon+1, event_time->tm_mday,
event_data->event_id );
else
snprintf( event_data->path, sizeof(event_data->path), "%s/%s/%ld/%04d-%02d-%02d/%" PRIu64,
staticConfig.PATH_WEB.c_str(), storage_path, event_data->monitor_id, event_time->tm_year+1900, event_time->tm_mon+1, event_time->tm_mday,
staticConfig.PATH_WEB.c_str(), storage_path, event_data->monitor_id,
event_time->tm_year+1900, event_time->tm_mon+1, event_time->tm_mday,
event_data->event_id );
} else {
@@ -186,8 +196,9 @@ bool EventStream::loadEventData(uint64_t event_id) {
delete storage; storage = NULL;
updateFrameRate( (double)event_data->frame_count/event_data->duration );
Debug(3,"fps set by frame_count(%d)/duration(%f)", event_data->frame_count, event_data->duration);
snprintf(sql, sizeof(sql), "SELECT FrameId, unix_timestamp( `TimeStamp` ), Delta FROM Frames where EventId = %" PRIu64 " ORDER BY FrameId ASC", event_id);
snprintf(sql, sizeof(sql), "SELECT FrameId, unix_timestamp(`TimeStamp`), Delta FROM Frames WHERE EventId = %" PRIu64 " ORDER BY FrameId ASC", event_id);
if ( mysql_query(&dbconn, sql) ) {
Error("Can't run query: %s", mysql_error(&dbconn));
exit(mysql_errno(&dbconn));
@@ -203,29 +214,46 @@ bool EventStream::loadEventData(uint64_t event_id) {
event_data->frames = new FrameData[event_data->frame_count];
int last_id = 0;
time_t timestamp, last_timestamp = event_data->start_time;
double last_timestamp = event_data->start_time;
double last_delta = 0.0;
while ( ( dbrow = mysql_fetch_row( result ) ) ) {
int id = atoi(dbrow[0]);
timestamp = atoi(dbrow[1]);
//timestamp = atof(dbrow[1]);
double delta = atof(dbrow[2]);
int id_diff = id - last_id;
double frame_delta = id_diff ? (delta-last_delta)/id_diff : 0;
double frame_delta = id_diff ? (delta-last_delta)/id_diff : (delta - last_delta);
// Fill in data between bulk frames
if ( id_diff > 1 ) {
for ( int i = last_id+1; i < id; i++ ) {
event_data->frames[i-1].timestamp = (time_t)(last_timestamp + ((i-last_id)*frame_delta));
event_data->frames[i-1].offset = (time_t)(event_data->frames[i-1].timestamp-event_data->start_time);
// Delta is the time since last frame, no since beginning of Event
event_data->frames[i-1].delta = frame_delta;
event_data->frames[i-1].timestamp = last_timestamp + ((i-last_id)*frame_delta);
event_data->frames[i-1].offset = event_data->frames[i-1].timestamp - event_data->start_time;
event_data->frames[i-1].in_db = false;
Debug(4,"Frame %d timestamp:(%f), offset(%f) delta(%f), in_db(%d)",
i,
event_data->frames[i-1].timestamp,
event_data->frames[i-1].offset,
event_data->frames[i-1].delta,
event_data->frames[i-1].in_db
);
}
}
event_data->frames[id-1].timestamp = timestamp;
event_data->frames[id-1].offset = (time_t)(event_data->frames[id-1].timestamp-event_data->start_time);
event_data->frames[id-1].delta = id>1?frame_delta:0.0;
event_data->frames[id-1].timestamp = event_data->start_time + delta;
event_data->frames[id-1].offset = delta;
event_data->frames[id-1].delta = frame_delta;
event_data->frames[id-1].in_db = true;
last_id = id;
last_delta = delta;
last_timestamp = timestamp;
last_timestamp = event_data->frames[id-1].timestamp;
Debug(4,"Frame %d timestamp:(%f), offset(%f) delta(%f), in_db(%d)",
id,
event_data->frames[id-1].timestamp,
event_data->frames[id-1].offset,
event_data->frames[id-1].delta,
event_data->frames[id-1].in_db
);
}
if ( mysql_errno( &dbconn ) ) {
Error( "Can't fetch row: %s", mysql_error( &dbconn ) );
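Note on the hunk above: frame timestamps and offsets become doubles and any gap left by bulk frames is filled by linear interpolation of the Delta column. A minimal, self-contained C++ sketch of that gap-filling (FrameData here is a simplified stand-in for the real struct, and the rows are hard-coded instead of coming from the Frames table):

// Hypothetical, simplified illustration of the bulk-frame gap filling in
// EventStream::loadEventData(); not the real ZoneMinder types.
#include <cstdio>
#include <utility>
#include <vector>

struct FrameData { double timestamp, offset, delta; bool in_db; };

int main() {
  const double start_time = 1000.0;  // event StartTime in epoch seconds
  // (FrameId, Delta since event start) rows as they would come from the Frames table
  const std::vector<std::pair<int, double>> rows = { {1, 0.0}, {5, 2.0}, {6, 2.5} };

  std::vector<FrameData> frames(6);
  int last_id = 0;
  double last_timestamp = start_time, last_delta = 0.0;

  for (const auto &row : rows) {
    const int id = row.first;
    const double delta = row.second;
    const int id_diff = id - last_id;
    const double frame_delta = id_diff ? (delta - last_delta) / id_diff : (delta - last_delta);

    // Fill in synthetic frames between the last DB frame and this one (bulk frame gap)
    for (int i = last_id + 1; i < id; i++) {
      frames[i-1].timestamp = last_timestamp + (i - last_id) * frame_delta;
      frames[i-1].offset = frames[i-1].timestamp - start_time;
      frames[i-1].delta = frame_delta;
      frames[i-1].in_db = false;
    }
    frames[id-1] = { start_time + delta, delta, frame_delta, true };

    last_id = id;
    last_delta = delta;
    last_timestamp = frames[id-1].timestamp;
  }

  for (size_t i = 0; i < frames.size(); i++)
    printf("frame %zu: ts=%.3f offset=%.3f delta=%.3f in_db=%d\n",
           i + 1, frames[i].timestamp, frames[i].offset, frames[i].delta, frames[i].in_db);
  return 0;
}

With the sample rows above, frames 2 through 4 get interpolated timestamps 0.5 s apart between the two database frames.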
@@ -636,7 +664,8 @@ Debug(1, "Loading image");
} else if ( ffmpeg_input ) {
// Get the frame from the mp4 input
Debug(1,"Getting frame from ffmpeg");
AVFrame *frame = ffmpeg_input->get_frame( ffmpeg_input->get_video_stream_id() );
FrameData *frame_data = &event_data->frames[curr_frame_id-1];
AVFrame *frame = ffmpeg_input->get_frame( ffmpeg_input->get_video_stream_id(), frame_data->offset );
if ( frame ) {
image = new Image(frame);
av_frame_free(&frame);
@@ -797,12 +826,16 @@ void EventStream::runStream() {
}
// Figure out if we should send this frame
Debug(3,"cur_frame_id (%d-1) mod frame_mod(%d)",curr_frame_id, frame_mod);
// If we are streaming and this frame is due to be sent
if ( ((curr_frame_id-1)%frame_mod) == 0 ) {
// frame mod defaults to 1 and if we are going faster than max_fps will get multiplied by 2
// so if it is 2, then we send every other frame, if is it 4 then every fourth frame, etc.
if ( (frame_mod == 1) || (((curr_frame_id-1)%frame_mod) == 0) ) {
delta_us = (unsigned int)(frame_data->delta * 1000000);
Debug(3,"frame delta %u ", delta_us);
// if effective > base we should speed up frame delivery
delta_us = (unsigned int)((delta_us * base_fps)/effective_fps);
Debug(3,"delta %u = base_fps(%f)/effective fps(%f)", delta_us, base_fps, effective_fps);
// but must not exceed maxfps
delta_us = max(delta_us, 1000000 / maxfps);
send_frame = true;
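Note on the pacing change above: a frame is sent when frame_mod allows it, the recorded inter-frame delta is scaled by base_fps/effective_fps, and the sleep is clamped so playback never exceeds maxfps. A small stand-alone sketch of that arithmetic (plain C++ with hypothetical constants, none of the ZoneMinder stream state):

// Sketch of the frame pacing math in EventStream::runStream().
#include <algorithm>
#include <cstdio>

int main() {
  const int frame_mod = 2;            // send every 2nd frame
  const double base_fps = 10.0;       // native fps of the recorded event
  const double effective_fps = 20.0;  // requested playback fps (e.g. 2x rate)
  const double maxfps = 30.0;         // hard cap
  const double frame_delta = 0.1;     // seconds between recorded frames

  for (int curr_frame_id = 1; curr_frame_id <= 6; curr_frame_id++) {
    if ( (frame_mod == 1) || (((curr_frame_id - 1) % frame_mod) == 0) ) {
      unsigned int delta_us = (unsigned int)(frame_delta * 1000000);
      // if effective > base we speed up delivery, and vice versa
      delta_us = (unsigned int)((delta_us * base_fps) / effective_fps);
      // never sleep less than one maxfps interval, so we cannot exceed maxfps
      delta_us = std::max(delta_us, (unsigned int)(1000000 / maxfps));
      printf("frame %d: send, sleep %u us\n", curr_frame_id, delta_us);
    } else {
      printf("frame %d: skip\n", curr_frame_id);
    }
  }
  return 0;
}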
@@ -47,8 +47,8 @@ class EventStream : public StreamBase {
protected:
struct FrameData {
//unsigned long id;
time_t timestamp;
time_t offset;
double timestamp;
double offset;
double delta;
bool in_db;
};
@@ -276,7 +276,7 @@ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
Debug(1, "[0x%x]", st->id);
if (lang)
Debug(1, "(%s)", lang->value);
Debug(1, ", frames:%d, timebase: %d/%d", st->codec_info_nb_frames, st->time_base.num, st->time_base.den);
Debug(1, ", frames:%d, timebase: %d/%d", st->nb_frames, st->time_base.num, st->time_base.den);
avcodec_string(buf, sizeof(buf), st->codec, is_output);
Debug(1, ": %s", buf);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
@@ -437,12 +437,17 @@ int zm_receive_frame( AVCodecContext *context, AVFrame *frame, AVPacket &packet
#endif
return 1;
} // end int zm_receive_frame( AVCodecContext *context, AVFrame *frame, AVPacket &packet )
void dumpPacket(AVPacket *pkt, const char *text) {
void dumpPacket(AVStream *stream, AVPacket *pkt, const char *text) {
char b[10240];
double pts_time = (double)av_rescale_q(pkt->pts,
stream->time_base,
AV_TIME_BASE_Q
) / AV_TIME_BASE;
snprintf(b, sizeof(b),
" pts: %" PRId64 ", dts: %" PRId64
", data: %p, size: %d, stream_index: %d, flags: %04x, keyframe(%d) pos: %" PRId64
" pts: %" PRId64 "=%f, dts: %" PRId64
", size: %d, stream_index: %d, flags: %04x, keyframe(%d) pos: %" PRId64
", duration: %"
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
PRId64
@@ -451,8 +456,8 @@ void dumpPacket(AVPacket *pkt, const char *text) {
#endif
"\n",
pkt->pts,
pts_time,
pkt->dts,
pkt->data,
pkt->size,
pkt->stream_index,
pkt->flags,
@@ -327,5 +327,5 @@ int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt);
bool is_video_stream( AVStream * stream );
bool is_audio_stream( AVStream * stream );
int zm_receive_frame( AVCodecContext *context, AVFrame *frame, AVPacket &packet );
void dumpPacket(AVPacket *,const char *text="DEBUG");
void dumpPacket(AVStream *, AVPacket *,const char *text="");
#endif // ZM_FFMPEG_H
@@ -713,10 +713,10 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
int keyframe = packet.flags & AV_PKT_FLAG_KEY;
bytes += packet.size;
dumpPacket(&packet);
dumpPacket(mFormatContext->streams[packet.stream_index], &packet, "Captured");
//Video recording
if ( recording.tv_sec ) {
if ( keyframe && recording.tv_sec ) {
uint32_t last_event_id = monitor->GetLastEventId() ;
uint32_t video_writer_event_id = monitor->GetVideoWriterEventId();
@@ -817,31 +817,33 @@ int FfmpegCamera::CaptureAndRecord( Image &image, timeval recording, char* event
} else {
// Not recording
if ( videoStore ) {
if ( videoStore && keyframe ) {
Info("Deleting videoStore instance");
delete videoStore;
videoStore = NULL;
have_video_keyframe = false;
monitor->SetVideoWriterEventId(0);
}
// Buffer video packets, since we are not recording.
// All audio packets are keyframes, so only if it's a video keyframe
if ( packet.stream_index == mVideoStreamId ) {
if ( keyframe ) {
Debug(3, "Clearing queue");
packetqueue.clearQueue(monitor->GetPreEventCount(), mVideoStreamId);
packetqueue.queuePacket(&packet);
} else if ( packetqueue.size() ) {
// it's a keyframe or we already have something in the queue
packetqueue.queuePacket(&packet);
}
} else if ( packet.stream_index == mAudioStreamId ) {
// The following lines should ensure that the queue always begins with a video keyframe
//Debug(2, "Have audio packet, reocrd_audio is (%d) and packetqueue.size is (%d)", record_audio, packetqueue.size() );
if ( record_audio && packetqueue.size() ) {
// if it's audio, and we are doing audio, and there is already something in the queue
packetqueue.queuePacket(&packet);
if ( ! videoStore ) {
// Buffer video packets, since we are not recording.
// All audio packets are keyframes, so only if it's a video keyframe
if ( packet.stream_index == mVideoStreamId ) {
if ( keyframe ) {
packetqueue.clearQueue(monitor->GetPreEventCount(), mVideoStreamId);
packetqueue.queuePacket(&packet);
} else if ( packetqueue.size() ) {
Debug(3, "queue has %d", packetqueue.size());
// it's a keyframe or we already have something in the queue
packetqueue.queuePacket(&packet);
}
} else if ( packet.stream_index == mAudioStreamId ) {
// The following lines should ensure that the queue always begins with a video keyframe
//Debug(2, "Have audio packet, reocrd_audio is (%d) and packetqueue.size is (%d)", record_audio, packetqueue.size() );
if ( record_audio && packetqueue.size() ) {
// if it's audio, and we are doing audio, and there is already something in the queue
packetqueue.queuePacket(&packet);
}
}
}
} // end if recording or not
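Note on the restructured non-recording branch above: packets are only buffered (never written), the buffer is restarted on each video keyframe so it always begins with one, and audio is queued only once something is already buffered. A simplified sketch of that gating with a std::deque standing in for zm_packetqueue (the real clearQueue() keeps the configured pre-event count rather than clearing everything):

// Simplified stand-in for the keyframe-gated pre-event buffering; the
// ZMPacket/zm_packetqueue types are replaced by a small struct and a deque.
#include <cstdio>
#include <deque>

struct Pkt { int stream_index; bool keyframe; };

int main() {
  const int video_stream = 0, audio_stream = 1;
  const bool record_audio = true;
  std::deque<Pkt> queue;

  const Pkt packets[] = {
    {audio_stream, true},   // audio before any video keyframe: dropped
    {video_stream, false},  // non-keyframe with an empty queue: dropped
    {video_stream, true},   // video keyframe: restart the queue here
    {audio_stream, true},   // audio after the keyframe: kept
    {video_stream, false},  // following video frame: kept
  };

  for (const Pkt &p : packets) {
    if (p.stream_index == video_stream) {
      if (p.keyframe) {
        queue.clear();         // queue must begin with a video keyframe
        queue.push_back(p);
      } else if (!queue.empty()) {
        queue.push_back(p);    // keep non-keyframes once a keyframe is queued
      }
    } else if (p.stream_index == audio_stream) {
      if (record_audio && !queue.empty())
        queue.push_back(p);    // audio only once the queue has started
    }
  }
  printf("queued %zu packets\n", queue.size());
  return 0;
}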
@@ -119,8 +119,9 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, ret, errbuf );
return NULL;
}
dumpPacket(input_format_context->streams[packet.stream_index], &packet, "Received packet");
if ( (stream_id < 0 ) || ( packet.stream_index == stream_id ) ) {
if ( (stream_id < 0) || (packet.stream_index == stream_id) ) {
Debug(3,"Packet is for our stream (%d)", packet.stream_index );
AVCodecContext *context = streams[packet.stream_index].context;
@@ -154,7 +155,7 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
}
} else {
#endif
Debug(1,"Getting a frame?");
Debug(1,"Getting frame %d", streams[packet.stream_index].frame_count);
ret = avcodec_receive_frame( context, frame );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
@@ -185,3 +186,20 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
return frame;
} // end AVFrame *FFmpeg_Input::get_frame
AVFrame *FFmpeg_Input::get_frame( int stream_id, double at ) {
Debug(1, "Getting frame from stream %d at %f", stream_id, at);
int64_t seek_target = (int64_t)at * AV_TIME_BASE;
Debug(1, "Getting frame from stream %d at %" PRId64, stream_id, seek_target);
seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q, input_format_context->streams[stream_id]->time_base);
Debug(1, "Getting frame from stream %d at %" PRId64, stream_id, seek_target);
int ret;
if ( ( ret = av_seek_frame(input_format_context, stream_id, seek_target, 0/*FORWARDS*/) < 0 ) ) {
Error("Unable to seek in stream");
return NULL;
}
return get_frame(stream_id);
} // end AVFrame *FFmpeg_Input::get_frame( int stream_id, struct timeval at)
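Note on the new two-argument get_frame() above: the requested time in seconds is first expressed in AV_TIME_BASE units, rescaled into the stream's time_base, and then passed to av_seek_frame() before decoding resumes. A hedged sketch of just that conversion and seek, assuming an already opened AVFormatContext called fmt_ctx (the hunk casts `at` to int64_t before multiplying, which drops any fractional second; the sketch multiplies first):

// Sketch only: seek a stream to a time given in seconds.
extern "C" {
#include <libavformat/avformat.h>
}

static int seek_to_seconds(AVFormatContext *fmt_ctx, int stream_id, double at) {
  // Express the target in AV_TIME_BASE units (microseconds)...
  int64_t seek_target = (int64_t)(at * AV_TIME_BASE);
  // ...then rescale into the units of the target stream's time_base.
  seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
                             fmt_ctx->streams[stream_id]->time_base);
  // AVSEEK_FLAG_BACKWARD lands on the preceding keyframe so decoding can
  // resume cleanly; the hunk above passes 0 instead.
  return av_seek_frame(fmt_ctx, stream_id, seek_target, AVSEEK_FLAG_BACKWARD);
}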
@@ -22,6 +22,7 @@ class FFmpeg_Input {
int Open( const char *filename );
int Close();
AVFrame *get_frame( int stream_id=-1 );
AVFrame *get_frame( int stream_id, double at );
int get_video_stream_id() {
return video_stream_id;
}
@@ -1389,7 +1389,7 @@ bool Monitor::Analyse() {
} else if ( signal && Active() && (function == MODECT || function == MOCORD) ) {
Event::StringSet zoneSet;
if ( !(image_count % (motion_frame_skip+1) ) ) {
if ( (!motion_frame_skip) || !(image_count % (motion_frame_skip+1) ) ) {
// Get new score.
int new_motion_score = DetectMotion(*snap_image, zoneSet);
@@ -1714,7 +1714,7 @@ Error("Creating new event when one exists");
//Warning("In state TAPE,
//video_store_data->recording = event->StartTime();
//}
if ( !(image_count%(frame_skip+1)) ) {
if ( (!frame_skip) || !(image_count%(frame_skip+1)) ) {
if ( config.bulk_frame_interval > 1 ) {
event->AddFrame( snap_image, *timestamp, (event->Frames()<pre_event_count?0:-1) );
} else {
@@ -1731,6 +1731,7 @@ Error("Creating new event when one exists");
}
shared_data->state = state = IDLE;
last_section_mod = 0;
trigger_data->trigger_state = TRIGGER_CANCEL;
} // end if ( trigger_data->trigger_state != TRIGGER_OFF )
if ( (!signal_change && signal) && (function == MODECT || function == MOCORD) ) {
@@ -1756,7 +1757,7 @@ Error("Creating new event when one exists");
image_count++;
return( true );
return true;
}
void Monitor::Reload() {
@@ -2384,6 +2385,9 @@ int Monitor::Capture() {
if ( captureResult < 0 ) {
Info("Return from Capture (%d), signal loss", captureResult);
// Tell zma to end the event. zma will reset TRIGGER
trigger_data->trigger_state = TRIGGER_OFF;
// Unable to capture image for temporary reason
// Fake a signal loss image
Rgb signalcolor;
@@ -119,11 +119,14 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
void zm_packetqueue::clearQueue() {
ZMPacket *packet = NULL;
int delete_count = 0;
while(!pktQueue.empty()) {
packet = pktQueue.front();
pktQueue.pop_front();
delete packet;
delete_count += 1;
}
Debug(3, "Deleted (%d) packets", delete_count );
}
unsigned int zm_packetqueue::size() {
@@ -64,7 +64,7 @@ void StreamBase::updateFrameRate(double fps) {
base_fps = fps;
effective_fps = (base_fps*abs(replay_rate))/ZM_RATE_BASE;
frame_mod = 1;
Debug(3, "FPS:%.2f, MXFPS:%.2f, BFPS:%.2f, EFPS:%.2f, FM:%d", fps, maxfps, base_fps, effective_fps, frame_mod);
Debug(3, "FPS:%.2f, MaxFPS:%.2f, BaseFPS:%.2f, EffectiveFPS:%.2f, FrameMod:%d", fps, maxfps, base_fps, effective_fps, frame_mod);
// Min frame repeat?
while( effective_fps > maxfps ) {
effective_fps /= 2.0;
@@ -178,7 +178,6 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
}
Monitor::Orientation orientation = monitor->getOrientation();
Debug(3, "Have orientation");
if (orientation) {
if (orientation == Monitor::ROTATE_0) {
} else if (orientation == Monitor::ROTATE_90) {
@@ -295,14 +294,18 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
} // end if audio_in_stream
video_first_pts = 0;
video_first_dts = 0;
video_last_pts = 0;
video_last_dts = 0;
audio_first_pts = 0;
audio_first_dts = 0;
audio_last_pts = 0;
audio_last_dts = 0;
video_next_pts = 0;
video_next_dts = 0;
audio_next_pts = 0;
audio_next_dts = 0;
} // VideoStore::VideoStore
bool VideoStore::open() {
@@ -386,18 +389,16 @@ VideoStore::~VideoStore() {
#endif
Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts,
pkt.dts, pkt.duration);
pkt.pts = audio_next_pts;
pkt.dts = audio_next_dts;
#if 0
if ( pkt.duration > 0 )
pkt.duration =
av_rescale_q(pkt.duration, audio_out_ctx->time_base,
audio_out_stream->time_base);
audio_next_pts += pkt.duration;
audio_next_dts += pkt.duration;
Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts,
pkt.dts, pkt.duration);
#endif
pkt.stream_index = audio_out_stream->index;
av_interleaved_write_frame(oc, &pkt);
zm_av_packet_unref(&pkt);
@@ -720,96 +721,98 @@ bool VideoStore::setup_resampler() {
int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
av_init_packet(&opkt);
opkt.pts = video_next_pts;
opkt.dts = video_next_dts;
opkt.duration = 0;
dumpPacket(video_in_stream, ipkt, "input packet");
int64_t duration;
if ( !video_last_pts ) {
duration = 0;
} else {
duration =
av_rescale_q(ipkt->pts - video_last_pts, video_in_stream->time_base,
video_out_stream->time_base);
Debug(1, "duration calc: pts(%" PRId64 ") - last_pts(% " PRId64 ") = (%" PRId64 ")",
if ( ipkt->duration ) {
duration = av_rescale_q(
ipkt->duration,
video_in_stream->time_base,
video_out_stream->time_base);
Debug(1, "duration from ipkt: pts(%" PRId64 ") - last_pts(%" PRId64 ") = (%" PRId64 ") => (%" PRId64 ") (%d/%d) (%d/%d)",
ipkt->pts,
video_last_pts,
duration);
if (duration <= 0) {
ipkt->duration,
duration,
video_in_stream->time_base.num,
video_in_stream->time_base.den,
video_out_stream->time_base.num,
video_out_stream->time_base.den
);
} else {
duration =
av_rescale_q(
ipkt->pts - video_last_pts,
video_in_stream->time_base,
video_out_stream->time_base);
Debug(1, "duration calc: pts(%" PRId64 ") - last_pts(%" PRId64 ") = (%" PRId64 ") => (%" PRId64 ")",
ipkt->pts,
video_last_pts,
ipkt->pts - video_last_pts,
duration
);
if ( duration <= 0 ) {
duration = ipkt->duration ? ipkt->duration : av_rescale_q(1,video_in_stream->time_base, video_out_stream->time_base);
}
}
opkt.duration = duration;
//#if ( 0 && video_last_pts && ( ipkt->duration == AV_NOPTS_VALUE || !
//ipkt->duration ) ) {
// Video packets don't really have a duration. Audio does.
// opkt.duration = av_rescale_q(duration, video_in_stream->time_base,
// video_out_stream->time_base);
// opkt.duration = 0;
//} else {
// duration = opkt.duration = av_rescale_q(ipkt->duration,
// video_in_stream->time_base, video_out_stream->time_base);
//}
video_last_pts = ipkt->pts;
video_last_dts = ipkt->dts;
#if 0
//Scale the PTS of the outgoing packet to be the correct time base
if ( ipkt->pts != AV_NOPTS_VALUE ) {
if ( ! video_last_pts ) {
if ( !video_first_pts ) {
// This is the first packet.
opkt.pts = 0;
Debug(2, "Starting video video_last_pts will become (%d)", ipkt->pts);
Debug(2, "Starting video first_pts will become %" PRId64, ipkt->pts);
video_first_pts = ipkt->pts;
} else {
if ( ipkt->pts < video_last_pts ) {
Debug(1, "Resetting video_last_pts from (%d) to (%d)", video_last_pts, ipkt->pts);
if ( ipkt->pts < video_first_pts ) {
Debug(1, "Resetting first_pts from %" PRId64 " to %" PRId64, video_last_pts, ipkt->pts);
video_first_pts -= video_last_pts;
// wrap around, need to figure out the distance FIXME having this wrong should cause a jump, but then play ok?
opkt.pts = video_next_pts + av_rescale_q( ipkt->pts, video_in_stream->time_base, video_out_stream->time_base);
} else {
opkt.pts = video_next_pts + av_rescale_q( ipkt->pts - video_last_pts, video_in_stream->time_base, video_out_stream->time_base);
}
opkt.pts = av_rescale_q(
ipkt->pts-video_first_pts,
video_in_stream->time_base,
video_out_stream->time_base
);
}
Debug(3, "opkt.pts = %d from ipkt->pts(%d) - last_pts(%d)", opkt.pts, ipkt->pts, video_last_pts);
Debug(3, "opkt.pts = %" PRId64 " from ipkt->pts(%" PRId64 ") - first_pts(%" PRId64 ")", opkt.pts, ipkt->pts, video_first_pts);
video_last_pts = ipkt->pts;
} else {
Debug(3, "opkt.pts = undef");
opkt.pts = AV_NOPTS_VALUE;
}
// Just because the in stream wraps, doesn't mean the out needs to. Really, if we are limiting ourselves to 10min segments I can't imagine every wrapping in the out. So need to handle in wrap, without causing out wrap.
if ( !video_last_dts ) {
// This is the first packet.
opkt.dts = 0;
Debug(1, "Starting video video_last_dts will become (%lu)", ipkt->dts);
video_last_dts = ipkt->dts;
} else {
// Scale the DTS of the outgoing packet to be the correct time base
if ( ipkt->dts == AV_NOPTS_VALUE ) {
// why are we using cur_dts instead of packet.dts? I think cur_dts is in AV_TIME_BASE_Q, but ipkt.dts is in video_in_stream->time_base
if ( video_in_stream->cur_dts < video_last_dts ) {
Debug(1, "Resetting video_last_dts from (%d) to (%d) p.dts was (%d)", video_last_dts, video_in_stream->cur_dts, ipkt->dts);
opkt.dts = video_next_dts + av_rescale_q(video_in_stream->cur_dts, AV_TIME_BASE_Q, video_out_stream->time_base);
} else {
opkt.dts = video_next_dts + av_rescale_q(video_in_stream->cur_dts - video_last_dts, AV_TIME_BASE_Q, video_out_stream->time_base);
}
Debug(3, "opkt.dts = %d from video_in_stream->cur_dts(%d) - previus_dts(%d)", opkt.dts, video_in_stream->cur_dts, video_last_dts);
video_last_dts = video_in_stream->cur_dts;
if ( ipkt->dts != AV_NOPTS_VALUE ) {
if ( !video_first_dts ) {
// This is the first packet.
opkt.dts = 0;
Debug(1, "Starting video first_dts will become (%" PRId64 ")", ipkt->dts);
video_first_dts = ipkt->dts;
} else {
if ( ipkt->dts < video_last_dts ) {
Debug(1, "Resetting video_last_dts from (%d) to (%d)", video_last_dts, ipkt->dts);
opkt.dts = video_next_dts + av_rescale_q( ipkt->dts, video_in_stream->time_base, video_out_stream->time_base);
} else {
opkt.dts = video_next_dts + av_rescale_q( ipkt->dts - video_last_dts, video_in_stream->time_base, video_out_stream->time_base);
if ( ipkt->dts < video_first_dts ) {
Debug(1, "Resetting first_dts from (%" PRId64 ") to (%" PRId64")",
video_first_dts, ipkt->dts);
video_first_dts -= video_last_dts;
}
Debug(3, "opkt.dts = %d from ipkt.dts(%d) - previus_dts(%d)", opkt.dts, ipkt->dts, video_last_dts);
opkt.dts = av_rescale_q(
ipkt->dts-video_first_dts,
video_in_stream->time_base,
video_out_stream->time_base
);
Debug(3, "opkt.dts = %" PRId64 " from ipkt.dts(%" PRId64 ") - first_dts(%" PRId64 ")",
opkt.dts, ipkt->dts, video_first_dts);
video_last_dts = ipkt->dts;
}
} else {
Debug(3, "opkt.dts = undef");
opkt.dts = AV_NOPTS_VALUE;
}
#endif
if (opkt.dts > opkt.pts) {
Debug(1,
"opkt.dts(%d) must be <= opkt.pts(%d). Decompression must happen "
"opkt.dts(%" PRId64 ") must be <= opkt.pts(%" PRId64 "). Decompression must happen "
"before presentation.",
opkt.dts, opkt.pts);
opkt.dts = opkt.pts;
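Note on the rewritten timestamp handling above: output pts/dts are rebased against the first input pts/dts and rescaled from the input stream's time_base to the output stream's, and the packet duration is taken from ipkt->duration when present. A minimal sketch of the rebasing step, with hypothetical in/out time bases standing in for video_in_stream and video_out_stream:

// Illustrative only: rebase input pts against the first pts seen and rescale
// into the output time_base, as writeVideoFramePacket() now does.
extern "C" {
#include <libavutil/avutil.h>        // AV_NOPTS_VALUE, AVRational
#include <libavutil/mathematics.h>   // av_rescale_q
}
#include <cstdio>

int main() {
  const AVRational in_tb  = {1, 90000};  // hypothetical input time_base (90 kHz)
  const AVRational out_tb = {1, 1000};   // hypothetical output time_base (ms)

  int64_t first_pts = AV_NOPTS_VALUE;
  const int64_t input_pts[] = { 180000, 183000, 186000 };  // sample pts values

  for (int64_t ipts : input_pts) {
    if (first_pts == AV_NOPTS_VALUE)
      first_pts = ipts;  // the first packet anchors the output timeline at 0
    int64_t opts = av_rescale_q(ipts - first_pts, in_tb, out_tb);
    printf("ipkt.pts=%lld -> opkt.pts=%lld\n", (long long)ipts, (long long)opts);
  }
  return 0;
}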
@@ -823,24 +826,21 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
opkt.stream_index = video_out_stream->index;
AVPacket safepkt;
memcpy(&safepkt, &opkt, sizeof(AVPacket));
dumpPacket( &opkt, "writing video packet" );
dumpPacket(video_out_stream, &opkt, "writing video packet");
if ((opkt.data == NULL) || (opkt.size < 1)) {
Warning("%s:%d: Mangled AVPacket: discarding frame", __FILE__, __LINE__);
dumpPacket(ipkt);
dumpPacket(&opkt);
dumpPacket(video_in_stream, ipkt,"In Packet");
dumpPacket(video_out_stream, &opkt);
#if 0
} else if ((video_next_dts > 0) && (video_next_dts > opkt.dts)) {
Warning("%s:%d: DTS out of order: %lld \u226E %lld; discarding frame",
__FILE__, __LINE__, video_next_dts, opkt.dts);
video_next_dts = opkt.dts;
dumpPacket(&opkt);
#endif
} else {
video_next_dts = opkt.dts + duration;
video_next_pts = opkt.pts + duration;
ret = av_interleaved_write_frame(oc, &opkt);
if (ret < 0) {
// There's nothing we can really do if the frame is rejected, just drop it
@@ -849,7 +849,6 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
"%s:%d: Writing frame [av_interleaved_write_frame()] failed: %s(%d) "
" ",
__FILE__, __LINE__, av_make_error_string(ret).c_str(), ret);
dumpPacket(&safepkt);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
zm_dump_codecpar(video_in_stream->codecpar);
zm_dump_codecpar(video_out_stream->codecpar);
@@ -904,7 +903,7 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
&data_present, ipkt)) < 0) {
Error("Could not decode frame (error '%s')\n",
av_make_error_string(ret).c_str());
dumpPacket(ipkt);
dumpPacket(video_in_stream, ipkt);
av_frame_free(&in_frame);
return 0;
}
@@ -1068,8 +1067,7 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
}
// opkt.duration = av_rescale_q(ipkt->duration, audio_in_stream->time_base,
// audio_out_stream->time_base);
Debug(2, "opkt.pts (%d), opkt.dts(%d) opkt.duration = (%d)", opkt.pts,
opkt.dts, opkt.duration);
dumpPacket( audio_out_stream, &opkt );
// pkt.pos: byte position in stream, -1 if unknown
opkt.pos = -1;
@@ -1077,13 +1075,10 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
audio_next_dts = opkt.dts + opkt.duration;
audio_next_pts = opkt.pts + opkt.duration;
AVPacket safepkt;
memcpy(&safepkt, &opkt, sizeof(AVPacket));
ret = av_interleaved_write_frame(oc, &opkt);
if (ret != 0) {
Error("Error writing audio frame packet: %s\n",
av_make_error_string(ret).c_str());
dumpPacket(&safepkt);
} else {
Debug(2, "Success writing audio frame");
}
@@ -52,6 +52,11 @@ private:
int64_t audio_last_pts;
int64_t audio_last_dts;
int64_t video_first_pts;
int64_t video_first_dts;
int64_t audio_first_pts;
int64_t audio_first_dts;
// These are for out, should start at zero. We assume they do not wrap because we just aren't going to save files that big.
int64_t video_next_pts;
int64_t video_next_dts;
@@ -1005,9 +1005,6 @@ function initPage() {
nearEventsQuery(eventData.Id);
initialAlarmCues(eventData.Id); //call ajax+renderAlarmCues
if (scale == "auto") changeScale();
if (window.history.length == 1) {
$j('#closeWindow').html('');
}
}
// Kick everything off
@@ -493,11 +493,11 @@ function getEventCmdResponse( respObj, respText ) {
var cells = row.getElements( 'td' );
var link = new Element( 'a', { 'href': '#', 'events': { 'click': createEventPopup.pass( [ event.Id, '&trms=1&attr1=MonitorId&op1=%3d&val1='+monitorId+'&page=1&popup=1', event.Width, event.Height ] ) } });
var link = new Element( 'a', { 'href': '#', 'events': { 'click': createEventPopup.pass( [ event.Id, '&terms=1&attr1=MonitorId&op1=%3d&val1='+monitorId+'&page=1&popup=1', event.Width, event.Height ] ) } });
link.set( 'text', event.Id );
link.inject( row.getElement( 'td.colId' ) );
link = new Element( 'a', { 'href': '#', 'events': { 'click': createEventPopup.pass( [ event.Id, '&trms=1&attr1=MonitorId&op1=%3d&val1='+monitorId+'&page=1&popup=1', event.Width, event.Height ] ) } });
link = new Element( 'a', { 'href': '#', 'events': { 'click': createEventPopup.pass( [ event.Id, '&terms=1&attr1=MonitorId&op1=%3d&val1='+monitorId+'&page=1&popup=1', event.Width, event.Height ] ) } });
link.set( 'text', event.Name );
link.inject( row.getElement( 'td.colName' ) );