Working zms h265 playing.

Isaac Connor 2018-11-19 16:45:56 -05:00
parent 62e511cfd1
commit b291c06035
6 changed files with 141 additions and 58 deletions

View File

@@ -71,8 +71,9 @@ bool EventStream::loadInitialEventData( int monitor_id, time_t event_time ) {
if ( event_time ) {
curr_stream_time = event_time;
curr_frame_id = 1;
curr_frame_id = 1; // curr_frame_id is 1-based
if ( event_time >= event_data->start_time ) {
Debug(2, "event time is after event start");
for (unsigned int i = 0; i < event_data->frame_count; i++ ) {
//Info( "eft %d > et %d", event_data->frames[i].timestamp, event_time );
if ( event_data->frames[i].timestamp >= event_time ) {
@@ -231,7 +232,7 @@ bool EventStream::loadEventData(uint64_t event_id) {
event_data->frames[i-1].timestamp = last_timestamp + ((i-last_id)*frame_delta);
event_data->frames[i-1].offset = event_data->frames[i-1].timestamp - event_data->start_time;
event_data->frames[i-1].in_db = false;
Debug(4,"Frame %d timestamp:(%f), offset(%f) delta(%f), in_db(%d)",
Debug(3,"Frame %d timestamp:(%f), offset(%f) delta(%f), in_db(%d)",
i,
event_data->frames[i-1].timestamp,
event_data->frames[i-1].offset,
@@ -664,11 +665,16 @@ Debug(1, "Loading image");
} else if ( ffmpeg_input ) {
// Get the frame from the mp4 input
Debug(1,"Getting frame from ffmpeg");
AVFrame *frame;
if ( curr_frame_id == 1 ) {
// Special case, first frame, we want to send the initial keyframe.
frame = ffmpeg_input->get_frame( ffmpeg_input->get_video_stream_id(), 0 );
}
FrameData *frame_data = &event_data->frames[curr_frame_id-1];
AVFrame *frame = ffmpeg_input->get_frame( ffmpeg_input->get_video_stream_id(), frame_data->offset );
frame = ffmpeg_input->get_frame( ffmpeg_input->get_video_stream_id(), frame_data->offset );
if ( frame ) {
image = new Image(frame);
av_frame_free(&frame);
//av_frame_free(&frame);
} else {
Error("Failed getting a frame.");
return false;
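
To make the interleaved before/after lines above easier to follow, here is the resulting playback path in one piece; this is a consolidation of the hunk, not new code. Note that the commit now leaves the returned AVFrame alive: FFmpeg_Input keeps ownership of its cached frame (see the new frame member added to class FFmpeg_Input below).

  // Consolidated "after" state of the hunk above (illustration only).
  AVFrame *frame;
  if ( curr_frame_id == 1 ) {
    // Special case: first frame, prime the decoder with the initial keyframe.
    frame = ffmpeg_input->get_frame(ffmpeg_input->get_video_stream_id(), 0);
  }
  FrameData *frame_data = &event_data->frames[curr_frame_id-1];  // curr_frame_id is 1-based
  frame = ffmpeg_input->get_frame(ffmpeg_input->get_video_stream_id(), frame_data->offset);
  if ( frame ) {
    image = new Image(frame);
    // No av_frame_free(&frame) here any more; FFmpeg_Input owns the cached frame.
  } else {
    Error("Failed getting a frame.");
    return false;
  }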

View File

@@ -276,8 +276,8 @@ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
if (flags & AVFMT_SHOW_IDS)
Debug(1, "[0x%x]", st->id);
if (lang)
Debug(1, "(%s)", lang->value);
Debug(1, ", frames:%d, timebase: %d/%d", st->nb_frames, st->time_base.num, st->time_base.den);
Debug(1, "language (%s)", lang->value);
Debug(1, "frames:%d, timebase: %d/%d", st->nb_frames, st->time_base.num, st->time_base.den);
avcodec_string(buf, sizeof(buf), st->codec, is_output);
Debug(1, ": %s", buf);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
@@ -303,9 +303,6 @@ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
int tbn = st->time_base.den && st->time_base.num;
int tbc = st->codec->time_base.den && st->codec->time_base.num;
if (fps || tbn || tbc)
Debug(3, "\n" );
if (fps)
zm_log_fps(av_q2d(st->avg_frame_rate), tbn || tbc ? "fps, " : "fps");
if (tbn)

View File

@@ -10,6 +10,7 @@ FFmpeg_Input::FFmpeg_Input() {
av_register_all();
avcodec_register_all();
streams = NULL;
frame = NULL;
}
FFmpeg_Input::~FFmpeg_Input() {
@@ -34,13 +35,16 @@ int FFmpeg_Input::Open( const char *filepath ) {
/** Get information on the input file (number of streams etc.). */
if ( (error = avformat_find_stream_info(input_format_context, NULL)) < 0 ) {
Error( "Could not open find stream info (error '%s')\n",
av_make_error_string(error).c_str() );
Error(
"Could not open find stream info (error '%s')",
av_make_error_string(error).c_str()
);
avformat_close_input(&input_format_context);
return error;
}
streams = new stream[input_format_context->nb_streams];
Debug(2,"Have %d streams", input_format_context->nb_streams);
for ( unsigned int i = 0; i < input_format_context->nb_streams; i += 1 ) {
if ( is_video_stream( input_format_context->streams[i] ) ) {
@@ -53,10 +57,13 @@ int FFmpeg_Input::Open( const char *filepath ) {
}
} else if ( is_audio_stream( input_format_context->streams[i] ) ) {
if ( audio_stream_id == -1 ) {
Debug(2,"Audio stream is %d", i);
audio_stream_id = i;
} else {
Warning( "Have another audio stream." );
}
} else {
Warning("Unknown stream type");
}
streams[i].frame_count = 0;
@@ -95,12 +102,11 @@ int FFmpeg_Input::Open( const char *filepath ) {
} // end int FFmpeg_Input::Open( const char * filepath )
AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
Debug(1, "Getting frame from stream %d", stream_id );
Debug(1, "Getting frame from stream %d", stream_id);
int frameComplete = false;
AVPacket packet;
av_init_packet( &packet );
AVFrame *frame = zm_av_frame_alloc();
av_init_packet(&packet);
char errbuf[AV_ERROR_MAX_STRING_SIZE];
while ( !frameComplete ) {
@@ -127,15 +133,13 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
AVCodecContext *context = streams[packet.stream_index].context;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_send_packet( context, &packet );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
continue;
} else {
Debug(1, "Success getting a packet");
}
ret = avcodec_send_packet(context, &packet);
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
continue;
}
#if HAVE_AVUTIL_HWCONTEXT_H
if ( hwaccel ) {
@@ -150,17 +154,24 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
if (ret < 0) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to transfer frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
zm_av_packet_unref(&packet);
continue;
}
} else {
#endif
Debug(1,"Getting frame %d", streams[packet.stream_index].frame_count);
ret = avcodec_receive_frame( context, frame );
if ( frame ) {
av_frame_free(&frame);
frame = zm_av_frame_alloc();
} else {
frame = zm_av_frame_alloc();
}
//Debug(1,"Getting frame %d", streams[packet.stream_index].frame_count);
ret = avcodec_receive_frame(context, frame);
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
av_frame_free(&frame);
continue;
}
@@ -170,11 +181,18 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
frameComplete = 1;
# else
if ( frame ) {
av_frame_free(&frame);
frame = zm_av_frame_alloc();
} else {
frame = zm_av_frame_alloc();
}
ret = zm_avcodec_decode_video(context, frame, &frameComplete, &packet);
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
Error( "Unable to decode frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
av_frame_free(&frame);
continue;
}
#endif
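
The hunks above move decoding to FFmpeg's send/receive API and reuse a cached frame instead of allocating one per call. For reference, a minimal sketch of that send/receive pattern in isolation, assuming the usual libavcodec headers; decode_one is a hypothetical helper, not a name from this commit:

  // Sketch of the avcodec_send_packet / avcodec_receive_frame loop (FFmpeg >= 3.1).
  static AVFrame *decode_one(AVCodecContext *ctx, AVPacket *pkt) {
    if ( avcodec_send_packet(ctx, pkt) < 0 )
      return NULL;                    // bad packet; caller unrefs it and moves on
    AVFrame *frame = av_frame_alloc();
    if ( avcodec_receive_frame(ctx, frame) < 0 ) {
      av_frame_free(&frame);          // EAGAIN (needs more input) or a real error
      return NULL;
    }
    return frame;                     // caller owns the decoded frame
  }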
@@ -190,15 +208,42 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
AVFrame *FFmpeg_Input::get_frame( int stream_id, double at ) {
Debug(1, "Getting frame from stream %d at %f", stream_id, at);
int64_t seek_target = (int64_t)at * AV_TIME_BASE;
Debug(1, "Getting frame from stream %d at %" PRId64, stream_id, seek_target);
int64_t seek_target = (int64_t)(at * AV_TIME_BASE);
Debug(1, "Getting frame from stream %d at seektarget: %" PRId64, stream_id, seek_target);
seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q, input_format_context->streams[stream_id]->time_base);
Debug(1, "Getting frame from stream %d at %" PRId64, stream_id, seek_target);
if ( frame ) {
if ( (frame->pts + frame->pkt_duration) > seek_target ) {
// The current frame is still the valid picture.
Debug(2,"Returning previous frame which is still good");
return frame;
}
if ( frame->pts < seek_target ) {
Debug(2, "Frame pts %" PRId64 " pkt_pts %" PRId64 " duration %" PRId64, frame->pts, frame->pkt_pts, frame->pkt_duration);
while ( frame && (frame->pts < seek_target) ) {
if ( ! get_frame(stream_id) )
return frame;
}
return frame;
}
}
int ret;
if ( ( ret = av_seek_frame(input_format_context, stream_id, seek_target, 0/*FORWARDS*/) < 0 ) ) {
Error("Unable to seek in stream");
return NULL;
if ( frame ) {
if ( ( ret = av_seek_frame(input_format_context, stream_id, seek_target, AVSEEK_FLAG_ANY) < 0 ) ) {
Error("Unable to seek in stream");
return NULL;
}
} else {
// Must go for a keyframe
if ( ( ret = av_seek_frame(input_format_context, stream_id, seek_target,
AVSEEK_FLAG_FRAME
) < 0 ) ) {
Error("Unable to seek in stream");
return NULL;
}
}
return get_frame(stream_id);
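
The seek path above converts the requested offset in seconds into the stream's time base and, when a frame is already cached, simply decodes forward until the target is covered. A hedged sketch of that logic, assuming the same class members as the commit (frame is the cached AVFrame, get_frame(stream_id) refills it):

  // Illustration only, not the committed code.
  int64_t target = av_rescale_q((int64_t)(at * AV_TIME_BASE),
                                AV_TIME_BASE_Q,
                                input_format_context->streams[stream_id]->time_base);
  if ( frame && (frame->pts + frame->pkt_duration) > target )
    return frame;                     // cached frame still covers the request
  while ( frame && (frame->pts < target) ) {
    if ( !get_frame(stream_id) )      // decode the next frame into the cache
      break;
  }
  return frame;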

View File

@@ -41,6 +41,7 @@ class FFmpeg_Input {
int video_stream_id;
int audio_stream_id;
AVFormatContext *input_format_context;
AVFrame *frame;
};
#endif

View File

@@ -1442,7 +1442,7 @@ bool Monitor::Analyse() {
//TODO: What happens is the event closes and sets recording to false then recording to true again so quickly that our capture daemon never picks it up. Maybe need a refresh flag?
if ( (!signal_change && signal) && (function == RECORD || function == MOCORD) ) {
if ( event ) {
Debug(3, "Detected new event at (%d.%d)", timestamp->tv_sec, timestamp->tv_usec);
Debug(3, "Have signal and recording with open event at (%d.%d)", timestamp->tv_sec, timestamp->tv_usec);
if ( section_length && ( timestamp->tv_sec >= section_length ) ) {
// TODO: Wouldn't this be clearer if we just did something like if now - event->start > section_length ?
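
A hedged restatement of what the TODO above suggests, written as a hypothetical helper; none of these names come from the commit:

  // Illustration only: "now - event start > section_length" as the TODO proposes.
  static bool section_expired(time_t now_sec, time_t event_start_sec, int section_length) {
    return section_length && ((now_sec - event_start_sec) >= section_length);
  }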

View File

@@ -87,27 +87,27 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
video_out_stream = avformat_new_stream(oc, NULL);
if ( !video_out_stream ) {
Error("Unable to create video out stream");
return;
} else {
Debug(2, "Success creating video out stream");
}
// Since we are not re-encoding, all we have to do is copy the parameters
video_out_ctx = avcodec_alloc_context3(NULL);
video_out_ctx = video_out_stream->codec;
//video_out_ctx = avcodec_alloc_context3(NULL);
// Copy params from instream to ctx
ret = avcodec_parameters_to_context(video_out_ctx,
video_in_stream->codecpar);
if (ret < 0) {
if ( ret < 0 ) {
Error("Could not initialize ctx parameteres");
return;
} else {
zm_dump_codec(video_out_ctx);
}
video_out_stream = avformat_new_stream(oc, NULL);
if (!video_out_stream) {
Error("Unable to create video out stream\n");
return;
} else {
Debug(2, "Success creating video out stream");
}
if ( !video_out_ctx->codec_tag ) {
video_out_ctx->codec_tag =
av_codec_get_tag(oc->oformat->codec_tag, video_in_ctx->codec_id);
@@ -117,21 +117,46 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
// Now copy them to the out stream
ret = avcodec_parameters_from_context(video_out_stream->codecpar,
video_out_ctx);
if (ret < 0) {
Error("Could not initialize stream parameteres");
if ( ret < 0 ) {
Error("Could not initialize stream parameters");
return;
} else {
Debug(2, "Success setting parameters");
}
zm_dump_codecpar(video_in_stream->codecpar);
zm_dump_codecpar(video_out_stream->codecpar);
AVCodec *video_out_codec = avcodec_find_encoder( video_out_ctx->codec_id );
if ( !video_out_codec ) {
#if (LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 11, 0) && (LIBAVFORMAT_VERSION_MICRO >= 100))
Fatal( "Could not find encoder for '%s'", avcodec_get_name( video_out_ctx->codec_id ) );
#else
Fatal( "Could not find encoder for '%d'", video_out_ctx->codec_id );
#endif
}
AVDictionary *opts = 0;
if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
Warning("Can't open video codec (%s) %s",
video_out_codec->name,
av_make_error_string(ret).c_str()
);
video_out_codec = NULL;
}
AVDictionaryEntry *e = NULL;
while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
Warning( "Encoder Option %s not recognized by ffmpeg codec", e->key);
}
#else
video_out_stream =
avformat_new_stream(oc, NULL);
//(AVCodec *)(video_in_ctx->codec));
//avformat_new_stream(oc,(const AVCodec *)(video_in_ctx->codec));
if ( !video_out_stream ) {
Fatal("Unable to create video out stream\n");
Fatal("Unable to create video out stream");
} else {
Debug(2, "Success creating video out stream");
}
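
Since this constructor copies codec parameters rather than re-encoding (see the comment above), the essential copy-through flow can be summarized as below. This is a hedged sketch using the FFmpeg 3.x parameters API; add_copy_stream is a hypothetical helper, and it uses the deprecated stream->codec field the same way the commit does:

  // Illustration only, not the committed code.
  static AVStream *add_copy_stream(AVFormatContext *oc, AVStream *in_stream) {
    AVStream *out_stream = avformat_new_stream(oc, NULL);
    if ( !out_stream )
      return NULL;
    AVCodecContext *out_ctx = out_stream->codec;            // deprecated, as in this commit
    if ( avcodec_parameters_to_context(out_ctx, in_stream->codecpar) < 0 )
      return NULL;                                          // copy params from the input
    if ( !out_ctx->codec_tag )
      out_ctx->codec_tag = av_codec_get_tag(oc->oformat->codec_tag,
                                            in_stream->codecpar->codec_id);
    if ( avcodec_parameters_from_context(out_stream->codecpar, out_ctx) < 0 )
      return NULL;                                          // and back out to the stream
    return out_stream;
  }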
@@ -160,6 +185,7 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
// Just copy them from the in, no reason to choose different
video_out_ctx->time_base = video_in_ctx->time_base;
if ( ! (video_out_ctx->time_base.num && video_out_ctx->time_base.den) ) {
Debug(2,"No timebase found in video in context, defaulting to Q");
video_out_ctx->time_base = AV_TIME_BASE_Q;
}
video_out_stream->time_base = video_in_stream->time_base;
@@ -182,17 +208,17 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
}
Monitor::Orientation orientation = monitor->getOrientation();
if (orientation) {
if (orientation == Monitor::ROTATE_0) {
} else if (orientation == Monitor::ROTATE_90) {
if ( orientation ) {
if ( orientation == Monitor::ROTATE_0 ) {
} else if ( orientation == Monitor::ROTATE_90 ) {
dsr = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__);
if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else if (orientation == Monitor::ROTATE_180) {
dsr = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__);
if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else if (orientation == Monitor::ROTATE_270) {
dsr = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__);
if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else {
Warning("Unsupported Orientation(%d)", orientation);
}
@@ -209,7 +235,7 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
resample_ctx = NULL;
#endif
if (audio_in_stream) {
if ( audio_in_stream ) {
Debug(3, "Have audio stream");
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
@@ -352,6 +378,14 @@ bool VideoStore::open() {
//avformat_free_context(oc);
return false;
}
Debug(3,
"Time bases: VIDEO in stream (%d/%d) in codec: (%d/%d) out "
"stream: (%d/%d) out codec (%d/%d)",
video_in_stream->time_base.num, video_in_stream->time_base.den,
video_in_ctx->time_base.num, video_in_ctx->time_base.den,
video_out_stream->time_base.num, video_out_stream->time_base.den,
video_out_ctx->time_base.num,
video_out_ctx->time_base.den);
return true;
} // end VideoStore::open()
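
The new Debug(3) dump above exists to diagnose timestamp scaling between these four time bases. When the in and out stream time bases differ, packets are normally rescaled with the generic FFmpeg idiom below; this is shown for context only and is not code from this commit:

  // Generic rescaling idiom (pkt is the packet being written out).
  pkt.pts = av_rescale_q_rnd(pkt.pts, video_in_stream->time_base,
                             video_out_stream->time_base,
                             (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
  pkt.dts = av_rescale_q_rnd(pkt.dts, video_in_stream->time_base,
                             video_out_stream->time_base,
                             (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
  pkt.duration = av_rescale_q(pkt.duration, video_in_stream->time_base,
                              video_out_stream->time_base);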
@@ -359,7 +393,7 @@ VideoStore::~VideoStore() {
if ( oc->pb ) {
if (audio_out_codec) {
if ( audio_out_codec ) {
// The codec queues data. We need to send a flush command and out
// whatever we get. Failures are not fatal.
AVPacket pkt;
@@ -394,8 +428,8 @@ VideoStore::~VideoStore() {
break;
}
#endif
Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts,
pkt.dts, pkt.duration);
Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)",
pkt.pts, pkt.dts, pkt.duration);
#if 0
if ( pkt.duration > 0 )
@@ -450,7 +484,7 @@ VideoStore::~VideoStore() {
avcodec_close(video_out_ctx);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_free_context(&video_out_ctx);
//avcodec_free_context(&video_out_ctx);
#endif
video_out_ctx = NULL;
Debug(4, "Success freeing video_out_ctx");
@@ -780,7 +814,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
Debug(2, "Starting video first_pts will become %" PRId64, ipkt->pts);
video_first_pts = ipkt->pts;
} else {
if ( ipkt->pts < video_first_pts ) {
if ( 0 && ipkt->pts < video_first_pts ) {
Debug(1, "Resetting first_pts from %" PRId64 " to %" PRId64, video_last_pts, ipkt->pts);
video_first_pts -= video_last_pts;
// wrap around, need to figure out the distance FIXME having this wrong should cause a jump, but then play ok?
@@ -805,7 +839,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
Debug(1, "Starting video first_dts will become (%" PRId64 ")", ipkt->dts);
video_first_dts = ipkt->dts;
} else {
if ( ipkt->dts < video_first_dts ) {
if ( 0 && ipkt->dts < video_first_dts ) {
Debug(1, "Resetting first_dts from (%" PRId64 ") to (%" PRId64")",
video_first_dts, ipkt->dts);
video_first_dts -= video_last_dts;
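
With the wrap-around resets above compiled out by the 0 && guards, what remains of the pts/dts handling is a simple rebase against the first timestamps seen. A rough sketch follows; opkt stands for the output packet being built, and using AV_NOPTS_VALUE as the "unset" sentinel is an assumption for illustration, not taken from the commit:

  // Illustration only; the member names match the hunk, opkt is hypothetical here.
  if ( video_first_pts == AV_NOPTS_VALUE )
    video_first_pts = ipkt->pts;
  opkt.pts = ipkt->pts - video_first_pts;
  if ( video_first_dts == AV_NOPTS_VALUE )
    video_first_dts = ipkt->dts;
  opkt.dts = ipkt->dts - video_first_dts;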