From d1416322561730c47000bf9a6acbcbc8128a29c2 Mon Sep 17 00:00:00 2001 From: Isaac Connor Date: Wed, 11 Sep 2019 09:18:03 -0400 Subject: [PATCH 1/3] general clean of onvif probe view. Use buttons instead of inputs and use data-on-change-this instead of inline js. Also rename username and password to Username and Password to not conflict with authentication to ZM ui. --- web/skins/classic/views/js/onvifprobe.js | 10 ++-- web/skins/classic/views/onvifprobe.php | 69 ++++++++++-------------- 2 files changed, 32 insertions(+), 47 deletions(-) diff --git a/web/skins/classic/views/js/onvifprobe.js b/web/skins/classic/views/js/onvifprobe.js index b49ac5754..cfe0939d0 100644 --- a/web/skins/classic/views/js/onvifprobe.js +++ b/web/skins/classic/views/js/onvifprobe.js @@ -22,14 +22,14 @@ function gotoStep2( element ) { form.submit(); } -function configureButtons( element ) { +function configureButtons(element) { var form = element.form; - if (form.elements.namedItem("nextBtn")) { + if (form.elements.namedItem('nextBtn')) { form.nextBtn.disabled = (form.probe.selectedIndex==0) || - (form.username == "") || (form.username == null) || - (form.password == "") || (form.password == null); + (form.Username == '') || (form.Username == null) || + (form.Password == '') || (form.Password == null); } - if (form.elements.namedItem("saveBtn")) { + if (form.elements.namedItem('saveBtn')) { form.saveBtn.disabled = (form.probe.selectedIndex==0); } } diff --git a/web/skins/classic/views/onvifprobe.php b/web/skins/classic/views/onvifprobe.php index e1c4c8ef7..8c5feb50b 100644 --- a/web/skins/classic/views/onvifprobe.php +++ b/web/skins/classic/views/onvifprobe.php @@ -41,18 +41,18 @@ function execONVIF( $cmd ) { Please the following command from a command line for more information:

$shell_command" ); } else { - ZM\Logger::Debug( "Results from probe: " . implode( '
', $output ) ); + ZM\Logger::Debug('Results from probe: '.implode('
', $output)); } return $output; } -function probeCameras( $localIp ) { +function probeCameras($localIp) { $cameras = array(); - if ( $lines = @execONVIF( 'probe' ) ) { + if ( $lines = @execONVIF('probe') ) { foreach ( $lines as $line ) { - $line = rtrim( $line ); - if ( preg_match( '|^(.+),(.+),\s\((.*)\)$|', $line, $matches ) ) { + $line = rtrim($line); + if ( preg_match('|^(.+),(.+),\s\((.*)\)$|', $line, $matches) ) { $device_ep = $matches[1]; $soapversion = $matches[2]; $camera = array( @@ -65,7 +65,7 @@ function probeCameras( $localIp ) { ), ); foreach ( preg_split('|,\s*|', $matches[3]) as $attr_val ) { - if ( preg_match( '|(.+)=\'(.*)\'|', $attr_val, $tokens ) ) { + if ( preg_match('|(.+)=\'(.*)\'|', $attr_val, $tokens) ) { if ( $tokens[1] == 'hardware' ) { $camera['model'] = $tokens[2]; } elseif ( $tokens[1] == 'name' ) { @@ -84,7 +84,7 @@ function probeCameras( $localIp ) { return $cameras; } // end function probeCameras -function probeProfiles( $device_ep, $soapversion, $username, $password ) { +function probeProfiles($device_ep, $soapversion, $username, $password) { $profiles = array(); if ( $lines = @execONVIF("profiles $device_ep $soapversion $username $password") ) { foreach ( $lines as $line ) { @@ -94,7 +94,7 @@ function probeProfiles( $device_ep, $soapversion, $username, $password ) { // add user@pass to URI if ( preg_match('|^(\S+://)(.+)$|', $stream_uri, $tokens) ) { $stream_uri = $tokens[1].$username.':'.$password.'@'.$tokens[2]; - } + } $profile = array( # 'monitor' part of camera 'Type' => 'Ffmpeg', @@ -125,8 +125,8 @@ xhtmlHeaders(__FILE__, translate('MonitorProbe') ); if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) { $monitors = array(); - foreach ( dbFetchAll("SELECT Id, Name, Host FROM Monitors WHERE Type = 'Remote' ORDER BY Host") as $monitor ) { - if ( preg_match( '/^(.+)@(.+)$/', $monitor['Host'], $matches ) ) { + foreach ( dbFetchAll("SELECT Id, Name, Host FROM Monitors WHERE Type='Remote' ORDER BY Host") as $monitor ) { + if ( preg_match('/^(.+)@(.+)$/', $monitor['Host'], $matches) ) { //echo "1: ".$matches[2]." = ".gethostbyname($matches[2])."
"; $monitors[gethostbyname($matches[2])] = $monitor; } else { @@ -137,26 +137,12 @@ if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) { $detcameras = probeCameras(''); foreach ( $detcameras as $camera ) { - if ( preg_match( '|([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)|', $camera['monitor']['Host'], $matches ) ) { + if ( preg_match('|([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)|', $camera['monitor']['Host'], $matches) ) { $ip = $matches[1]; } $host = $ip; -/* - if ( isset($monitors[$ip]) ) - { - $monitor = $monitors[$ip]; - $sourceString .= " (".$monitor['Name'].")"; - } - else - { - $sourceString .= " - ".translate('Available'); - } - $cameras[$sourceDesc] = $sourceString; - } -*/ -// $sourceDesc = htmlspecialchars(serialize($camera['monitor'])); $sourceDesc = base64_encode(json_encode($camera['monitor'])); - $sourceString = $camera['model'].' @ '.$host . ' using version ' . $camera['monitor']['SOAP'] ; + $sourceString = $camera['model'].' @ '.$host.' using version '.$camera['monitor']['SOAP']; $cameras[$sourceDesc] = $sourceString; } @@ -179,39 +165,38 @@ if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) {

- 'configureButtons(this)')); ?> + 'configureButtons')); ?>

- - + +

- - + +

- - + +
- $value ) { if ( isset($value) ) { $monitor[$name] = $value; @@ -221,7 +206,7 @@ if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) { //print $monitor['Host'].", ".$_REQUEST['username'].", ".$_REQUEST['password']."
"; - $detprofiles = probeProfiles($monitor['Host'], $monitor['SOAP'], $_REQUEST['username'], $_REQUEST['password']); + $detprofiles = probeProfiles($monitor['Host'], $monitor['SOAP'], $_REQUEST['Username'], $_REQUEST['Password']); foreach ( $detprofiles as $profile ) { $monitor = $camera['monitor']; @@ -258,18 +243,18 @@ if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) {

- 'configureButtons(this)')); ?> + 'configureButtons')); ?>

- - - + + +
- + From 8fb55cdbff6fa94d0144493a0af06dd23e0970e6 Mon Sep 17 00:00:00 2001 From: Isaac Connor Date: Thu, 12 Sep 2019 16:51:33 -0400 Subject: [PATCH 2/3] Add codec name to zm_dump_codec and zm_dump_codecpar add new functions zm_receive_packet --- src/zm_ffmpeg.cpp | 41 ++++++++++++++++++++++++++++++++++++++--- src/zm_ffmpeg.h | 3 +++ 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/zm_ffmpeg.cpp b/src/zm_ffmpeg.cpp index bd8643335..d9f62a9e1 100644 --- a/src/zm_ffmpeg.cpp +++ b/src/zm_ffmpeg.cpp @@ -289,9 +289,10 @@ static void zm_log_fps(double d, const char *postfix) { #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) void zm_dump_codecpar ( const AVCodecParameters *par ) { - Debug(1, "Dumping codecpar codec_type(%d) codec_id(%d) codec_tag(%d) width(%d) height(%d) bit_rate(%d) format(%d = %s)", + Debug(1, "Dumping codecpar codec_type(%d) codec_id(%d %s) codec_tag(%d) width(%d) height(%d) bit_rate(%d) format(%d = %s)", par->codec_type, par->codec_id, + avcodec_get_name(par->codec_id), par->codec_tag, par->width, par->height, @@ -303,10 +304,11 @@ void zm_dump_codecpar ( const AVCodecParameters *par ) { #endif void zm_dump_codec(const AVCodecContext *codec) { - Debug(1, "Dumping codec_context codec_type(%d) codec_id(%d) width(%d) height(%d) timebase(%d/%d) format(%s)\n" + Debug(1, "Dumping codec_context codec_type(%d) codec_id(%d %s) width(%d) height(%d) timebase(%d/%d) format(%s) " "gop_size %d max_b_frames %d me_cmp %d me_range %d qmin %d qmax %d", codec->codec_type, codec->codec_id, + avcodec_get_name(codec->codec_id), codec->width, codec->height, codec->time_base.num, @@ -327,7 +329,6 @@ void zm_dump_codec(const AVCodecContext *codec) { /* "user interface" functions */ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) { - char buf[256]; Debug(1, "Dumping stream index i(%d) index(%d)", i, index ); int flags = (is_output ? 
ic->oformat->flags : ic->iformat->flags); AVStream *st = ic->streams[i]; @@ -350,8 +351,14 @@ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) st->codec_info_nb_frames, codec->frame_size, st->time_base.num, st->time_base.den ); + +#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) + Debug(1, "codec: %s", avcodec_get_name(st->codecpar->codec_id)); +#else + char buf[256]; avcodec_string(buf, sizeof(buf), st->codec, is_output); Debug(1, "codec: %s", buf); +#endif if ( st->sample_aspect_ratio.num && // default av_cmp_q(st->sample_aspect_ratio, codec->sample_aspect_ratio) @@ -477,6 +484,27 @@ bool is_audio_context( AVCodecContext *codec_context ) { #endif } +int zm_receive_packet(AVCodecContext *context, AVPacket &packet) { +#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) + int ret = avcodec_receive_packet(context, &packet); + if ( ret < 0 ) { + if ( AVERROR_EOF != ret ) { + Error("Error encoding (%d) (%s)", ret, + av_err2str(ret)); + } + return 0; + } + return 1; +#else + int got_packet = 0; + int ret = avcodec_encode_audio2(context, &packet, NULL, &got_packet); + if ( ret < 0 ) { + Error("Error encoding (%d) (%s)", ret, av_err2str(ret)); + } + return got_packet; +#endif +} // end int zm_receive_packet(AVCodecContext *context, AVPacket &packet) + int zm_receive_frame(AVCodecContext *context, AVFrame *frame, AVPacket &packet) { int ret; #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) @@ -600,3 +628,10 @@ void dumpPacket(AVPacket *pkt, const char *text) { pkt->duration); Debug(2, "%s:%d:%s: %s", __FILE__, __LINE__, text, b); } + +void zm_packet_copy_rescale_ts(const AVPacket *ipkt, AVPacket *opkt, const AVRational src_tb, const AVRational dst_tb) { + opkt->pts = ipkt->pts; + opkt->dts = ipkt->dts; + opkt->duration = ipkt->duration; + av_packet_rescale_ts(opkt, src_tb, dst_tb); +} diff --git a/src/zm_ffmpeg.h b/src/zm_ffmpeg.h index eac8fe3e3..f23666568 100644 --- a/src/zm_ffmpeg.h +++ b/src/zm_ffmpeg.h @@ -352,9 +352,12 @@ bool is_audio_stream(AVStream *); bool is_video_context(AVCodec *); bool is_audio_context(AVCodec *); +int zm_receive_packet(AVCodecContext *context, AVPacket &packet); + int zm_receive_frame(AVCodecContext *context, AVFrame *frame, AVPacket &packet); int zm_send_frame(AVCodecContext *context, AVFrame *frame, AVPacket &packet); void dumpPacket(AVStream *, AVPacket *,const char *text=""); void dumpPacket(AVPacket *,const char *text=""); +void zm_packet_copy_rescale_ts(const AVPacket *ipkt, AVPacket *opkt, const AVRational src_tb, const AVRational dst_tb); #endif // ZM_FFMPEG_H From 0a8aba2b7694c96c9348d32edad63297da077e10 Mon Sep 17 00:00:00 2001 From: Isaac Connor Date: Thu, 12 Sep 2019 16:52:59 -0400 Subject: [PATCH 3/3] rework how pts/dts is handled. We still use video_first_pts, but if the result is negative, normalize it to 0. Also now use audio_next_pts/dts to calculate pts when resampling and re-encoding. I believe this resolves the issues with wrong duration in the audio stream. 
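(Not part of the patch: a minimal, self-contained sketch of the normalisation described above, assuming the video anchor has already been rescaled into the audio time base; all values are made up.)

    // Illustration only: offset audio pts by the video anchor and normalise
    // anything that would land before it to 0, as the commit message describes.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t video_first_pts = 9000;                 // anchor, in the audio time base
      const int64_t audio_pts_in[] = {8800, 9900, 10800};   // audio starts just before the anchor

      for (int64_t pts : audio_pts_in) {
        // Offsets that would come out negative are normalised to 0.
        const int64_t out = std::max<int64_t>(pts - video_first_pts, 0);
        std::printf("audio pts %lld -> %lld\n", (long long)pts, (long long)out);
      }
      return 0;
    }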
--- src/zm_videostore.cpp | 339 ++++++++++++++---------------------------- 1 file changed, 110 insertions(+), 229 deletions(-) diff --git a/src/zm_videostore.cpp b/src/zm_videostore.cpp index eda382077..d7b1fe980 100644 --- a/src/zm_videostore.cpp +++ b/src/zm_videostore.cpp @@ -193,18 +193,6 @@ VideoStore::VideoStore( ); video_out_stream->r_frame_rate = video_in_stream->r_frame_rate; } -#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0) - ret = avcodec_parameters_from_context(video_out_stream->codecpar, video_out_ctx); - if ( ret < 0 ) { - Error("Could not initialize video_out_ctx parameters"); - return; - } else { - zm_dump_codec(video_out_ctx); - } - - zm_dump_codecpar(video_in_stream->codecpar); - zm_dump_codecpar(video_out_stream->codecpar); -#endif Debug(3, "Time bases: VIDEO in stream (%d/%d) in codec: (%d/%d) out " "stream: (%d/%d) out codec (%d/%d)", @@ -222,6 +210,10 @@ VideoStore::VideoStore( } #if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0) + /* I'm not entirely sure that this is a good idea. We may have to do it someday but really only when transcoding + * * think what I was trying to achieve here was to have zm_dump_codecpar output nice info + * */ +#if 0 AVDictionary *opts = 0; if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) { Warning("Can't open video codec (%s) %s", @@ -235,6 +227,24 @@ VideoStore::VideoStore( while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) { Warning("Encoder Option %s not recognized by ffmpeg codec", e->key); } + ret = avcodec_parameters_from_context(video_out_stream->codecpar, video_out_ctx); + if ( ret < 0 ) { + Error("Could not initialize video_out_ctx parameters"); + return; + } else { + zm_dump_codec(video_out_ctx); + } +#else + ret = avcodec_parameters_from_context(video_out_stream->codecpar, video_in_ctx); + if ( ret < 0 ) { + Error("Could not initialize video_out_ctx parameters"); + return; + } else { + zm_dump_codec(video_out_ctx); + } +#endif + zm_dump_codecpar(video_in_stream->codecpar); + zm_dump_codecpar(video_out_stream->codecpar); #endif Monitor::Orientation orientation = monitor->getOrientation(); @@ -274,6 +284,7 @@ VideoStore::VideoStore( audio_first_pts = 0; audio_first_dts = 0; + /* When encoding audio, these are used to tell us what the correct pts is, because it gets lost during resampling. */ audio_next_pts = 0; audio_next_dts = 0; @@ -419,9 +430,9 @@ bool VideoStore::open() { VideoStore::~VideoStore() { if ( oc->pb ) { - int ret; if ( audio_out_codec ) { + // The codec queues data. We need to send a flush command and out // whatever we get. Failures are not fatal. AVPacket pkt; @@ -429,99 +440,53 @@ VideoStore::~VideoStore() { pkt.data = NULL; pkt.size = 0; av_init_packet(&pkt); + + /* + * At the end of the file, we pass the remaining samples to + * the encoder. */ + int frame_size = audio_out_ctx->frame_size; + while ( av_audio_fifo_size(fifo) > 0 ) { + /* Take one frame worth of audio samples from the FIFO buffer, + * encode it and write it to the output file. 
*/ + + Debug(1, "Remaining samples in fifo for AAC codec frame_size %d > fifo size %d", + frame_size, av_audio_fifo_size(fifo)); + + // SHould probably set the frame size to what is reported FIXME + if ( av_audio_fifo_read(fifo, (void **)out_frame->data, frame_size) ) { + + av_init_packet(&opkt); + if ( zm_send_frame(audio_out_ctx, out_frame, opkt) ) { + if ( zm_receive_packet(audio_out_ctx, pkt) ) + pkt.stream_index = audio_out_stream->index; + + pkt.duration = av_rescale_q( + pkt.duration, + audio_out_ctx->time_base, + audio_out_stream->time_base); + } + } // end if data returned from fifo + + } // end while still data in the fifo + #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) // Put encoder into flushing mode avcodec_send_frame(audio_out_ctx, NULL); #endif while (1) { -#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) - ret = avcodec_receive_packet(audio_out_ctx, &pkt); - if ( ret < 0 ) { - if ( AVERROR_EOF != ret ) { - Error("Error encoding audio while flushing (%d) (%s)", ret, - av_err2str(ret)); - } + if ( ! zm_receive_packet(audio_out_ctx, pkt) ) break; - } -#else - int got_packet = 0; - ret = avcodec_encode_audio2(audio_out_ctx, &pkt, NULL, &got_packet); - if ( ret < 0 ) { - Error("Error encoding audio while flushing (%d) (%s)", ret, - av_err2str(ret)); - break; - } - Debug(1, "Have audio encoder, need to flush it's out"); - if ( !got_packet ) { - break; - } -#endif dumpPacket(&pkt, "raw from encoder"); - // Need to adjust pts and dts and duration - pkt.stream_index = audio_out_stream->index; - pkt.duration = av_rescale_q( - pkt.duration, - audio_out_ctx->time_base, - audio_out_stream->time_base); - // Scale the PTS of the outgoing packet to be the correct time base - if ( pkt.pts != AV_NOPTS_VALUE ) { -#if 0 - pkt.pts = av_rescale_q( - pkt.pts, - audio_out_ctx->time_base, - audio_in_stream->time_base); - // audio_first_pts is in audio_in_stream time base - pkt.pts -= audio_first_pts; - pkt.pts = av_rescale_q( - pkt.pts, - audio_in_stream->time_base, - audio_out_stream->time_base); -#else - pkt.pts = av_rescale_q( - pkt.pts, - audio_out_ctx->time_base, - audio_out_stream->time_base); -#endif - - Debug(2, "audio pkt.pts = %" PRId64 " from first_pts(%" PRId64 ")", - pkt.pts, audio_first_pts); - } else { - Debug(2, "pkt.pts = undef"); - pkt.pts = AV_NOPTS_VALUE; - } - - if ( pkt.dts != AV_NOPTS_VALUE ) { -#if 0 - pkt.dts = av_rescale_q( - pkt.dts, - audio_out_ctx->time_base, - audio_in_stream->time_base); - pkt.dts -= audio_first_dts; - pkt.dts = av_rescale_q( - pkt.dts, - audio_in_stream->time_base, - audio_out_stream->time_base); -#else - pkt.dts = av_rescale_q( - pkt.dts, - audio_out_ctx->time_base, - audio_out_stream->time_base); -#endif - Debug(2, "pkt.dts = %" PRId64 " - first_dts(%" PRId64 ")", - pkt.dts, audio_first_dts); - } else { - pkt.dts = AV_NOPTS_VALUE; - } - + av_packet_rescale_ts(&pkt, audio_out_ctx->time_base, audio_out_stream->time_base); dumpPacket(audio_out_stream, &pkt, "writing flushed packet"); av_interleaved_write_frame(oc, &pkt); zm_av_packet_unref(&pkt); - } // while have buffered frames - } // end if audio_out_codec + } // while have buffered frames + } // end if audio_out_codec // Flush Queues Debug(1,"Flushing interleaved queues"); @@ -711,7 +676,7 @@ bool VideoStore::setup_resampler() { } } if ( found ) { - Debug(3, "Sample rate is good"); + Debug(3, "Sample rate is good %d", audio_out_ctx->sample_rate); } else { audio_out_ctx->sample_rate = audio_out_codec->supported_samplerates[0]; @@ -743,6 +708,7 @@ bool VideoStore::setup_resampler() { 
audio_out_stream = NULL; return false; } + zm_dump_codec(audio_out_ctx); #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) ret = avcodec_parameters_from_context( @@ -752,6 +718,7 @@ bool VideoStore::setup_resampler() { return false; } #endif + zm_dump_codecpar(audio_out_stream->codecpar); Debug(3, "Time bases: AUDIO in stream (%d/%d) in codec: (%d/%d) out " @@ -923,6 +890,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { ); if ( duration <= 0 ) { // Why are we setting the duration to 1? + Warning("Duration is 0, setting to 1"); duration = ipkt->duration ? ipkt->duration : av_rescale_q(1,video_in_stream->time_base, video_out_stream->time_base); } } @@ -936,17 +904,6 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { opkt.pts = 0; Debug(2, "Starting video first_pts will become %" PRId64, ipkt->pts); video_first_pts = ipkt->pts; -#if 1 - if ( audio_in_stream ) { - // Since audio starts after the start of the video, need to set this here. - audio_first_pts = av_rescale_q( - ipkt->pts, - video_in_stream->time_base, - audio_in_stream->time_base - ); - Debug(2, "Starting audio first_pts will become %" PRId64, audio_first_pts); - } -#endif } else { opkt.pts = av_rescale_q( ipkt->pts - video_first_pts, @@ -960,8 +917,6 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { } else { Debug(3, "opkt.pts = undef"); opkt.pts = AV_NOPTS_VALUE; -// can't set 0, it will get rejected - //AV_NOPTS_VALUE; } // Just because the in stream wraps, doesn't mean the out needs to. // Really, if we are limiting ourselves to 10min segments I can't imagine every wrapping in the out. @@ -974,17 +929,6 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) { opkt.dts = 0; Debug(2, "Starting video first_dts will become (%" PRId64 ")", ipkt->dts); video_first_dts = ipkt->dts; -#if 1 - if ( audio_in_stream ) { - // Since audio starts after the start of the video, need to set this here. - audio_first_dts = av_rescale_q( - ipkt->dts, - video_in_stream->time_base, - audio_in_stream->time_base - ); - Debug(2, "Starting audio first dts will become %" PRId64, audio_first_dts); - } -#endif } else { opkt.dts = av_rescale_q( ipkt->dts - video_first_dts, @@ -1035,6 +979,40 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) { } dumpPacket(audio_in_stream, ipkt, "input packet"); + if ( !audio_first_pts ) { + // Since audio starts after the start of the video, need to set this here. + audio_first_pts = av_rescale_q( + video_first_pts, + video_in_stream->time_base, + audio_in_stream->time_base + ); + Debug(2, "Starting audio first_pts will become %" PRId64, audio_first_pts); + audio_next_pts = ipkt->pts - audio_first_pts; + if ( audio_next_pts < 0 ) { + audio_first_pts -= audio_next_pts; + audio_next_pts = 0; + } + } + if ( !audio_first_dts ) { + // Since audio starts after the start of the video, need to set this here. + audio_first_dts = av_rescale_q( + video_first_dts, + video_in_stream->time_base, + audio_in_stream->time_base + ); + audio_next_dts = ipkt->dts - audio_first_dts; + if ( ipkt->dts < 0 ) { + audio_first_dts -= ipkt->dts; + ipkt->dts = 0; + } + Debug(2, "Starting audio first_dts will become %" PRId64, audio_first_dts); + } + + // Need to adjust pts before feeding to decoder.... 
should really copy the pkt instead of modifying it + ipkt->pts -= audio_first_pts; + ipkt->dts -= audio_first_dts; + dumpPacket(audio_in_stream, ipkt, "after pts adjustment"); + if ( audio_out_codec ) { if ( ( ret = zm_receive_frame(audio_in_ctx, in_frame, *ipkt) ) < 0 ) { Debug(3, "Not ready to receive frame"); @@ -1042,140 +1020,31 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) { } zm_dump_frame(in_frame, "In frame from decode"); - if ( in_frame->pts != AV_NOPTS_VALUE ) { - if ( !audio_first_pts ) { - audio_first_pts = in_frame->pts; - Debug(1, "No audio_first_pts setting to %" PRId64, audio_first_pts); - in_frame->pts = 0; - } else { - // out_frame_pts is in codec->timebase, audio_first_pts is in packet timebase. - in_frame->pts = in_frame->pts - audio_first_pts; - zm_dump_frame(in_frame, "in frame after pts adjustment"); - } - } else { - // sending AV_NOPTS_VALUE doesn't really work but we seem to get it in ffmpeg 2.8 - in_frame->pts = audio_next_pts; - } - if ( !resample_audio() ) { //av_frame_unref(in_frame); return 0; } zm_dump_frame(out_frame, "Out frame after resample"); -#if 0 - // out_frame pts is in the input pkt pts... needs to be adjusted before sending to the encoder - if ( out_frame->pts != AV_NOPTS_VALUE ) { - if ( !audio_first_pts ) { - audio_first_pts = out_frame->pts; - Debug(1, "No audio_first_pts setting to %" PRId64, audio_first_pts); - out_frame->pts = 0; - } else { - // out_frame_pts is in codec->timebase, audio_first_pts is in packet timebase. - out_frame->pts = out_frame->pts - audio_first_pts; - zm_dump_frame(out_frame, "Out frame after pts adjustment"); - } - // - } else { - // sending AV_NOPTS_VALUE doesn't really work but we seem to get it in ffmpeg 2.8 - out_frame->pts = audio_next_pts; - } - audio_next_pts = out_frame->pts + out_frame->nb_samples; -#endif av_init_packet(&opkt); if ( !zm_send_frame(audio_out_ctx, out_frame, opkt) ) { return 0; } - dumpPacket(audio_out_stream, &opkt, "raw opkt"); - Debug(1, "Duration before %d in %d/%d", opkt.duration, - audio_out_ctx->time_base.num, - audio_out_ctx->time_base.den); - - opkt.duration = av_rescale_q( - opkt.duration, - audio_out_ctx->time_base, - audio_out_stream->time_base); - Debug(1, "Duration after %d in %d/%d", opkt.duration, - audio_out_stream->time_base.num, - audio_out_stream->time_base.den); // Scale the PTS of the outgoing packet to be the correct time base -#if 0 - if ( ipkt->pts != AV_NOPTS_VALUE ) { - if ( !audio_first_pts ) { - opkt.pts = 0; - audio_first_pts = ipkt->pts; - Debug(1, "No audio_first_pts"); - } else { - opkt.pts = av_rescale_q( - opkt.pts, + av_packet_rescale_ts(&opkt, audio_out_ctx->time_base, audio_out_stream->time_base); - opkt.pts -= audio_first_pts; - Debug(2, "audio opkt.pts = %" PRId64 " from first_pts %" PRId64, - opkt.pts, audio_first_pts); - } - } else { - Debug(2, "opkt.pts = undef"); - opkt.pts = AV_NOPTS_VALUE; - } - if ( opkt.dts != AV_NOPTS_VALUE ) { - if ( !audio_first_dts ) { - opkt.dts = 0; - audio_first_dts = opkt.dts; - } else { - opkt.dts = av_rescale_q( - opkt.dts, - audio_out_ctx->time_base, - audio_out_stream->time_base); - opkt.dts -= audio_first_dts; - Debug(2, "audio opkt.dts = %" PRId64 " from first_dts %" PRId64, - opkt.dts, audio_first_dts); - } - audio_last_dts = opkt.dts; - } else { - opkt.dts = AV_NOPTS_VALUE; - } -#else - opkt.pts = av_rescale_q( - opkt.pts, - audio_out_ctx->time_base, - audio_out_stream->time_base); - opkt.dts = av_rescale_q( - opkt.dts, - audio_out_ctx->time_base, - audio_out_stream->time_base); -#endif - - 
write_packet(&opkt, audio_out_stream); + write_packet(&opkt, audio_out_stream); zm_av_packet_unref(&opkt); #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) // While the encoder still has packets for us while ( !avcodec_receive_packet(audio_out_ctx, &opkt) ) { - opkt.pts = av_rescale_q( - opkt.pts, - audio_out_ctx->time_base, - audio_out_stream->time_base); - opkt.dts = av_rescale_q( - opkt.dts, - audio_out_ctx->time_base, - audio_out_stream->time_base); - + av_packet_rescale_ts(&opkt, audio_out_ctx->time_base, audio_out_stream->time_base); dumpPacket(audio_out_stream, &opkt, "raw opkt"); - Debug(1, "Duration before %d in %d/%d", opkt.duration, - audio_out_ctx->time_base.num, - audio_out_ctx->time_base.den); - - opkt.duration = av_rescale_q( - opkt.duration, - audio_out_ctx->time_base, - audio_out_stream->time_base); - Debug(1, "Duration after %d in %d/%d", opkt.duration, - audio_out_stream->time_base.num, - audio_out_stream->time_base.den); write_packet(&opkt, audio_out_stream); } #endif @@ -1188,6 +1057,7 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) { opkt.size = ipkt->size; opkt.flags = ipkt->flags; +#if 0 if ( ipkt->duration && (ipkt->duration != AV_NOPTS_VALUE) ) { opkt.duration = av_rescale_q( ipkt->duration, @@ -1229,6 +1099,12 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) { } else { opkt.dts = AV_NOPTS_VALUE; } +#else + opkt.duration = ipkt->duration; + opkt.pts = ipkt->pts; + opkt.dts = ipkt->dts; + av_packet_rescale_ts(&opkt, audio_in_stream->time_base, audio_out_stream->time_base); +#endif write_packet(&opkt, audio_out_stream); zm_av_packet_unref(&opkt); @@ -1318,12 +1194,17 @@ int VideoStore::resample_audio() { zm_dump_frame(out_frame, "Out frame after fifo read"); // resampling changes the duration because the timebase is 1/samples // I think we should be dealing in codec timebases not stream +#if 0 if ( in_frame->pts != AV_NOPTS_VALUE ) { out_frame->pts = av_rescale_q( in_frame->pts, audio_in_ctx->time_base, audio_out_ctx->time_base); } +#else + out_frame->pts = audio_next_pts; + audio_next_pts += out_frame->nb_samples; +#endif zm_dump_frame(out_frame, "Out frame after timestamp conversion"); #else #if defined(HAVE_LIBAVRESAMPLE)
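(Not part of the patches: a small standalone sketch of the time-base conversion that the new zm_packet_copy_rescale_ts() helper and the av_packet_rescale_ts() calls above perform on pts/dts/duration, shown here on a single value with made-up time bases.)

    extern "C" {
    #include <libavutil/mathematics.h>   // av_rescale_q()
    #include <libavutil/rational.h>      // AVRational
    }
    #include <cstdint>
    #include <cstdio>

    int main() {
      const AVRational src_tb = {1, 90000};  // e.g. a 90 kHz codec time base
      const AVRational dst_tb = {1, 1000};   // e.g. a 1/1000 container time base
      const int64_t pts = 180000;            // made-up input timestamp

      // zm_packet_copy_rescale_ts() copies pts/dts/duration and then calls
      // av_packet_rescale_ts(), which performs this kind of conversion on each field.
      const int64_t out = av_rescale_q(pts, src_tb, dst_tb);
      std::printf("pts %lld in 1/90000 -> %lld in 1/1000\n",
                  (long long)pts, (long long)out);   // prints 180000 -> 2000
      return 0;
    }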