From 9d19e2587cda0fc21ce8b0144992885cad3f5625 Mon Sep 17 00:00:00 2001 From: Isaac Connor Date: Wed, 31 Aug 2016 16:58:41 -0400 Subject: [PATCH 1/2] fix should only write out the queue if we are starting a new event --- src/zm_ffmpeg_camera.cpp | 429 +++++++++++++++++++-------------------- 1 file changed, 214 insertions(+), 215 deletions(-) diff --git a/src/zm_ffmpeg_camera.cpp b/src/zm_ffmpeg_camera.cpp index 682f23c69..008a762b1 100644 --- a/src/zm_ffmpeg_camera.cpp +++ b/src/zm_ffmpeg_camera.cpp @@ -46,7 +46,7 @@ FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::stri { Initialise(); } - + mFormatContext = NULL; mVideoStreamId = -1; mAudioStreamId = -1; @@ -62,7 +62,7 @@ FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::stri mReopenThread = 0; wasRecording = false; videoStore = NULL; - + #if HAVE_LIBSWSCALE mConvertContext = NULL; #endif @@ -79,7 +79,7 @@ FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::stri } else { Panic("Unexpected colours: %d",colours); } - + } FfmpegCamera::~FfmpegCamera() @@ -100,6 +100,7 @@ void FfmpegCamera::Initialise() av_log_set_level( AV_LOG_QUIET ); av_register_all(); + avformat_network_init(); } void FfmpegCamera::Terminate() @@ -129,47 +130,36 @@ int FfmpegCamera::Capture( Image &image ) if (!mCanCapture){ return -1; } - + // If the reopen thread has a value, but mCanCapture != 0, then we have just reopened the connection to the ffmpeg device, and we can clean up the thread. if (mReopenThread != 0) { void *retval = 0; int ret; - + ret = pthread_join(mReopenThread, &retval); if (ret != 0){ Error("Could not join reopen thread."); } - + Info( "Successfully reopened stream." 
); mReopenThread = 0; } AVPacket packet; - uint8_t* directbuffer; - - /* Request a writeable buffer of the target image */ - directbuffer = image.WriteBuffer(width, height, colours, subpixelorder); - if(directbuffer == NULL) { - Error("Failed requesting writeable buffer for the captured image."); - return (-1); - } - + int frameComplete = false; - while ( !frameComplete ) - { + while ( !frameComplete ) { int avResult = av_read_frame( mFormatContext, &packet ); - if ( avResult < 0 ) - { + if ( avResult < 0 ) { char errbuf[AV_ERROR_MAX_STRING_SIZE]; av_strerror(avResult, errbuf, AV_ERROR_MAX_STRING_SIZE); if ( - // Check if EOF. - (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) || - // Check for Connection failure. - (avResult == -110) - ) - { - Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf); + // Check if EOF. + (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) || + // Check for Connection failure. + (avResult == -110) + ) { + Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf ); ReopenFfmpeg(); } @@ -178,8 +168,7 @@ int FfmpegCamera::Capture( Image &image ) } Debug( 5, "Got packet from stream %d", packet.stream_index ); // What about audio stream? Maybe someday we could do sound detection... 
- if ( packet.stream_index == mVideoStreamId ) - { + if ( packet.stream_index == mVideoStreamId ) { #if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0) if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 ) #else @@ -189,36 +178,45 @@ int FfmpegCamera::Capture( Image &image ) Debug( 4, "Decoded video packet at frame %d", frameCount ); - if ( frameComplete ) - { - Debug( 3, "Got frame %d", frameCount ); + if ( frameComplete ) { + Debug( 4, "Got frame %d", frameCount ); + + uint8_t* directbuffer; + + /* Request a writeable buffer of the target image */ + directbuffer = image.WriteBuffer(width, height, colours, subpixelorder); + if(directbuffer == NULL) { + Error("Failed requesting writeable buffer for the captured image."); + return (-1); + } + #if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0) av_image_fill_arrays(mFrame->data, mFrame->linesize, - directbuffer, imagePixFormat, width, height, 1); + directbuffer, imagePixFormat, width, height, 1); #else avpicture_fill( (AVPicture *)mFrame, directbuffer, - imagePixFormat, width, height); + imagePixFormat, width, height); #endif - -#if HAVE_LIBSWSCALE - if(mConvertContext == NULL) { - mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL ); - if(mConvertContext == NULL) - Fatal( "Unable to create conversion context for %s", mPath.c_str() ); - } - - if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 ) - Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount ); +#if HAVE_LIBSWSCALE + if(mConvertContext == NULL) { + mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL ); + + if(mConvertContext == NULL) + Fatal( "Unable to create 
conversion context for %s", mPath.c_str() ); + } + + if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 ) + Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount ); #else // HAVE_LIBSWSCALE - Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" ); + Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" ); #endif // HAVE_LIBSWSCALE - + frameCount++; - } // end if frameComplete - } else { - Debug( 4, "Different stream_index %d", packet.stream_index ); - } // end if packet.stream_index == mVideoStreamId + } // end if frameComplete + } else { + Debug( 4, "Different stream_index %d", packet.stream_index ); + } // end if packet.stream_index == mVideoStreamId #if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100) av_packet_unref( &packet); #else @@ -292,7 +290,7 @@ int FfmpegCamera::OpenFfmpeg() { Info( "Stream open %s", mPath.c_str() ); startTime=av_gettime();//FIXME here or after find_Stream_info - + //FIXME can speed up initial analysis but need sensible parameters... 
//mFormatContext->probesize = 32; //mFormatContext->max_analyze_duration = 32; @@ -301,7 +299,7 @@ int FfmpegCamera::OpenFfmpeg() { Debug ( 1, "Calling av_find_stream_info" ); if ( av_find_stream_info( mFormatContext ) < 0 ) #else - Debug ( 1, "Calling avformat_find_stream_info" ); + Debug ( 1, "Calling avformat_find_stream_info" ); if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 ) #endif Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) ); @@ -309,6 +307,7 @@ int FfmpegCamera::OpenFfmpeg() { Debug ( 1, "Got stream info" ); // Find first video stream present + // The one we want Might not be the first mVideoStreamId = -1; mAudioStreamId = -1; for (unsigned int i=0; i < mFormatContext->nb_streams; i++ ) @@ -316,37 +315,37 @@ int FfmpegCamera::OpenFfmpeg() { #if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0)) if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO ) #else - if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO ) + if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO ) #endif - { - if ( mVideoStreamId == -1 ) { - mVideoStreamId = i; - // if we break, then we won't find the audio stream - continue; - } else { - Debug(2, "Have another video stream." ); + { + if ( mVideoStreamId == -1 ) { + mVideoStreamId = i; + // if we break, then we won't find the audio stream + continue; + } else { + Debug(2, "Have another video stream." ); + } } - } #if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0)) if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO ) #else - if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO ) + if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO ) #endif - { - if ( mAudioStreamId == -1 ) { - mAudioStreamId = i; - } else { - Debug(2, "Have another audio stream." 
); + { + if ( mAudioStreamId == -1 ) { + mAudioStreamId = i; + } else { + Debug(2, "Have another audio stream." ); + } } - - } } if ( mVideoStreamId == -1 ) Fatal( "Unable to locate video stream in %s", mPath.c_str() ); if ( mAudioStreamId == -1 ) Debug( 3, "Unable to locate audio stream in %s", mPath.c_str() ); - Debug ( 1, "Found video stream" ); + Debug ( 3, "Found video stream at index %d", mVideoStreamId ); + Debug ( 3, "Found audio stream at index %d", mAudioStreamId ); mCodecContext = mFormatContext->streams[mVideoStreamId]->codec; @@ -355,13 +354,15 @@ int FfmpegCamera::OpenFfmpeg() { Fatal( "Can't find codec for video stream from %s", mPath.c_str() ); Debug ( 1, "Found decoder" ); + zm_dump_stream_format( mFormatContext, mVideoStreamId, 0, 0 ); + zm_dump_stream_format( mFormatContext, mAudioStreamId, 0, 0 ); // Open the codec #if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0) Debug ( 1, "Calling avcodec_open" ); if ( avcodec_open( mCodecContext, mCodec ) < 0 ) #else - Debug ( 1, "Calling avcodec_open2" ); + Debug ( 1, "Calling avcodec_open2" ); if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 ) #endif Fatal( "Unable to open codec for video stream from %s", mPath.c_str() ); @@ -398,17 +399,17 @@ int FfmpegCamera::OpenFfmpeg() { } Debug ( 1, "Validated imagesize" ); - + #if HAVE_LIBSWSCALE Debug ( 1, "Calling sws_isSupportedInput" ); if(!sws_isSupportedInput(mCodecContext->pix_fmt)) { Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff)); } - + if(!sws_isSupportedOutput(imagePixFormat)) { Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff)); } - + #else // HAVE_LIBSWSCALE Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" ); #endif // HAVE_LIBSWSCALE @@ -444,7 +445,7 @@ 
int FfmpegCamera::CloseFfmpeg(){ av_freep( &mFrame ); av_freep( &mRawFrame ); #endif - + #if HAVE_LIBSWSCALE if ( mConvertContext ) { @@ -455,8 +456,8 @@ int FfmpegCamera::CloseFfmpeg(){ if ( mCodecContext ) { - avcodec_close( mCodecContext ); - mCodecContext = NULL; // Freed by av_close_input_file + avcodec_close( mCodecContext ); + mCodecContext = NULL; // Freed by av_close_input_file } if ( mFormatContext ) { @@ -509,37 +510,28 @@ void *FfmpegCamera::ReopenFfmpegThreadCallback(void *ctx){ } //Function to handle capture and store - -int FfmpegCamera::CaptureAndRecord(Image &image, bool recording, char* event_file) { +int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_file ) { if (!mCanCapture){ return -1; } + int ret; // If the reopen thread has a value, but mCanCapture != 0, then we have just reopened the connection to the ffmpeg device, and we can clean up the thread. if (mReopenThread != 0) { void *retval = 0; - int ret; - + ret = pthread_join(mReopenThread, &retval); if (ret != 0){ Error("Could not join reopen thread."); } - + Info( "Successfully reopened stream." ); mReopenThread = 0; } AVPacket packet; - AVPacket queuedpacket; - uint8_t* directbuffer; + AVPacket queued_packet; - /* Request a writeable buffer of the target image */ - directbuffer = image.WriteBuffer(width, height, colours, subpixelorder); - if( directbuffer == NULL ) { - Error("Failed requesting writeable buffer for the captured image."); - return (-1); - } - if ( mCodecContext->codec_id != AV_CODEC_ID_H264 ) { Error( "Input stream is not h264. The stored event file may not be viewable in browser." ); } @@ -564,6 +556,85 @@ int FfmpegCamera::CaptureAndRecord(Image &image, bool recording, char* event_fil return( -1 ); } Debug( 5, "Got packet from stream %d", packet.stream_index ); + + //Video recording + if ( recording ) { + + // The directory we are recording to is no longer tied to the current event. 
+ // Need to re-init the videostore with the correct directory and start recording again + // for efficiency's sake, we should test for keyframe before we test for directory change... + if ( videoStore && (packet.flags & AV_PKT_FLAG_KEY) && (strcmp(oldDirectory, event_file) != 0 ) ) { + // don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?... + // if we store our key frame location with the event will that be enough? + Info("Re-starting video storage module"); + delete videoStore; + videoStore = NULL; + } + + if ( ! videoStore ) { + //Instantiate the video storage module + + if (record_audio) { + if (mAudioStreamId == -1) { + Debug(3, "Record Audio on but no audio stream found"); + videoStore = new VideoStore((const char *) event_file, "mp4", + mFormatContext->streams[mVideoStreamId], + NULL, + startTime, + this->getMonitor()->getOrientation()); + + } else { + Debug(3, "Video module initiated with audio stream"); + videoStore = new VideoStore((const char *) event_file, "mp4", + mFormatContext->streams[mVideoStreamId], + mFormatContext->streams[mAudioStreamId], + startTime, + this->getMonitor()->getOrientation()); + } + } else { + Debug(3, "Record_audio is false so exclude audio stream"); + videoStore = new VideoStore((const char *) event_file, "mp4", + mFormatContext->streams[mVideoStreamId], + NULL, + startTime, + this->getMonitor()->getOrientation() ); + } + strcpy(oldDirectory, event_file); + + // Need to write out all the frames from the last keyframe? 
+ unsigned int packet_count = 0; + while ( packetqueue.popPacket( &queued_packet ) ) { + packet_count += 1; + //Write the packet to our video store + if ( queued_packet.stream_index == mVideoStreamId ) { + ret = videoStore->writeVideoFramePacket(&queued_packet, mFormatContext->streams[mVideoStreamId]); + } else if ( queued_packet.stream_index == mAudioStreamId ) { + //ret = videoStore->writeAudioFramePacket(&queued_packet, mFormatContext->streams[mAudioStreamId]); + } else { + Warning("Unknown stream id in queued packet (%d)", queued_packet.stream_index ); + ret = -1; + } + if ( ret < 0 ) { + //Less than zero and we skipped a frame + //av_free_packet( &queued_packet ); + } + } // end while packets in the packetqueue + Debug(2, "Wrote %d queued packets", packet_count ); + } // end if ! wasRecording + } else { + if ( videoStore ) { + Info("Deleting videoStore instance"); + delete videoStore; + videoStore = NULL; + } + + //Buffer video packets + if ( packet.flags & AV_PKT_FLAG_KEY ) { + packetqueue.clearQueue(); + } + packetqueue.queuePacket(&packet); + } // end if + if ( packet.stream_index == mVideoStreamId ) { #if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0) if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 ) @@ -572,123 +643,41 @@ int FfmpegCamera::CaptureAndRecord(Image &image, bool recording, char* event_fil #endif Fatal( "Unable to decode frame at frame %d", frameCount ); - Debug( 4, "Decoded video packet at frame %d", frameCount ); + Debug( 4, "Decoded video packet at frame %d", frameCount ); - if ( frameComplete ) { - Debug( 3, "Got frame %d", frameCount ); - - avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height); + if ( frameComplete ) { + Debug( 4, "Got frame %d", frameCount ); - //Buffer video packets - if (!recording) { - if(packet.flags & AV_PKT_FLAG_KEY) { - // packetqueue->clearQueues(); - } - // packetqueue->queueVideoPacket(&packet); + uint8_t* directbuffer; + + /* Request a writeable 
buffer of the target image */ + directbuffer = image.WriteBuffer(width, height, colours, subpixelorder); + if( directbuffer == NULL ) { + Error("Failed requesting writeable buffer for the captured image."); + av_free_packet( &packet ); + return (-1); + } + avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height); + + if ( videoStore && recording ) { + //Write the packet to our video store + int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt); + if ( ret < 0 ) { //Less than zero and we skipped a frame + av_free_packet( &packet ); + return 0; } + } - //Video recording - if ( recording && !wasRecording ) { - //Instantiate the video storage module - - if (record_audio) { - if (mAudioStreamId == -1) { - Debug(3, "Record Audio on but no audio stream found"); - videoStore = new VideoStore((const char *) event_file, "mp4", - mFormatContext->streams[mVideoStreamId], - NULL, - startTime, - this->getMonitor()->getOrientation()); - - } else { - Debug(3, "Video module initiated with audio stream"); - videoStore = new VideoStore((const char *) event_file, "mp4", - mFormatContext->streams[mVideoStreamId], - mFormatContext->streams[mAudioStreamId], - startTime, - this->getMonitor()->getOrientation()); - } - } else { - Debug(3, "Record_audio is false so exclude audio stream"); - videoStore = new VideoStore((const char *) event_file, "mp4", - mFormatContext->streams[mVideoStreamId], - NULL, - startTime, - this->getMonitor()->getOrientation()); - } - wasRecording = true; - strcpy(oldDirectory, event_file); - // while (packetqueue->popVideoPacket(&queuedpacket)) { - // int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]); //, &lastKeyframePkt); - // if (ret < 0) {//Less than zero and we skipped a frame - // av_free_packet(&packet); - // return 0; - // } - // } - - } else if ( ( ! 
recording ) && wasRecording && videoStore ) { - Info("Deleting videoStore instance"); - delete videoStore; - videoStore = NULL; - } - - // The directory we are recording to is no longer tied to the current - // event. Need to re-init the videostore with the correct directory and - // start recording again - if (recording && wasRecording && (strcmp(oldDirectory, event_file) != 0) - && (packet.flags & AV_PKT_FLAG_KEY)) { - Info("Re-starting video storage module"); - if(videoStore){ - delete videoStore; - videoStore = NULL; - } - - if (record_audio) { - if (mAudioStreamId == -1) { - Debug(3, "Record Audio on but no audio stream found"); - videoStore = new VideoStore((const char *) event_file, "mp4", - mFormatContext->streams[mVideoStreamId], - NULL, - startTime, - this->getMonitor()->getOrientation()); - } else { - Debug(3, "Video module initiated with audio stream"); - videoStore = new VideoStore((const char *) event_file, "mp4", - mFormatContext->streams[mVideoStreamId], - mFormatContext->streams[mAudioStreamId], - startTime, - this->getMonitor()->getOrientation()); - } - } else { - Debug(3, "Record_audio is false so exclude audio stream"); - videoStore = new VideoStore((const char *) event_file, "mp4", - mFormatContext->streams[mVideoStreamId], - NULL, startTime, - this->getMonitor()->getOrientation()); - } - strcpy(oldDirectory, event_file); - } - - if ( videoStore && recording ) { - //Write the packet to our video store - int ret = videoStore->writeVideoFramePacket(&packet, - mFormatContext->streams[mVideoStreamId]); //, &lastKeyframePkt); - if(ret<0){//Less than zero and we skipped a frame - av_free_packet( &packet ); - return 0; - } - } - #if HAVE_LIBSWSCALE - if ( mConvertContext == NULL ) { + if ( mConvertContext == NULL ) { mConvertContext = sws_getContext(mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL); - if ( mConvertContext == NULL ) - Fatal( "Unable to create conversion 
context for %s", mPath.c_str() ); + if ( mConvertContext == NULL ) + Fatal( "Unable to create conversion context for %s", mPath.c_str() ); } if (sws_scale(mConvertContext, mRawFrame->data, mRawFrame->linesize, @@ -696,29 +685,39 @@ int FfmpegCamera::CaptureAndRecord(Image &image, bool recording, char* event_fil Fatal("Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount); #else // HAVE_LIBSWSCALE - Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" ); + Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" ); #endif // HAVE_LIBSWSCALE - frameCount++; - } // end if frameComplete - } else if ( packet.stream_index == mAudioStreamId ) { //FIXME best way to copy all other streams - if ( videoStore && recording ) { - if ( record_audio ) { - Debug(4, "Recording audio packet" ); - //Write the packet to our video store - int ret = videoStore->writeAudioFramePacket(&packet, - mFormatContext->streams[packet.stream_index]); //FIXME no relevance of last key frame - if ( ret < 0 ) {//Less than zero and we skipped a frame - av_free_packet( &packet ); - return 0; - } - } else { - Debug(4, "Not recording audio packet" ); + frameCount++; + } else { + Debug( 3, "Not framecomplete after av_read_frame" ); + } // end if frameComplete + } else if ( packet.stream_index == mAudioStreamId ) { //FIXME best way to copy all other streams + Debug( 4, "Audio stream index %d", packet.stream_index ); + if ( videoStore ) { + if ( record_audio ) { + Debug(3, "Recording audio packet streamindex(%d) packetstreamindex(%d)", mAudioStreamId, packet.stream_index ); + //Write the packet to our video store + //FIXME no relevance of last key frame + int ret = videoStore->writeAudioFramePacket( &packet, mFormatContext->streams[packet.stream_index] ); + if ( ret < 0 ) {//Less than zero and we skipped a frame + av_free_packet( &packet ); + return 0; } + } else { + Debug(4, "Not 
recording audio packet" ); } } - av_free_packet( &packet ); - } // end while ! frameComplete - return (frameCount); + } else { +#if LIBAVUTIL_VERSION_CHECK(54, 23, 0, 23, 0) + Debug( 3, "Some other stream index %d, %s", packet.stream_index, av_get_media_type_string( mFormatContext->streams[packet.stream_index]->codec->codec_type) ); +#else + Debug( 3, "Some other stream index %d", packet.stream_index ); +#endif + } + av_free_packet( &packet ); + } // end while ! frameComplete + return (frameCount); } + #endif // HAVE_LIBAVFORMAT From d5f60ad3f8b40fa237db1406c1341f4025f1a23d Mon Sep 17 00:00:00 2001 From: Isaac Connor Date: Wed, 31 Aug 2016 16:58:59 -0400 Subject: [PATCH 2/2] merge pertinent restructuring and packet queue worko --- src/zm_ffmpeg_camera.h | 87 ++++++++++--------- src/zm_packetqueue.cpp | 48 ++-------- src/zm_packetqueue.h | 14 +-- src/zm_videostore.cpp | 193 +++++++++++++++++++++++++---------------- src/zm_videostore.h | 28 +++--- 5 files changed, 188 insertions(+), 182 deletions(-) diff --git a/src/zm_ffmpeg_camera.h b/src/zm_ffmpeg_camera.h index cb762254a..32e6a848b 100644 --- a/src/zm_ffmpeg_camera.h +++ b/src/zm_ffmpeg_camera.h @@ -26,6 +26,7 @@ //#include "zm_utils.h" #include "zm_ffmpeg.h" #include "zm_videostore.h" +#include "zm_packetqueue.h" // // Class representing 'ffmpeg' cameras, i.e. 
those which are @@ -33,62 +34,64 @@ // class FfmpegCamera : public Camera { -protected: - std::string mPath; - std::string mMethod; - std::string mOptions; + protected: + std::string mPath; + std::string mMethod; + std::string mOptions; - int frameCount; + int frameCount; #if HAVE_LIBAVFORMAT - AVFormatContext *mFormatContext; - int mVideoStreamId; - int mAudioStreamId; - AVCodecContext *mCodecContext; - AVCodec *mCodec; - AVFrame *mRawFrame; - AVFrame *mFrame; - _AVPIXELFORMAT imagePixFormat; + AVFormatContext *mFormatContext; + int mVideoStreamId; + int mAudioStreamId; + AVCodecContext *mCodecContext; + AVCodec *mCodec; + AVFrame *mRawFrame; + AVFrame *mFrame; + _AVPIXELFORMAT imagePixFormat; - int OpenFfmpeg(); - int ReopenFfmpeg(); - int CloseFfmpeg(); - static int FfmpegInterruptCallback(void *ctx); - static void* ReopenFfmpegThreadCallback(void *ctx); - bool mIsOpening; - bool mCanCapture; - int mOpenStart; - pthread_t mReopenThread; + int OpenFfmpeg(); + int ReopenFfmpeg(); + int CloseFfmpeg(); + static int FfmpegInterruptCallback(void *ctx); + static void* ReopenFfmpegThreadCallback(void *ctx); + bool mIsOpening; + bool mCanCapture; + int mOpenStart; + pthread_t mReopenThread; #endif // HAVE_LIBAVFORMAT - bool wasRecording; - VideoStore *videoStore; - char oldDirectory[4096]; - zm_packetqueue packetqueue; - //AVPacket lastKeyframePkt; + bool wasRecording; + VideoStore *videoStore; + char oldDirectory[4096]; + zm_packetqueue packetqueue; + + // Last Key frame + //AVPacket lastKeyframePkt; #if HAVE_LIBSWSCALE - struct SwsContext *mConvertContext; + struct SwsContext *mConvertContext; #endif - int64_t startTime; + int64_t startTime; -public: - FfmpegCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ); - ~FfmpegCamera(); + public: + FfmpegCamera( int p_id, const std::string 
&path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ); + ~FfmpegCamera(); - const std::string &Path() const { return( mPath ); } - const std::string &Options() const { return( mOptions ); } - const std::string &Method() const { return( mMethod ); } + const std::string &Path() const { return( mPath ); } + const std::string &Options() const { return( mOptions ); } + const std::string &Method() const { return( mMethod ); } - void Initialise(); - void Terminate(); + void Initialise(); + void Terminate(); - int PrimeCapture(); - int PreCapture(); - int Capture( Image &image ); - int CaptureAndRecord( Image &image, bool recording, char* event_directory ); - int PostCapture(); + int PrimeCapture(); + int PreCapture(); + int Capture( Image &image ); + int CaptureAndRecord( Image &image, bool recording, char* event_directory ); + int PostCapture(); }; #endif // ZM_FFMPEG_CAMERA_H diff --git a/src/zm_packetqueue.cpp b/src/zm_packetqueue.cpp index f9459d112..fbf48613d 100644 --- a/src/zm_packetqueue.cpp +++ b/src/zm_packetqueue.cpp @@ -24,27 +24,18 @@ using namespace std; -zm_packetqueue::zm_packetqueue() - : MaxVideoQueueSize(VIDEO_QUEUESIZE) -, MaxAudioQueueSize(AUDIO_QUEUESIZE) { +zm_packetqueue::zm_packetqueue(){ + } zm_packetqueue::~zm_packetqueue() { + } -bool zm_packetqueue::queueVideoPacket(AVPacket* packet) { - return queuePacket(VideoQueue, packet); -} - -bool zm_packetqueue::queueAudioPacket(AVPacket* packet) -{ - return queuePacket(AudioQueue, packet); -} - -bool zm_packetqueue::queuePacket(queue& pktQueue, AVPacket* packet){ +bool zm_packetqueue::queuePacket( AVPacket* packet ) { AVPacket input_ref = { 0 }; - if (av_packet_ref(&input_ref, packet) < 0){ + if ( av_packet_ref(&input_ref, packet) < 0 ) { return false; } pktQueue.push(*packet); @@ -52,10 +43,8 @@ bool zm_packetqueue::queuePacket(queue& pktQueue, 
AVPacket* packet){ return true; } -bool zm_packetqueue::popPacket(queue& pktQueue, AVPacket* packet) -{ - if (pktQueue.empty()) - { +bool zm_packetqueue::popPacket( AVPacket* packet ) { + if ( pktQueue.empty() ) { return false; } @@ -65,27 +54,8 @@ bool zm_packetqueue::popPacket(queue& pktQueue, AVPacket* packet) return true; } -void zm_packetqueue::clearQueue(std::queue& pktQueue) -{ - while(!pktQueue.empty()) - { +void zm_packetqueue::clearQueue() { + while(!pktQueue.empty()) { pktQueue.pop(); } } - -void zm_packetqueue::clearQueues() -{ - clearQueue(VideoQueue); - clearQueue(AudioQueue); -} - -bool zm_packetqueue::popAudioPacket(AVPacket* packet) -{ - return popPacket(AudioQueue, packet); -} - -bool zm_packetqueue::popVideoPacket(AVPacket* packet) -{ - return popPacket(VideoQueue, packet); -} - diff --git a/src/zm_packetqueue.h b/src/zm_packetqueue.h index 7a016037f..f142dba45 100644 --- a/src/zm_packetqueue.h +++ b/src/zm_packetqueue.h @@ -33,19 +33,13 @@ class zm_packetqueue { public: zm_packetqueue(); virtual ~zm_packetqueue(); - bool queuePacket(std::queue& pktQueue, AVPacket* packet); - bool queueVideoPacket(AVPacket* packet); - bool queueAudioPacket(AVPacket* packet); - bool popPacket(std::queue& pktQueue, AVPacket* packet); + bool queuePacket( AVPacket* packet ); + bool popPacket( AVPacket* packet ); bool popVideoPacket(AVPacket* packet); bool popAudioPacket(AVPacket* packet); - void clearQueues(); - void clearQueue(std::queue& pktQueue); + void clearQueue( ); private: - int MaxVideoQueueSize; - int MaxAudioQueueSize; - std::queue VideoQueue; - std::queue AudioQueue; + std::queue pktQueue; }; diff --git a/src/zm_videostore.cpp b/src/zm_videostore.cpp index 45cd1d280..055c50a37 100644 --- a/src/zm_videostore.cpp +++ b/src/zm_videostore.cpp @@ -29,19 +29,16 @@ #include "zm_videostore.h" extern "C"{ -#include "libavutil/time.h" + #include "libavutil/time.h" } VideoStore::VideoStore(const char *filename_in, const char *format_in, - AVStream *input_st, - 
AVStream *inpaud_st, + AVStream *input_video_stream, + AVStream *input_audio_stream, int64_t nStartTime, Monitor::Orientation orientation ) { - AVDictionary *pmetadata = NULL; - int dsr; - //store inputs in variables local to class filename = filename_in; format = format_in; @@ -49,10 +46,10 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in, keyframeMessage = false; keyframeSkipNumber = 0; - Info("Opening video storage stream %s format: %d\n", filename, format); + Info("Opening video storage stream %s format: %s\n", filename, format); - //Init everything we need int ret; + //Init everything we need av_register_all(); ret = avformat_alloc_output_context2(&oc, NULL, NULL, filename); @@ -74,88 +71,122 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in, } } - dsr = av_dict_set(&pmetadata, "title", "Zoneminder Security Recording", 0); + AVDictionary *pmetadata = NULL; + int dsr = av_dict_set(&pmetadata, "title", "Zoneminder Security Recording", 0); if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__ ); oc->metadata = pmetadata; - fmt = oc->oformat; + output_format = oc->oformat; - video_st = avformat_new_stream(oc, input_st->codec->codec); - if (!video_st) { + video_stream = avformat_new_stream(oc, (AVCodec *)input_video_stream->codec->codec); + if (!video_stream) { Fatal("Unable to create video out stream\n"); } - ret = avcodec_copy_context(video_st->codec, input_st->codec); + ret = avcodec_copy_context(video_stream->codec, input_video_stream->codec); if (ret < 0) { - Fatal("Unable to copy input video context to output video context " - "%s\n", av_make_error_string(ret).c_str()); + Fatal("Unable to copy input video context to output video context %s\n", + av_make_error_string(ret).c_str()); } - if ( video_st->sample_aspect_ratio.den != video_st->codec->sample_aspect_ratio.den ) { + if ( input_video_stream->codec->sample_aspect_ratio.den && ( video_stream->sample_aspect_ratio.den != 
input_video_stream->codec->sample_aspect_ratio.den ) ) { + Warning("Fixing sample_aspect_ratio.den from (%d) to (%d)", video_stream->sample_aspect_ratio.den, input_video_stream->codec->sample_aspect_ratio.den ); + video_stream->sample_aspect_ratio.den = input_video_stream->codec->sample_aspect_ratio.den; + } else { + Debug(3, "aspect ratio denominator is (%d)", video_stream->sample_aspect_ratio.den ); + } + if ( input_video_stream->codec->sample_aspect_ratio.num && ( video_stream->sample_aspect_ratio.num != input_video_stream->codec->sample_aspect_ratio.num ) ) { + Warning("Fixing sample_aspect_ratio.num from video_stream(%d) to input_video_stream(%d)", video_stream->sample_aspect_ratio.num, input_video_stream->codec->sample_aspect_ratio.num ); + video_stream->sample_aspect_ratio.num = input_video_stream->codec->sample_aspect_ratio.num; + } else { + Debug(3, "aspect ratio numerator is (%d)", video_stream->sample_aspect_ratio.num ); + } + if ( video_stream->codec->codec_id != input_video_stream->codec->codec_id ) { + Warning("Fixing video_stream->codec->codec_id"); + video_stream->codec->codec_id = input_video_stream->codec->codec_id; + } + if ( ! video_stream->codec->time_base.num ) { + Warning("video_stream->codec->time_base.num is not set%d/%d. Fixing by setting it to 1", video_stream->codec->time_base.num, video_stream->codec->time_base.den); + Warning("video_stream->codec->time_base.num is not set%d/%d. 
Fixing by setting it to 1", video_stream->time_base.num, video_stream->time_base.den); + video_stream->codec->time_base.num = video_stream->time_base.num; + video_stream->codec->time_base.den = video_stream->time_base.den; + } + + if ( video_stream->sample_aspect_ratio.den != video_stream->codec->sample_aspect_ratio.den ) { Warning("Fixingample_aspect_ratio.den"); - video_st->sample_aspect_ratio.den = video_st->codec->sample_aspect_ratio.den; + video_stream->sample_aspect_ratio.den = video_stream->codec->sample_aspect_ratio.den; } - if ( video_st->sample_aspect_ratio.num != input_st->codec->sample_aspect_ratio.num ) { + if ( video_stream->sample_aspect_ratio.num != input_video_stream->codec->sample_aspect_ratio.num ) { Warning("Fixingample_aspect_ratio.num"); - video_st->sample_aspect_ratio.num = input_st->codec->sample_aspect_ratio.num; + video_stream->sample_aspect_ratio.num = input_video_stream->codec->sample_aspect_ratio.num; } - if ( video_st->codec->codec_id != input_st->codec->codec_id ) { - Warning("Fixing video_st->codec->codec_id"); - video_st->codec->codec_id = input_st->codec->codec_id; + if ( video_stream->codec->codec_id != input_video_stream->codec->codec_id ) { + Warning("Fixing video_stream->codec->codec_id"); + video_stream->codec->codec_id = input_video_stream->codec->codec_id; } - if ( ! video_st->codec->time_base.num ) { - Warning("video_st->codec->time_base.num is not set%d/%d. Fixing by setting it to 1", video_st->codec->time_base.num, video_st->codec->time_base.den); - Warning("video_st->codec->time_base.num is not set%d/%d. Fixing by setting it to 1", video_st->time_base.num, video_st->time_base.den); - video_st->codec->time_base.num = video_st->time_base.num; - video_st->codec->time_base.den = video_st->time_base.den; + if ( ! video_stream->codec->time_base.num ) { + Warning("video_stream->codec->time_base.num is not set%d/%d. 
Fixing by setting it to 1", video_stream->codec->time_base.num, video_stream->codec->time_base.den); + Warning("video_stream->codec->time_base.num is not set%d/%d. Fixing by setting it to 1", video_stream->time_base.num, video_stream->time_base.den); + video_stream->codec->time_base.num = video_stream->time_base.num; + video_stream->codec->time_base.den = video_stream->time_base.den; } - video_st->codec->codec_tag = 0; + video_stream->codec->codec_tag = 0; if (oc->oformat->flags & AVFMT_GLOBALHEADER) { - video_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; + video_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; } if ( orientation ) { if ( orientation == Monitor::ROTATE_0 ) { } else if ( orientation == Monitor::ROTATE_90 ) { - dsr = av_dict_set( &video_st->metadata, "rotate", "90", 0); + dsr = av_dict_set( &video_stream->metadata, "rotate", "90", 0); if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__ ); } else if ( orientation == Monitor::ROTATE_180 ) { - dsr = av_dict_set( &video_st->metadata, "rotate", "180", 0); + dsr = av_dict_set( &video_stream->metadata, "rotate", "180", 0); if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__ ); } else if ( orientation == Monitor::ROTATE_270 ) { - dsr = av_dict_set( &video_st->metadata, "rotate", "270", 0); + dsr = av_dict_set( &video_stream->metadata, "rotate", "270", 0); if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__ ); } else { Warning( "Unsupported Orientation(%d)", orientation ); } } + if (input_audio_stream) { - if (inpaud_st) { - audio_st = avformat_new_stream(oc, inpaud_st->codec->codec); - if (!audio_st) { - Error("Unable to create audio out stream\n"); - audio_st = NULL; + if ( input_audio_stream->codec->codec_id != AV_CODEC_ID_AAC ) { + Warning("Can't transcode to AAC at this time"); + audio_stream = NULL; } else { - ret = avcodec_copy_context(audio_st->codec, inpaud_st->codec); + Debug(3, "Got something other than AAC (%d)", input_audio_stream->codec->codec_id ); 
+ + audio_stream = avformat_new_stream(oc, (AVCodec *)input_audio_stream->codec->codec); + if (!audio_stream) { + Error("Unable to create audio out stream\n"); + audio_stream = NULL; + } + ret = avcodec_copy_context(audio_stream->codec, input_audio_stream->codec); if (ret < 0) { Fatal("Unable to copy audio context %s\n", av_make_error_string(ret).c_str()); } - audio_st->codec->codec_tag = 0; - if (oc->oformat->flags & AVFMT_GLOBALHEADER) { - audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; + audio_stream->codec->codec_tag = 0; + if ( audio_stream->codec->channels > 1 ) { + Warning("Audio isn't mono, changing it."); + audio_stream->codec->channels = 1; } - } + if (oc->oformat->flags & AVFMT_GLOBALHEADER) { + audio_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; + } + } // end if is AAC } else { Debug(3, "No Audio output stream"); - audio_st = NULL; + audio_stream = NULL; } /* open the output file, if needed */ - if (!(fmt->flags & AVFMT_NOFILE)) { + if (!(output_format->flags & AVFMT_NOFILE)) { ret = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,NULL,NULL); if (ret < 0) { Fatal("Could not open output file '%s': %s\n", filename, @@ -200,15 +231,15 @@ VideoStore::~VideoStore(){ // I wonder if we should be closing the file first. // I also wonder if we really need to be doing all the context allocation/de-allocation constantly, or whether we can just re-use it. Just do a file open/close/writeheader/etc. // What if we were only doing audio recording? - if ( video_st ) { - avcodec_close(video_st->codec); + if ( video_stream ) { + avcodec_close(video_stream->codec); } - if (audio_st) { - avcodec_close(audio_st->codec); + if (audio_stream) { + avcodec_close(audio_stream->codec); } // WHen will be not using a file ? - if (!(fmt->flags & AVFMT_NOFILE)) { + if (!(output_format->flags & AVFMT_NOFILE)) { /* Close the output file. 
*/ if ( int rc = avio_close(oc->pb) ) { Error("Error closing avio %s", av_err2str( rc ) ); @@ -238,35 +269,42 @@ void VideoStore::dumpPacket( AVPacket *pkt ){ Info("%s:%d:DEBUG: %s", __FILE__, __LINE__, b); } -int VideoStore::writeVideoFramePacket(AVPacket *ipkt, AVStream *input_st){//, AVPacket *lastKeyframePkt){ +int VideoStore::writeVideoFramePacket(AVPacket *ipkt, AVStream *input_video_stream){//, AVPacket *lastKeyframePkt){ - Debug(3, "before ost_tbcket %d", startTime ); - zm_dump_stream_format( oc, ipkt->stream_index, 0, 1 ); - Debug(3, "before ost_tbcket %d", startTime ); - int64_t ost_tb_start_time = av_rescale_q(startTime, AV_TIME_BASE_Q, video_st->time_base); + Debug(2, "writeVideoFrame"); + Debug(3, "before ost_tbcket starttime %d, timebase%d", startTime, video_stream->time_base ); + //zm_dump_stream_format( oc, ipkt->stream_index, 0, 1 ); + Debug(2, "writeVideoFrame %x", video_stream); + int64_t ost_tb_start_time = av_rescale_q(startTime, AV_TIME_BASE_Q, video_stream->time_base); + Debug(3, "before ost_tbcket starttime %d, ost_tbcket %d", startTime, ost_tb_start_time ); + Debug(2, "writeVideoFrame"); AVPacket opkt, safepkt; AVPicture pict; + Debug(2, "writeVideoFrame init_packet"); av_init_packet(&opkt); //Scale the PTS of the outgoing packet to be the correct time base if (ipkt->pts != AV_NOPTS_VALUE) { - opkt.pts = av_rescale_q(ipkt->pts-startPts, input_st->time_base, video_st->time_base) - ost_tb_start_time; + opkt.pts = av_rescale_q(ipkt->pts-startPts, input_video_stream->time_base, video_stream->time_base) - ost_tb_start_time; + Debug(3, "opkt.pts = %d from ipkt->pts(%d) - startPts(%d), input->time_base(%d) video_stream->time-base(%d)", opkt.pts, ipkt->pts, startPts, input_video_stream->time_base, video_stream->time_base ); } else { opkt.pts = AV_NOPTS_VALUE; } //Scale the DTS of the outgoing packet to be the correct time base if(ipkt->dts == AV_NOPTS_VALUE) { - opkt.dts = av_rescale_q(input_st->cur_dts-startDts, AV_TIME_BASE_Q, 
video_st->time_base); + opkt.dts = av_rescale_q(input_video_stream->cur_dts-startDts, AV_TIME_BASE_Q, video_stream->time_base); + Debug(3, "opkt.dts = %d from input_video_stream->cur_dts(%d) - startDts(%d), video_stream->time-base(%d)", opkt.dts, input_video_stream->cur_dts, startDts, video_stream->time_base ); } else { - opkt.dts = av_rescale_q(ipkt->dts-startDts, input_st->time_base, video_st->time_base); + opkt.dts = av_rescale_q(ipkt->dts-startDts, input_video_stream->time_base, video_stream->time_base); + Debug(3, "opkt.dts = %d from ipkt->dts(%d) - startDts(%d), video_stream->time-base(%d)", opkt.dts, ipkt->dts, startDts, video_stream->time_base ); } opkt.dts -= ost_tb_start_time; - opkt.duration = av_rescale_q(ipkt->duration, input_st->time_base, video_st->time_base); + opkt.duration = av_rescale_q(ipkt->duration, input_video_stream->time_base, video_stream->time_base); opkt.flags = ipkt->flags; opkt.pos=-1; @@ -275,9 +313,9 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt, AVStream *input_st){//, AV opkt.stream_index = ipkt->stream_index; /*opkt.flags |= AV_PKT_FLAG_KEY;*/ - if (video_st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (fmt->flags & AVFMT_RAWPICTURE)) { + if (video_stream->codec->codec_type == AVMEDIA_TYPE_VIDEO && (output_format->flags & AVFMT_RAWPICTURE)) { /* store AVPicture in AVPacket, as expected by the output format */ - avpicture_fill(&pict, opkt.data, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height); + avpicture_fill(&pict, opkt.data, video_stream->codec->pix_fmt, video_stream->codec->width, video_stream->codec->height); opkt.data = (uint8_t *)&pict; opkt.size = sizeof(AVPicture); opkt.flags |= AV_PKT_FLAG_KEY; @@ -289,7 +327,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt, AVStream *input_st){//, AV Warning("%s:%d: Mangled AVPacket: discarding frame", __FILE__, __LINE__ ); dumpPacket(&opkt); - } else if ((prevDts > 0) && (prevDts >= opkt.dts)) { + } else if ((prevDts > 0) && (prevDts > 
opkt.dts)) { Warning("%s:%d: DTS out of order: %lld \u226E %lld; discarding frame", __FILE__, __LINE__, prevDts, opkt.dts); prevDts = opkt.dts; dumpPacket(&opkt); @@ -309,31 +347,32 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt, AVStream *input_st){//, AV av_free_packet(&opkt); return 0; + } -int VideoStore::writeAudioFramePacket(AVPacket *ipkt, AVStream *input_st){ +int VideoStore::writeAudioFramePacket(AVPacket *ipkt, AVStream *input_video_stream){ + Debug(2, "writeAudioFrame"); - if(!audio_st) { - Error("Called writeAudioFramePacket when no audio_st"); - return -1;//FIXME -ve return codes do not free packet in ffmpeg_camera at the moment + if(!audio_stream) { + Error("Called writeAudioFramePacket when no audio_stream"); + return 0;//FIXME -ve return codes do not free packet in ffmpeg_camera at the moment } /*if(!keyframeMessage) return -1;*/ //zm_dump_stream_format( oc, ipkt->stream_index, 0, 1 ); // What is this doing? Getting the time of the start of this video chunk? Does that actually make sense? 
- int64_t ost_tb_start_time = av_rescale_q(startTime, AV_TIME_BASE_Q, audio_st->time_base); + int64_t ost_tb_start_time = av_rescale_q(startTime, AV_TIME_BASE_Q, audio_stream->time_base); AVPacket opkt; av_init_packet(&opkt); Debug(3, "after init packet" ); - //Scale the PTS of the outgoing packet to be the correct time base if (ipkt->pts != AV_NOPTS_VALUE) { Debug(3, "Rescaling output pts"); - opkt.pts = av_rescale_q(ipkt->pts-startPts, input_st->time_base, audio_st->time_base) - ost_tb_start_time; + opkt.pts = av_rescale_q(ipkt->pts-startPts, input_video_stream->time_base, audio_stream->time_base) - ost_tb_start_time; } else { Debug(3, "Setting output pts to AV_NOPTS_VALUE"); opkt.pts = AV_NOPTS_VALUE; @@ -342,30 +381,30 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt, AVStream *input_st){ //Scale the DTS of the outgoing packet to be the correct time base if(ipkt->dts == AV_NOPTS_VALUE) { Debug(4, "ipkt->dts == AV_NOPTS_VALUE %d to %d", AV_NOPTS_VALUE, opkt.dts ); - opkt.dts = av_rescale_q(input_st->cur_dts-startDts, AV_TIME_BASE_Q, audio_st->time_base); + opkt.dts = av_rescale_q(input_video_stream->cur_dts-startDts, AV_TIME_BASE_Q, audio_stream->time_base); Debug(4, "ipkt->dts == AV_NOPTS_VALUE %d to %d", AV_NOPTS_VALUE, opkt.dts ); } else { Debug(4, "ipkt->dts != AV_NOPTS_VALUE %d to %d", AV_NOPTS_VALUE, opkt.dts ); - opkt.dts = av_rescale_q(ipkt->dts-startDts, input_st->time_base, audio_st->time_base); + opkt.dts = av_rescale_q(ipkt->dts-startDts, input_video_stream->time_base, audio_stream->time_base); Debug(4, "ipkt->dts != AV_NOPTS_VALUE %d to %d", AV_NOPTS_VALUE, opkt.dts ); } opkt.dts -= ost_tb_start_time; // Seems like it would be really weird for the codec type to NOT be audiu - if (audio_st->codec->codec_type == AVMEDIA_TYPE_AUDIO && ipkt->dts != AV_NOPTS_VALUE) { + if (audio_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO && ipkt->dts != AV_NOPTS_VALUE) { Debug( 4, "code is audio, dts != AV_NOPTS_VALUE " ); - int duration = 
av_get_audio_frame_duration(input_st->codec, ipkt->size); + int duration = av_get_audio_frame_duration(input_video_stream->codec, ipkt->size); if(!duration) - duration = input_st->codec->frame_size; + duration = input_video_stream->codec->frame_size; //FIXME where to get filter_in_rescale_delta_last //FIXME av_rescale_delta doesn't exist in ubuntu vivid libavtools - opkt.dts = opkt.pts = av_rescale_delta(input_st->time_base, ipkt->dts, - (AVRational){1, input_st->codec->sample_rate}, duration, &filter_in_rescale_delta_last, - audio_st->time_base) - ost_tb_start_time; + opkt.dts = opkt.pts = av_rescale_delta(input_video_stream->time_base, ipkt->dts, + (AVRational){1, input_video_stream->codec->sample_rate}, duration, &filter_in_rescale_delta_last, + audio_stream->time_base) - ost_tb_start_time; } - opkt.duration = av_rescale_q(ipkt->duration, input_st->time_base, audio_st->time_base); + opkt.duration = av_rescale_q(ipkt->duration, input_video_stream->time_base, audio_stream->time_base); opkt.pos=-1; opkt.flags = ipkt->flags; diff --git a/src/zm_videostore.h b/src/zm_videostore.h index a11973b4a..0ef51cd1c 100644 --- a/src/zm_videostore.h +++ b/src/zm_videostore.h @@ -10,30 +10,30 @@ class VideoStore { private: - AVOutputFormat *fmt; + AVOutputFormat *output_format; AVFormatContext *oc; - AVStream *video_st; - AVStream *audio_st; + AVStream *video_stream; + AVStream *audio_stream; const char *filename; const char *format; - bool keyframeMessage; - int keyframeSkipNumber; + bool keyframeMessage; + int keyframeSkipNumber; - int64_t startTime; - int64_t startPts; - int64_t startDts; - int64_t prevDts; - int64_t filter_in_rescale_delta_last; + int64_t startTime; + int64_t startPts; + int64_t startDts; + int64_t prevDts; + int64_t filter_in_rescale_delta_last; public: - VideoStore(const char *filename_in, const char *format_in, AVStream *input_st, AVStream *inpaud_st, int64_t nStartTime, Monitor::Orientation p_orientation ); + VideoStore(const char *filename_in, const 
char *format_in, AVStream *input_video_stream, AVStream *input_audio_stream, int64_t nStartTime, Monitor::Orientation p_orientation ); ~VideoStore(); - int writeVideoFramePacket(AVPacket *pkt, AVStream *input_st);//, AVPacket *lastKeyframePkt); - int writeAudioFramePacket(AVPacket *pkt, AVStream *input_st); - void dumpPacket( AVPacket *pkt ); + int writeVideoFramePacket(AVPacket *pkt, AVStream *input_video_stream);//, AVPacket *lastKeyframePkt); + int writeAudioFramePacket(AVPacket *pkt, AVStream *input_audio_stream); + void dumpPacket( AVPacket *pkt ); }; /*