cleanups, code style, free resample_context
parent 8f6007bb49
commit bd22d72e18
@@ -63,10 +63,6 @@ FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::stri
 mOpenStart = 0;
 mReopenThread = 0;
 videoStore = NULL;
-audio_last_pts = 0;
-audio_last_dts = 0;
-video_last_pts = 0;
-video_last_dts = 0;

 #if HAVE_LIBSWSCALE
 mConvertContext = NULL;
@@ -313,14 +309,12 @@ int FfmpegCamera::OpenFfmpeg() {
 // The one we want Might not be the first
 mVideoStreamId = -1;
 mAudioStreamId = -1;
-for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
-{
+for (unsigned int i=0; i < mFormatContext->nb_streams; i++ ) {
 #if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
-if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
+if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO ) {
 #else
-if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
+if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO ) {
 #endif
-{
 if ( mVideoStreamId == -1 ) {
 mVideoStreamId = i;
 // if we break, then we won't find the audio stream
@@ -330,11 +324,10 @@ int FfmpegCamera::OpenFfmpeg() {
 }
 }
 #if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
-if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
+if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO ) {
 #else
-if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
+if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO ) {
 #endif
-{
 if ( mAudioStreamId == -1 ) {
 mAudioStreamId = i;
 } else {
@@ -423,6 +416,14 @@ int FfmpegCamera::OpenFfmpeg() {
 Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
 }

+mConvertContext = sws_getContext(mVideoCodecContext->width,
+mVideoCodecContext->height,
+mVideoCodecContext->pix_fmt,
+width, height,
+imagePixFormat, SWS_BICUBIC, NULL,
+NULL, NULL);
+if ( mConvertContext == NULL )
+Fatal( "Unable to create conversion context for %s", mPath.c_str() );
 #else // HAVE_LIBSWSCALE
 Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
 #endif // HAVE_LIBSWSCALE
@@ -476,8 +477,7 @@ int FfmpegCamera::CloseFfmpeg(){
 mAudioCodecContext = NULL; // Freed by av_close_input_file
 }

-if ( mFormatContext )
-{
+if ( mFormatContext ) {
 #if !LIBAVFORMAT_VERSION_CHECK(53, 17, 0, 25, 0)
 av_close_input_file( mFormatContext );
 #else
@@ -690,7 +690,7 @@ Debug(5, "After av_read_frame (%d)", ret );
 return 0;
 }
 }
-Debug(3, "about to decode video" );
+Debug(4, "about to decode video" );
 ret = zm_avcodec_decode_video( mVideoCodecContext, mRawFrame, &frameComplete, &packet );
 if ( ret < 0 ) {
 av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
@@ -715,25 +715,11 @@ Debug(5, "After av_read_frame (%d)", ret );
 }
 avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);

-#if HAVE_LIBSWSCALE
-if ( mConvertContext == NULL ) {
-mConvertContext = sws_getContext(mVideoCodecContext->width,
-mVideoCodecContext->height,
-mVideoCodecContext->pix_fmt,
-width, height,
-imagePixFormat, SWS_BICUBIC, NULL,
-NULL, NULL);
-if ( mConvertContext == NULL )
-Fatal( "Unable to create conversion context for %s", mPath.c_str() );
-}
-
 if (sws_scale(mConvertContext, mRawFrame->data, mRawFrame->linesize,
-0, mVideoCodecContext->height, mFrame->data, mFrame->linesize) < 0)
+0, mVideoCodecContext->height, mFrame->data, mFrame->linesize) < 0) {
 Fatal("Unable to convert raw format %u to target format %u at frame %d",
 mVideoCodecContext->pix_fmt, imagePixFormat, frameCount);
-#else // HAVE_LIBSWSCALE
-Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
-#endif // HAVE_LIBSWSCALE
+}

 frameCount++;
 } else {
@@ -769,6 +755,6 @@ Debug(5, "After av_read_frame (%d)", ret );
 //}
 } // end while ! frameComplete
 return (frameCount);
-}
+} // end FfmpegCamera::CaptureAndRecord

 #endif // HAVE_LIBAVFORMAT

@@ -2833,7 +2833,7 @@ int Monitor::Capture() {

 unsigned int deinterlacing_value = deinterlacing & 0xff;

-if ( deinterlacing_value == 4) {
+if ( deinterlacing_value == 4 ) {
 if ( FirstCapture != 1 ) {
 /* Copy the next image into the shared memory */
 capture_image->CopyBuffer(*(next_buffer.image));

@@ -206,6 +206,7 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,

 audio_output_codec = NULL;
 audio_input_context = NULL;
+resample_context = NULL;

 if (audio_input_stream) {
 audio_input_context = audio_input_stream->codec;
@@ -259,6 +260,18 @@ Debug(2, "Have audio_output_context");
 audio_output_context->sample_fmt = AV_SAMPLE_FMT_FLTP;
 }

+Debug(3, "Audio Time bases input stream (%d/%d) input codec: (%d/%d) output_stream (%d/%d) output codec (%d/%d)",
+audio_input_stream->time_base.num,
+audio_input_stream->time_base.den,
+audio_input_context->time_base.num,
+audio_input_context->time_base.den,
+audio_output_stream->time_base.num,
+audio_output_stream->time_base.den,
+audio_output_context->time_base.num,
+audio_output_context->time_base.den
+);
+audio_output_stream->time_base = (AVRational){ 1, audio_output_context->sample_rate };
+
 Debug(3, "Audio Time bases input stream (%d/%d) input codec: (%d/%d) output_stream (%d/%d) output codec (%d/%d)",
 audio_input_stream->time_base.num,
 audio_input_stream->time_base.den,
@@ -429,13 +442,13 @@ Debug(2, "Have audio_output_context");
 //os->ctx_inited = 1;
 //avio_flush(ctx->pb);
 //av_dict_free(&opts);
+zm_dump_stream_format( oc, 0, 0, 1 );
+if ( audio_output_stream )
+zm_dump_stream_format( oc, 1, 0, 1 );

 /* Write the stream header, if any. */
 ret = avformat_write_header(oc, NULL);
 if (ret < 0) {
-zm_dump_stream_format( oc, 0, 0, 1 );
-if ( audio_output_stream )
-zm_dump_stream_format( oc, 1, 0, 1 );
 Error("Error occurred when writing output file header to %s: %s\n",
 filename,
 av_make_error_string(ret).c_str());
@@ -485,13 +498,16 @@ VideoStore::~VideoStore(){

 /* free the stream */
 avformat_free_context(oc);
+
+if ( resample_context )
+swr_free( &resample_context );
 }


 void VideoStore::dumpPacket( AVPacket *pkt ){
 char b[10240];

-snprintf(b, sizeof(b), " pts: %" PRId64 ", dts: %" PRId64 ", data: %p, size: %d, sindex: %d, dflags: %04x, s-pos: %" PRId64 ", c-duration: %" PRId64 "\n"
+snprintf(b, sizeof(b), " pts: %" PRId64 ", dts: %" PRId64 ", data: %p, size: %d, sindex: %d, dflags: %04x, s-pos: %" PRId64 ", c-duration: %d\n"
 , pkt->pts
 , pkt->dts
 , pkt->data
@@ -506,8 +522,6 @@ void VideoStore::dumpPacket( AVPacket *pkt ){

 int VideoStore::writeVideoFramePacket( AVPacket *ipkt ) {

-AVPacket opkt;
-AVPicture pict;

 Debug(4, "writeVideoFrame init_packet");
 av_init_packet(&opkt);
@@ -585,6 +599,7 @@ if ( opkt.dts != AV_NOPTS_VALUE ) {

 #if 0
 if (video_output_context->codec_type == AVMEDIA_TYPE_VIDEO && (output_format->flags & AVFMT_RAWPICTURE)) {
+AVPicture pict;
 Debug(3, "video and RAWPICTURE");
 /* store AVPicture in AVPacket, as expected by the output format */
 avpicture_fill(&pict, opkt.data, video_output_context->pix_fmt, video_output_context->width, video_output_context->height, 0);
@@ -641,7 +656,6 @@ int VideoStore::writeAudioFramePacket( AVPacket *ipkt ) {

 int ret;

-AVPacket opkt;

 av_init_packet(&opkt);
 Debug(5, "after init packet" );

@@ -24,6 +24,8 @@ private:
 AVStream *video_input_stream;
 AVStream *audio_input_stream;

+// Move this into the object so that we aren't constantly allocating/deallocating it on the stack
+AVPacket opkt;
 // we are transcoding
 AVFrame *input_frame;
 AVFrame *output_frame;