remove redundant code and memory leaks
commit 6702b10dee (parent ef5e296144)
@@ -184,47 +184,33 @@ int hacked_up_context2_for_older_ffmpeg(AVFormatContext **avctx, AVOutputFormat
     }
   }
 
-  if (!oformat) {
-    if (format) {
-      oformat = av_guess_format(format, NULL, NULL);
-      if (!oformat) {
-        av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
-        ret = AVERROR(EINVAL);
-      }
-    } else {
-      oformat = av_guess_format(NULL, filename, NULL);
-      if (!oformat) {
-        ret = AVERROR(EINVAL);
-        av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n", filename);
-      }
-    }
-  }
 
   if (ret) {
     avformat_free_context(s);
     return ret;
-  } else {
+  }
 
   s->oformat = oformat;
+#if 0
   if (s->oformat->priv_data_size > 0) {
-    s->priv_data = av_mallocz(s->oformat->priv_data_size);
-    if (s->priv_data) {
     if (s->oformat->priv_class) {
+      // This looks wrong, we just allocated priv_data and now we are losing the pointer to it.FIXME
       *(const AVClass**)s->priv_data = s->oformat->priv_class;
       av_opt_set_defaults(s->priv_data);
-    }
   } else {
+    s->priv_data = av_mallocz(s->oformat->priv_data_size);
+    if ( ! s->priv_data) {
       av_log(s, AV_LOG_ERROR, "Out of memory\n");
       ret = AVERROR(ENOMEM);
       return ret;
     }
     s->priv_data = NULL;
   }
+#endif
 
   if (filename) strncpy(s->filename, filename, sizeof(s->filename));
   *avctx = s;
   return 0;
 }
-}
 
 static void zm_log_fps(double d, const char *postfix) {
   uint64_t v = lrintf(d * 100);
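Note on the hunk above: the block now wrapped in #if 0 is the source of the FIXME comment. With the allocation lines moved, the first branch writes through s->priv_data without ever allocating it, while the else branch allocates a buffer and then overwrites the pointer with NULL, leaking it; disabling the whole block sidesteps both problems. A non-leaking version of that setup, essentially what FFmpeg's own avformat_alloc_output_context2() does internally, could look like the sketch below. The helper name is illustrative and not part of the commit.

// Sketch only (not the committed code): allocate the muxer's private data without
// leaking it, mirroring what avformat_alloc_output_context2() does internally.
extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>
}

static int alloc_output_priv_data(AVFormatContext *s) {
  if ( s->oformat->priv_data_size > 0 ) {
    s->priv_data = av_mallocz(s->oformat->priv_data_size);
    if ( !s->priv_data )
      return AVERROR(ENOMEM);  // caller still owns s and frees it with avformat_free_context()
    if ( s->oformat->priv_class ) {
      // the first member of a muxer's private struct is its AVClass pointer
      *(const AVClass **)s->priv_data = s->oformat->priv_class;
      av_opt_set_defaults(s->priv_data);
    }
  } else {
    s->priv_data = NULL;
  }
  return 0;
}

On FFmpeg builds new enough to provide avformat_alloc_output_context2(), none of this is needed, which is presumably why the shim only exists for older releases.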
@@ -285,10 +285,8 @@ int FfmpegCamera::OpenFfmpeg() {
   Info( "Stream open %s", mPath.c_str() );
 
 #if !LIBAVFORMAT_VERSION_CHECK(53, 6, 0, 6, 0)
-  Debug ( 1, "Calling av_find_stream_info" );
   if ( av_find_stream_info( mFormatContext ) < 0 )
 #else
-  Debug ( 1, "Calling avformat_find_stream_info" );
   if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
 #endif
     Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
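The two Debug lines dropped in this hunk only announced which probe call was about to run; the guarded calls themselves are unchanged. For context, the usual open-and-probe sequence with the newer API (the #else branch above) looks roughly like the sketch below. It is a stand-alone illustration, not ZoneMinder code, and error handling is reduced to early returns.

// Minimal sketch of avformat_open_input() + avformat_find_stream_info(), the modern
// replacements for the older av_open_input_file()/av_find_stream_info() pair.
extern "C" {
#include <libavformat/avformat.h>
}

int probe_stream(const char *path) {
  AVFormatContext *fmt_ctx = NULL;
  if ( avformat_open_input(&fmt_ctx, path, NULL, NULL) < 0 )
    return -1;
  if ( avformat_find_stream_info(fmt_ctx, NULL) < 0 ) {
    avformat_close_input(&fmt_ctx);
    return -1;
  }
  av_dump_format(fmt_ctx, 0, path, 0);  // log the detected streams
  avformat_close_input(&fmt_ctx);
  return 0;
}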
@@ -559,12 +557,12 @@ int FfmpegCamera::CloseFfmpeg() {
 
   if ( mVideoCodecContext ) {
     avcodec_close(mVideoCodecContext);
-    //av_free(mVideoCodecContext);
+    avcodec_free_context(&mVideoCodecContext);
     mVideoCodecContext = NULL; // Freed by av_close_input_file
   }
   if ( mAudioCodecContext ) {
     avcodec_close(mAudioCodecContext);
-    //av_free(mAudioCodecContext);
+    avcodec_free_context(&mAudioCodecContext);
     mAudioCodecContext = NULL; // Freed by av_close_input_file
   }
 
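This is one of the actual leak fixes in the commit: previously the av_free() call was commented out, so each close released the codec state but leaked the AVCodecContext itself along with buffers it owns, such as extradata. avcodec_free_context() releases all of that and nulls the caller's pointer. The pattern used for both members, written as a generic helper (the helper name is illustrative):

// Sketch of the close-then-free idiom applied to mVideoCodecContext/mAudioCodecContext.
extern "C" {
#include <libavcodec/avcodec.h>
}

static void close_codec_context(AVCodecContext **ctx) {
  if ( *ctx ) {
    avcodec_close(*ctx);        // release the codec's internal state
    avcodec_free_context(ctx);  // free extradata and the context itself, set *ctx to NULL
  }
}

Because avcodec_free_context() already sets the pointer to NULL, the remaining mVideoCodecContext/mAudioCodecContext = NULL assignments are harmless but redundant, and the old "// Freed by av_close_input_file" comments are now stale.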
@@ -554,11 +554,15 @@ bool Monitor::connect() {
   // alarmed images that must be discarded when event is created
 
   // Couldn't we just make sure there is always enough frames in the ring buffer?
+  if ( purpose == ANALYSIS ) {
+    if ( analysis_fps ) {
       pre_event_buffer_count = pre_event_count + alarm_frame_count - 1;
       pre_event_buffer = new ZMPacket[pre_event_buffer_count];
       for ( int i = 0; i < pre_event_buffer_count; i++ ) {
         pre_event_buffer[i].image = new Image( width, height, camera->Colours(), camera->SubpixelOrder());
       }
+    }
+  }
   Debug(3, "Success connecting");
   return true;
 } // Monitor::connect
@@ -1614,7 +1618,7 @@ Debug(3,"Not ready?");
     shared_data->last_read_time = now.tv_sec;
     mutex.unlock();
 
-    if ( analysis_fps ) {
+    if ( (purpose == ANALYSIS) && analysis_fps ) {
      // If analysis fps is set, add analysed image to dedicated pre event buffer
      Debug(3,"analysis fps image_count(%d) pre_event_buffer_count(%d)", image_count, pre_event_buffer_count );
      int pre_index = pre_event_buffer_count ? image_count%pre_event_buffer_count : 0;
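The two Monitor hunks above work as a pair: the pre-event buffer is only allocated when the process runs with purpose == ANALYSIS (previous hunk), and only that same case now writes into it here, treating it as a ring indexed by image_count. A stand-alone toy of that indexing, with an invented buffer size:

// Demo of the ring indexing used for pre_event_buffer above; sizes are made up.
#include <cstdio>

int main() {
  const int pre_event_buffer_count = 5;  // e.g. pre_event_count + alarm_frame_count - 1
  for ( int image_count = 0; image_count < 12; image_count++ ) {
    int pre_index = pre_event_buffer_count ? image_count % pre_event_buffer_count : 0;
    printf("frame %2d -> pre_event_buffer[%d]\n", image_count, pre_index);
  }
  return 0;
}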
@@ -95,6 +95,23 @@ VideoStore::VideoStore(
 Debug(2,"Copied video context from input stream");
     zm_dump_codec(video_in_ctx);
 #endif
+    // Fix deprecated formats
+    switch ( video_in_ctx->pix_fmt ) {
+      case AV_PIX_FMT_YUVJ420P :
+        video_in_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
+        break;
+      case AV_PIX_FMT_YUVJ422P :
+        video_in_ctx->pix_fmt = AV_PIX_FMT_YUV422P;
+        break;
+      case AV_PIX_FMT_YUVJ444P :
+        video_in_ctx->pix_fmt = AV_PIX_FMT_YUV444P;
+        break;
+      case AV_PIX_FMT_YUVJ440P :
+        video_in_ctx->pix_fmt = AV_PIX_FMT_YUV440P;
+        break;
+      default:
+        break;
+    }
   } else {
     Debug(2, "No input ctx");
     video_in_ctx = avcodec_alloc_context3(NULL);
@@ -139,6 +156,24 @@ Debug(2,"Copied video context from input stream");
       Warning("Unsupported Orientation(%d)", orientation);
     }
   }
+    // Fix deprecated formats
+    switch ( video_out_ctx->pix_fmt ) {
+      case AV_PIX_FMT_YUVJ420P :
+        video_out_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
+        break;
+      case AV_PIX_FMT_YUVJ422P :
+        video_out_ctx->pix_fmt = AV_PIX_FMT_YUV422P;
+        break;
+      case AV_PIX_FMT_YUVJ444P :
+        video_out_ctx->pix_fmt = AV_PIX_FMT_YUV444P;
+        break;
+      case AV_PIX_FMT_YUVJ440P :
+        video_out_ctx->pix_fmt = AV_PIX_FMT_YUV440P;
+        break;
+      default:
+        break;
+    }
+
   } else {
 
     /** Create a new frame to store the */
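The two switches added in these VideoStore hunks apply the same mapping to video_in_ctx and video_out_ctx: the deprecated full-range "J" pixel formats are rewritten to their regular planar counterparts so newer libavcodec code paths stop complaining about them. The mapping expressed as a helper, for reference only (the function is not part of the commit); note that the J formats also imply JPEG/full colour range, and that information is silently dropped by the substitution:

// Equivalent mapping as a helper; any AVCOL_RANGE_JPEG handling is left to the caller.
extern "C" {
#include <libavutil/pixfmt.h>
}

static AVPixelFormat fix_deprecated_pix_fmt(AVPixelFormat fmt) {
  switch ( fmt ) {
    case AV_PIX_FMT_YUVJ420P: return AV_PIX_FMT_YUV420P;
    case AV_PIX_FMT_YUVJ422P: return AV_PIX_FMT_YUV422P;
    case AV_PIX_FMT_YUVJ444P: return AV_PIX_FMT_YUV444P;
    case AV_PIX_FMT_YUVJ440P: return AV_PIX_FMT_YUV440P;
    default:                  return fmt;
  }
}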
@@ -462,8 +497,8 @@ bool VideoStore::open() {
 } // end bool VideoStore::open()
 
 void VideoStore::write_audio_packet( AVPacket &pkt ) {
-  Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts,
-      pkt.dts, pkt.duration);
+  //Debug(2, "writing audio packet pts(%d) dts(%d) duration(%d)", pkt.pts,
+  //    pkt.dts, pkt.duration);
   pkt.pts = audio_next_pts;
   pkt.dts = audio_next_dts;
 
@@ -474,7 +509,7 @@ void VideoStore::write_audio_packet( AVPacket &pkt ) {
   audio_next_pts += pkt.duration;
   audio_next_dts += pkt.duration;
 
-  Debug(2, "writing flushed packet pts(%d) dts(%d) duration(%d)", pkt.pts,
+  Debug(2, "writing audio packet pts(%d) dts(%d) duration(%d)", pkt.pts,
       pkt.dts, pkt.duration);
   pkt.stream_index = audio_out_stream->index;
   av_interleaved_write_frame(oc, &pkt);
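These two write_audio_packet() hunks only change logging: the first Debug call is commented out and the wording of the second is corrected from "flushed" to "audio". The retiming around them is untouched; each packet is restamped from running audio_next_pts/audio_next_dts counters, which then advance by the packet duration so the output stays monotonic. A minimal model of that bookkeeping, using an invented AAC-style frame duration:

// Toy model of the audio_next_pts/audio_next_dts bookkeeping in write_audio_packet().
#include <cstdint>
#include <cstdio>

int main() {
  int64_t audio_next_pts = 0, audio_next_dts = 0;
  const int durations[] = { 1024, 1024, 1024 };  // e.g. AAC frames, in stream time-base units
  for ( int duration : durations ) {
    // pkt.pts = audio_next_pts; pkt.dts = audio_next_dts;
    printf("pts=%lld dts=%lld duration=%d\n",
           (long long)audio_next_pts, (long long)audio_next_dts, duration);
    audio_next_pts += duration;
    audio_next_dts += duration;
  }
  return 0;
}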
@@ -591,18 +626,18 @@ Debug(3, "dts:%d, pts:%d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
   // allocation/de-allocation constantly, or whether we can just re-use it.
   // Just do a file open/close/writeheader/etc.
   // What if we were only doing audio recording?
-  if ( video_out_stream ) {
-    avcodec_close(video_out_ctx);
-    video_out_ctx = NULL;
-    Debug(4, "Success freeing video_out_ctx");
-  }
   // Used by both audio and video conversions
   if ( in_frame ) {
     av_frame_free(&in_frame);
     in_frame = NULL;
   }
+  if ( audio_in_ctx ) {
+    avcodec_free_context(&audio_in_ctx);
+    audio_in_ctx = NULL;
+  }
   if ( audio_out_stream ) {
     avcodec_close(audio_out_ctx);
+    avcodec_free_context(&audio_out_ctx);
     audio_out_ctx = NULL;
 #ifdef HAVE_LIBAVRESAMPLE
     if ( resample_ctx ) {
@@ -625,6 +660,7 @@ Debug(3, "dts:%d, pts:%d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
     video_in_ctx = NULL;
   }
   if ( video_out_ctx ) {
+    avcodec_close(video_out_ctx);
     avcodec_free_context(&video_out_ctx);
     video_out_ctx = NULL;
   }
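Taken together, the destructor hunks drop the redundant early video_out_ctx block, add the missing avcodec_free_context() calls for audio_in_ctx and audio_out_ctx, and close video_out_ctx before freeing it. One hedged observation that makes the diff easier to read: in current FFmpeg, avcodec_free_context() is a no-op when the context pointer is still NULL and it nulls the pointer it is given, so the explicit "= NULL" assignments are defensive rather than required. A compact illustration of the resulting teardown order; the member names mirror the diff, but the wrapper type is invented:

// Illustrative teardown mirroring the destructor after this commit (not ZoneMinder code).
extern "C" {
#include <libavcodec/avcodec.h>
}

struct VideoStoreTeardownDemo {
  AVCodecContext *audio_in_ctx  = nullptr;
  AVCodecContext *audio_out_ctx = nullptr;
  AVCodecContext *video_in_ctx  = nullptr;
  AVCodecContext *video_out_ctx = nullptr;

  ~VideoStoreTeardownDemo() {
    avcodec_free_context(&audio_in_ctx);   // safe even if never allocated
    if ( audio_out_ctx ) {
      avcodec_close(audio_out_ctx);
      avcodec_free_context(&audio_out_ctx);
    }
    avcodec_free_context(&video_in_ctx);
    if ( video_out_ctx ) {
      avcodec_close(video_out_ctx);
      avcodec_free_context(&video_out_ctx);
    }
  }
};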
@@ -995,6 +1031,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
       opkt.dts = opkt.pts = ( zm_packet->timestamp.tv_sec*1000000 + zm_packet->timestamp.tv_usec ) - video_last_pts;
     }
   }
+  opkt.duration = 0;
 
   int keyframe = opkt.flags & AV_PKT_FLAG_KEY;
   Debug(3, "dts:%d, pts:%d, keyframe:%d", opkt.dts, opkt.pts, keyframe );
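The functional change here is the added opkt.duration = 0;. Instead of carrying over whatever duration the input packet had, the output packet's duration is marked unknown (0), and the muxer will typically derive it from the following packet's timestamps. The pts/dts line above it converts the capture timeval to microseconds relative to the stored video_last_pts value; a toy version of that arithmetic with invented times:

// Toy version of the timestamp arithmetic in writeVideoFramePacket().
#include <cstdint>
#include <cstdio>
#include <sys/time.h>

int main() {
  struct timeval reference = { 1000, 250000 };  // when video_last_pts was recorded
  struct timeval timestamp = { 1000, 450000 };  // this packet's capture time
  int64_t video_last_pts = reference.tv_sec * 1000000LL + reference.tv_usec;
  int64_t pts = (timestamp.tv_sec * 1000000LL + timestamp.tv_usec) - video_last_pts;
  printf("opkt.pts = opkt.dts = %lld us, opkt.duration = 0\n", (long long)pts);
  return 0;
}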