more debug

Isaac Connor 2017-12-08 13:46:02 -05:00
parent 533b370a81
commit aff08358f4
4 changed files with 68 additions and 47 deletions


@@ -230,12 +230,14 @@ static void zm_log_fps(double d, const char *postfix) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
void zm_dump_codecpar ( const AVCodecParameters *par ) {
Debug(1, "Dumping codecpar codec_type(%d) codec_id(%d) codec_tag(%d) width(%d) height(%d)",
Debug(1, "Dumping codecpar codec_type(%d) codec_id(%d) codec_tag(%d) width(%d) height(%d) bit_rate(%d) foramt(%d)",
par->codec_type,
par->codec_id,
par->codec_tag,
par->width,
par->height
par->height,
par->bit_rate,
par->format
);
}
#endif
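
For reference, a minimal standalone sketch (not part of this commit) of dumping the same AVCodecParameters fields; note that bit_rate is an int64_t, so PRId64 is a safer format specifier than the %d used in the Debug() call above.

#include <cstdio>
#include <cinttypes>
extern "C" {
#include <libavcodec/avcodec.h>
}

static void dump_codecpar(const AVCodecParameters *par) {
  // Mirrors the Debug() call above, but with width-correct format specifiers.
  fprintf(stderr,
          "codec_type(%d) codec_id(%d) codec_tag(%u) width(%d) height(%d)"
          " bit_rate(%" PRId64 ") format(%d)\n",
          par->codec_type, par->codec_id, par->codec_tag,
          par->width, par->height, par->bit_rate, par->format);
}
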


@@ -1245,11 +1245,12 @@ bool Monitor::Analyse() {
return false;
}
// if have event, sent frames until we find a video packet, at which point do analysis. Adaptive skip should only affect which frames we do analysis on.
// if have event, send frames until we find a video packet, at which point do analysis. Adaptive skip should only affect which frames we do analysis on.
int packets_processed = 0;
ZMPacket *snap;
// Is it possible for snap->score to not be -1?
while ( ( snap = packetqueue->get_analysis_packet() ) && ( snap->score == -1 ) ) {
snap->lock();
unsigned int index = snap->image_index;
@@ -1522,14 +1523,15 @@ Error("Already have event!");
// We can't just loop here forever, because we may be capturing just as fast, and never leave the loop.
// Only loop until we hit the analysis index
while ( ( queued_packet = packetqueue->popPacket() ) ) {
Debug(2,"adding packet (%d) qp lwindex(%d), written(%d)", queued_packet->image_index, last_write, written );
Debug(2,"adding packet (%d) qp last_write_index(%d), written(%d)", queued_packet->image_index, last_write, written );
if ( snap == queued_packet ) {
event->AddPacket( queued_packet );
packetqueue->increment_analysis_it();
// Pop may have already incremented it
//packetqueue->increment_analysis_it();
break;
} else {
queued_packet->lock();
Debug(2,"adding packet (%d) qp lwindex(%d), written(%d)", queued_packet->image_index, last_write, written );
Debug(2,"adding packet (%d) qp last_write_index(%d), written(%d)", queued_packet->image_index, last_write, written );
event->AddPacket( queued_packet );
queued_packet->unlock();
}
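
A simplified, hypothetical sketch of the catch-up loop above (the names are illustrative, not the actual Monitor/Event API): pop queued packets into the event until the packet currently under analysis has been written, so the writer can never spin forever while capture keeps up.

#include <list>

struct Packet { int image_index; };

// Drain 'queue' into the event via 'add_to_event' until 'analysis_target' has
// been written; returns the number of packets handed to the event.
static int drain_until(std::list<Packet*> &queue,
                       const Packet *analysis_target,
                       void (*add_to_event)(Packet*)) {
  int written = 0;
  while ( !queue.empty() ) {
    Packet *p = queue.front();
    queue.pop_front();
    add_to_event(p);
    ++written;
    if ( p == analysis_target )   // stop once we've caught up to the analysis packet
      break;
  }
  return written;
}
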


@@ -38,12 +38,12 @@ bool zm_packetqueue::queuePacket( ZMPacket* zm_packet ) {
if ( zm_packet->image_index != -1 ) {
// If we can never queue the same packet, then they can never go past
if ( zm_packet->image_index == first_video_packet_index ) {
Debug(2, "queuing packet that is already on the queue");
Debug(2, "queuing packet that is already on the queue(%d)", zm_packet->image_index );
ZMPacket *p;
while ( (p = pktQueue.front()) && ( p->image_index != zm_packet->image_index ) ) {
if ( *analysis_it == p ) {
while ( pktQueue.size() && (p = pktQueue.front()) && ( p->image_index != zm_packet->image_index ) ) {
if ( ( analysis_it != pktQueue.end() ) && ( *analysis_it == p ) ) {
Debug(2, "Increasing analysis_it");
analysis_it ++;
++analysis_it;
}
pktQueue.pop_front();
@@ -51,11 +51,14 @@ bool zm_packetqueue::queuePacket( ZMPacket* zm_packet ) {
Debug(2, "Descreasing video_packet_count (%d)", video_packet_count);
video_packet_count -= 1;
} else {
Debug(2, "Deleteing audio frame(%d)", p->image_index);
delete p;
p = NULL;
}
Debug(2,"pktQueue.size(%d)", pktQueue.size() );
} // end while there are packets at the head of the queue that are not this one
if ( p->image_index == zm_packet->image_index ) {
if ( p && ( p->image_index == zm_packet->image_index ) ) {
// it should
video_packet_count -= 1;
pktQueue.pop_front();
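
A simplified, hypothetical sketch of the wrap handling in this hunk (ZMPacket and the queue internals are reduced to just the fields used here): a packet arriving with the head video packet's image_index means the capture ring buffer has lapped the queue, so everything up to and including the stale copy is dropped while keeping the analysis iterator valid.

#include <list>

struct Pkt { int image_index; };   // image_index == -1 marks an audio packet

static void drop_through_stale(std::list<Pkt*> &q,
                               std::list<Pkt*>::iterator &analysis_it,
                               int incoming_index, int &video_packet_count) {
  Pkt *p = nullptr;
  while ( q.size() && (p = q.front()) && (p->image_index != incoming_index) ) {
    if ( (analysis_it != q.end()) && (*analysis_it == p) )
      ++analysis_it;               // keep the analysis iterator from dangling
    q.pop_front();
    if ( p->image_index == -1 )
      delete p;                    // audio packets are owned by the queue
    else
      video_packet_count -= 1;     // video packets are owned by the image buffer
  }
  if ( p && (p->image_index == incoming_index) ) {
    video_packet_count -= 1;
    q.pop_front();                 // finally drop the stale copy itself
  }
}
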
@@ -100,6 +103,7 @@ ZMPacket* zm_packetqueue::popPacket( ) {
if ( packet->codec_type == AVMEDIA_TYPE_VIDEO ) {
video_packet_count -= 1;
if ( video_packet_count ) {
// There is another video packet, so it must be the next one
first_video_packet_index += 1;
first_video_packet_index %= max_video_packet_count;
} else {
@@ -165,20 +169,31 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
packet = pktQueue.front();
if ( *analysis_it == packet )
analysis_it ++;
if ( packet->codec_type == AVMEDIA_TYPE_VIDEO )
if ( packet->codec_type == AVMEDIA_TYPE_VIDEO ) {
video_packet_count -= 1;
if ( video_packet_count ) {
// There is another video packet, so it must be the next one
first_video_packet_index += 1;
first_video_packet_index %= max_video_packet_count;
} else {
first_video_packet_index = -1;
}
}
pktQueue.pop_front();
if ( packet->image_index == -1 )
delete packet;
delete_count += 1;
} // while our iterator is not the first packet
#if 0
if ( pktQueue.size() ) {
packet = pktQueue.front();
first_video_packet_index = packet->image_index;
} else {
first_video_packet_index = -1;
}
#endif
Debug(3, "Deleted (%d) packets", delete_count );
return delete_count;
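
A hypothetical helper capturing the bookkeeping repeated in popPacket() and clearQueue() above: when the head video packet leaves the queue, the expected head index either advances around the capture ring or resets to -1 once no video packets remain.

static void on_head_video_packet_removed(int &first_video_packet_index,
                                         int &video_packet_count,
                                         int max_video_packet_count) {
  video_packet_count -= 1;
  if ( video_packet_count ) {
    // Another video packet is queued, so it must occupy the next ring slot.
    first_video_packet_index = (first_video_packet_index + 1) % max_video_packet_count;
  } else {
    first_video_packet_index = -1;   // no video packets left in the queue
  }
}
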
@@ -215,8 +230,8 @@ ZMPacket *zm_packetqueue::get_analysis_packet() {
if ( analysis_it == pktQueue.end() )
return NULL;
Debug(2, "Distance from head: (%d)", std::distance( pktQueue.begin(), analysis_it ) );
Debug(2, "Distance from end: (%d)", std::distance( analysis_it, pktQueue.end() ) );
//Debug(2, "Distance from head: (%d)", std::distance( pktQueue.begin(), analysis_it ) );
//Debug(2, "Distance from end: (%d)", std::distance( analysis_it, pktQueue.end() ) );
return *analysis_it;
} // end ZMPacket *zm_packetqueue::get_analysis_packet()


@@ -177,7 +177,7 @@ Debug(2,"Using mjpeg");
} else {
pf =
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
video_in_stream->codecpar->format;
(AVPixelFormat)video_in_stream->codecpar->format;
#else
video_in_stream->codec->pix_fmt;
#endif
@@ -289,8 +289,8 @@ Debug(2,"Using mjpeg");
/* video time_base can be set to whatever is handy and supported by encoder */
video_out_ctx->time_base = (AVRational){1, 1000}; // milliseconds as the time base
video_out_ctx->gop_size = 12;
video_out_ctx->qmin = 10;
video_out_ctx->qmax = 51;
video_out_ctx->qmin = 2;
video_out_ctx->qmax = 31;
video_out_ctx->qcompress = 0.6;
video_out_ctx->bit_rate = 400000;
video_out_ctx->max_b_frames = 1;
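
For reference, a hedged sketch (assuming FFmpeg 3.x-era APIs and the mjpeg re-encode path suggested by the hunk context, not the actual VideoStore code) of an encoder context using the 2..31 quantizer range this hunk switches to: 51 is the H.264 quantizer maximum, while MPEG-style quantizers top out at 31.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/pixfmt.h>
}

// Illustrative only: allocate an MJPEG encoder context with MPEG-style quantizer bounds.
static AVCodecContext *alloc_mjpeg_ctx(int width, int height) {
  AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
  if ( !codec ) return nullptr;
  AVCodecContext *ctx = avcodec_alloc_context3(codec);
  if ( !ctx ) return nullptr;
  ctx->width = width;
  ctx->height = height;
  ctx->pix_fmt = AV_PIX_FMT_YUVJ420P;      // full-range YUV commonly expected by mjpeg
  ctx->time_base = (AVRational){1, 1000};  // milliseconds, as in the hunk above
  ctx->gop_size = 12;
  ctx->qmin = 2;                           // MPEG-style quantizers run 1..31 ...
  ctx->qmax = 31;                          // ... so 2..31 is the usable range here
  ctx->bit_rate = 400000;
  return ctx;                              // caller still needs avcodec_open2()
}
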
@@ -339,7 +339,7 @@ Debug(2,"Using mjpeg");
return false;
}
}
} // end if can't open codec
Debug(2,"Sucess opening codec");
AVDictionaryEntry *e = NULL;
while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
@@ -714,6 +714,7 @@ VideoStore::~VideoStore() {
}
bool VideoStore::setup_resampler() {
// I think this is unnecessary; we should be able to just pass in the decoder from the input.
#ifdef HAVE_LIBAVRESAMPLE
// Newer ffmpeg wants to keep everything separate... so have to lookup our own
// decoder, can't reuse the one from the camera.
@@ -740,7 +741,6 @@ bool VideoStore::setup_resampler() {
audio_out_stream = avformat_new_stream(oc, audio_out_codec);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// audio_out_ctx = audio_out_stream->codec;
audio_out_ctx = avcodec_alloc_context3(audio_out_codec);
if ( !audio_out_ctx ) {
Error("could not allocate codec ctx for AAC\n");
@@ -754,10 +754,9 @@ bool VideoStore::setup_resampler() {
/* put sample parameters */
audio_out_ctx->bit_rate = audio_in_ctx->bit_rate;
audio_out_ctx->sample_rate = audio_in_ctx->sample_rate;
audio_out_ctx->sample_fmt = audio_in_ctx->sample_fmt;
audio_out_ctx->channels = audio_in_ctx->channels;
audio_out_ctx->channel_layout = audio_in_ctx->channel_layout;
audio_out_ctx->sample_fmt = audio_in_ctx->sample_fmt;
//audio_out_ctx->refcounted_frames = 1;
if ( audio_out_codec->supported_samplerates ) {
int found = 0;
@@ -785,21 +784,10 @@ bool VideoStore::setup_resampler() {
audio_out_ctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
}
audio_out_ctx->time_base = (AVRational){1, audio_out_ctx->sample_rate};
// Example code doesn't set the codec tb. I think it just uses whatever defaults
//audio_out_ctx->time_base = (AVRational){1, audio_out_ctx->sample_rate};
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
if ( (ret = avcodec_parameters_from_context(audio_out_stream->codecpar,
audio_out_ctx)) < 0 ) {
Error("Could not initialize stream parameteres");
return false;
}
audio_out_stream->codecpar->frame_size = audio_out_ctx->frame_size;
#else
avcodec_copy_context( audio_out_stream->codec, audio_out_ctx );
#endif
audio_out_stream->time_base = (AVRational){1, audio_out_ctx->sample_rate};
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0); // Needed to allow AAC
ret = avcodec_open2(audio_out_ctx, audio_out_codec, &opts);
@@ -812,8 +800,22 @@ bool VideoStore::setup_resampler() {
return false;
}
audio_out_stream->time_base = (AVRational){1, audio_out_ctx->sample_rate};
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
if ( (ret = avcodec_parameters_from_context(
audio_out_stream->codecpar,
audio_out_ctx)) < 0 ) {
Error("Could not initialize stream parameteres");
return false;
}
//audio_out_stream->codecpar->frame_size = audio_out_ctx->frame_size;
//audio_out_stream->codecpar->bit_rate = audio_out_ctx->bit_rate;
#else
avcodec_copy_context( audio_out_stream->codec, audio_out_ctx );
#endif
Debug(1,
"Audio out bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) "
"Audio out context bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) "
"layout(%d) frame_size(%d)",
audio_out_ctx->bit_rate, audio_out_ctx->sample_rate,
audio_out_ctx->channels, audio_out_ctx->sample_fmt,
@@ -821,7 +823,7 @@ bool VideoStore::setup_resampler() {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
Debug(1,
"Audio out bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) "
"Audio out stream bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) "
"layout(%d) frame_size(%d)",
audio_out_stream->codecpar->bit_rate, audio_out_stream->codecpar->sample_rate,
audio_out_stream->codecpar->channels, audio_out_stream->codecpar->format,
@@ -849,6 +851,7 @@ bool VideoStore::setup_resampler() {
av_frame_free(&in_frame);
return false;
}
out_frame->sample_rate = audio_out_ctx->sample_rate;
// Setup the audio resampler
resample_ctx = avresample_alloc_context();
@@ -857,13 +860,13 @@ bool VideoStore::setup_resampler() {
return false;
}
uint64_t mono_layout = av_get_channel_layout("mono");
// Some formats (i.e. WAV) do not produce the proper channel layout
if ( audio_in_ctx->channel_layout == 0 ) {
uint64_t layout = av_get_channel_layout("mono");
av_opt_set_int(resample_ctx, "in_channel_layout",
av_get_channel_layout("mono"), 0);
Debug(1, "Bad channel layout. Need to set it to mono (%d).", layout);
av_opt_set_int(resample_ctx, "in_channel_layout", mono_layout, 0);
Debug(1, "Bad channel layout. Need to set it to mono (%d).", mono_layout);
} else {
Debug(1, "channel layout. set it to mono (%d).", audio_in_ctx->channel_layout);
av_opt_set_int(resample_ctx, "in_channel_layout",
audio_in_ctx->channel_layout, 0);
}
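
A hedged sketch of the resampler-setup pattern in this hunk, assuming libavresample and simplified names: fall back to a mono input layout when the decoder context reports none, as happens with some WAV sources, then open the context.

extern "C" {
#include <libavresample/avresample.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
}

static AVAudioResampleContext *open_mono_resampler(const AVCodecContext *in,
                                                   const AVCodecContext *out) {
  AVAudioResampleContext *ctx = avresample_alloc_context();
  if ( !ctx ) return nullptr;
  uint64_t mono_layout = av_get_channel_layout("mono");
  // Some inputs (e.g. WAV) report no layout; substitute mono rather than failing.
  uint64_t in_layout = in->channel_layout ? in->channel_layout : mono_layout;
  av_opt_set_int(ctx, "in_channel_layout",  in_layout, 0);
  av_opt_set_int(ctx, "in_sample_fmt",      in->sample_fmt, 0);
  av_opt_set_int(ctx, "in_sample_rate",     in->sample_rate, 0);
  av_opt_set_int(ctx, "in_channels",        in->channels, 0);
  av_opt_set_int(ctx, "out_channel_layout", mono_layout, 0);
  av_opt_set_int(ctx, "out_sample_fmt",     out->sample_fmt, 0);
  av_opt_set_int(ctx, "out_sample_rate",    out->sample_rate, 0);
  av_opt_set_int(ctx, "out_channels",       1, 0);
  if ( avresample_open(ctx) < 0 ) {
    avresample_free(&ctx);
    return nullptr;
  }
  return ctx;
}
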
@@ -873,8 +876,7 @@ bool VideoStore::setup_resampler() {
av_opt_set_int(resample_ctx, "in_channels", audio_in_ctx->channels, 0);
// av_opt_set_int( resample_ctx, "out_channel_layout",
// audio_out_ctx->channel_layout, 0);
av_opt_set_int(resample_ctx, "out_channel_layout",
av_get_channel_layout("mono"), 0);
av_opt_set_int(resample_ctx, "out_channel_layout", mono_layout, 0);
av_opt_set_int(resample_ctx, "out_sample_fmt",
audio_out_ctx->sample_fmt, 0);
av_opt_set_int(resample_ctx, "out_sample_rate",
@@ -926,7 +928,7 @@ void VideoStore::dumpPacket(AVPacket *pkt) {
snprintf(b, sizeof(b),
" pts: %" PRId64 ", dts: %" PRId64
", data: %p, size: %d, sindex: %d, dflags: %04x, s-pos: %" PRId64
", data: %p, size: %d, stream_index: %d, dflags: %04x, pos: %" PRId64
", duration: %"
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
PRId64
@@ -1041,6 +1043,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
zm_packet->out_frame->coded_picture_number = frame_count;
zm_packet->out_frame->display_picture_number = frame_count;
zm_packet->out_frame->sample_aspect_ratio = (AVRational){ 0, 1 };
zm_packet->out_frame->pkt_duration = 0;
if ( ! video_last_pts ) {
int64_t temp = zm_packet->timestamp->tv_sec*1000;
@@ -1051,11 +1054,10 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
Debug(2, "No video_lsat_pts, set to (%" PRId64 ") secs(%d) usecs(%d)",
video_last_pts, zm_packet->timestamp->tv_sec, zm_packet->timestamp->tv_usec );
zm_packet->out_frame->pts = 0;
zm_packet->out_frame->pkt_duration = 0;
} else {
//uint64_t seconds = zm_packet->timestamp->tv_sec*1000000;
zm_packet->out_frame->pts = ( zm_packet->timestamp->tv_sec*1000 + zm_packet->timestamp->tv_usec/1000 ) - video_last_pts;
zm_packet->out_frame->duration = zm_packet->out_frame->pts - video_last_pts;
zm_packet->out_frame->pkt_duration = zm_packet->out_frame->pts - video_last_pts;
Debug(2, " Setting pts for frame(%d), set to (%" PRId64 ") from (%" PRId64 " - secs(%d) usecs(%d)",
frame_count, zm_packet->out_frame->pts, video_last_pts, zm_packet->timestamp->tv_sec, zm_packet->timestamp->tv_usec );
}
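
A hypothetical helper matching the millisecond pts arithmetic above, assuming (as the surrounding code suggests) that video_last_pts holds the first frame's wall-clock time in milliseconds: the pts is simply the time elapsed since that base, expressed in the encoder's 1/1000 time base.

#include <sys/time.h>
#include <cstdint>

// Milliseconds elapsed between 'now' and the stored base timestamp.
static int64_t pts_ms_since(const struct timeval *now, int64_t base_ms) {
  int64_t now_ms = (int64_t)now->tv_sec * 1000 + now->tv_usec / 1000;
  return now_ms - base_ms;   // 0 for the first frame, when base_ms was just captured
}
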
@@ -1124,8 +1126,8 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
opkt.dts = opkt.pts = ( zm_packet->timestamp->tv_sec*1000 + zm_packet->timestamp->tv_usec/1000 ) - video_last_pts;
}
}
opkt.duration = 0;
dumpPacket(&opkt);
write_video_packet( opkt );
@@ -1245,7 +1247,7 @@ int VideoStore::writeAudioFramePacket(ZMPacket *zm_packet) {
// Resample the input into the audioSampleBuffer until we have processed the whole
// decoded frame
if ((ret = avresample_convert(resample_ctx, NULL, 0, 0, in_frame->data,
0, frame_size)) < 0) {
0, in_frame->nb_samples)) < 0) {
Error("Could not resample frame (error '%s')\n",
av_make_error_string(ret).c_str());
av_frame_unref(in_frame);
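
For context, a hedged sketch of the convert/read pattern this fix points toward, assuming libavresample and the surrounding frame variables: convert exactly in_frame->nb_samples (the decoder's sample count, which need not match the encoder's frame_size), then drain encoder-sized frames from the resampler FIFO as they become available.

extern "C" {
#include <libavresample/avresample.h>
#include <libavutil/frame.h>
}

// Convert one decoded frame's samples, then drain full encoder-sized frames.
static int resample_and_drain(AVAudioResampleContext *resample_ctx,
                              AVFrame *in_frame, AVFrame *out_frame) {
  int ret = avresample_convert(resample_ctx, NULL, 0, 0,
                               in_frame->data, 0, in_frame->nb_samples);
  if ( ret < 0 )
    return ret;                       // caller logs via av_make_error_string()
  int frames_out = 0;
  while ( avresample_available(resample_ctx) >= out_frame->nb_samples ) {
    if ( avresample_read(resample_ctx, out_frame->data, out_frame->nb_samples) < 0 )
      break;
    ++frames_out;                     // caller would encode out_frame here
  }
  return frames_out;
}
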