fix tabs/shiftwidth
This commit is contained in:
parent 5eebc50bfb
commit b41a3b1b25

@@ -29,7 +29,7 @@
#include "zm_videostore.h"
|
#include "zm_videostore.h"
|
||||||
|
|
||||||
extern "C"{
|
extern "C"{
|
||||||
#include "libavutil/time.h"
|
#include "libavutil/time.h"
|
||||||
}
|
}
|
||||||
|
|
||||||
VideoStore::VideoStore(const char *filename_in, const char *format_in,
|
VideoStore::VideoStore(const char *filename_in, const char *format_in,
|
||||||
|
@ -37,313 +37,313 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
|
||||||
                       AVStream *inpaud_st,
                       int64_t nStartTime) {

  AVDictionary *pmetadata = NULL;
  int dsr;

  //store inputs in variables local to class
  filename = filename_in;
  format = format_in;

  keyframeMessage = false;
  keyframeSkipNumber = 0;

  Info("Opening video storage stream %s format: %d\n", filename, format);

  //Init everything we need
  int ret;
  av_register_all();

  ret = avformat_alloc_output_context2(&oc, NULL, NULL, filename);
  if ( ret < 0 ) {
    Warning("Could not create video storage stream %s as no output context"
            " could be assigned based on filename: %s",
            filename,
            av_make_error_string(ret).c_str()
           );
  }

  //Couldn't deduce format from filename, trying from format name
  if (!oc) {
    avformat_alloc_output_context2(&oc, NULL, format, filename);
    if (!oc) {
      Fatal("Could not create video storage stream %s as no output context"
            " could not be assigned based on filename or format %s",
            filename, format);
    }
  }

  dsr = av_dict_set(&pmetadata, "title", "Zoneminder Security Recording", 0);
  if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__ );

  oc->metadata = pmetadata;

  fmt = oc->oformat;

  video_st = avformat_new_stream(oc, (AVCodec *)input_st->codec->codec);
  if (!video_st) {
    Fatal("Unable to create video out stream\n");
  }

  ret = avcodec_copy_context(video_st->codec, input_st->codec);
  if (ret < 0) {
    Fatal("Unable to copy input video context to output video context %s\n",
          av_make_error_string(ret).c_str());
  }

  video_st->codec->codec_tag = 0;
  if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
    video_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
  }

  if (inpaud_st) {
    audio_st = avformat_new_stream(oc, (AVCodec *)inpaud_st->codec->codec);
    if (!audio_st) {
      Error("Unable to create audio out stream\n");
      audio_st = NULL;
    } else {
      ret = avcodec_copy_context(audio_st->codec, inpaud_st->codec);
      if (ret < 0) {
        Fatal("Unable to copy audio context %s\n", av_make_error_string(ret).c_str());
      }
      audio_st->codec->codec_tag = 0;
      if ( audio_st->codec->channels > 1 ) {
        Warning("Audio isn't mono, changing it.");
        audio_st->codec->channels = 1;
      }
      if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
        audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
      }
    }
  } else {
    Debug(3, "No Audio output stream");
    audio_st = NULL;
  }

  /* open the output file, if needed */
  if (!(fmt->flags & AVFMT_NOFILE)) {
    ret = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
    if (ret < 0) {
      Fatal("Could not open output file '%s': %s\n", filename,
            av_make_error_string(ret).c_str());
    }
  }

  //av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
  //if ((ret = avformat_write_header(ctx, &opts)) < 0) {
  //}
  //os->ctx_inited = 1;
  //avio_flush(ctx->pb);
  //av_dict_free(&opts);

  /* Write the stream header, if any. */
  ret = avformat_write_header(oc, NULL);
  if (ret < 0) {
    zm_dump_stream_format( oc, 0, 0, 1 );
    Fatal("Error occurred when writing output file header to %s: %s\n",
          filename,
          av_make_error_string(ret).c_str());
  }

  prevDts = 0;
  startPts = 0;
  startDts = 0;
  filter_in_rescale_delta_last = AV_NOPTS_VALUE;

  startTime = av_gettime() - nStartTime; //oc->start_time;
  Info("VideoStore startTime=%d\n", startTime);
} // VideoStore::VideoStore

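A note on the startTime arithmetic above: av_gettime() returns wall-clock time in microseconds (AV_TIME_BASE units), so startTime is an elapsed microsecond count, and the packet writers below convert it into each output stream's time base with av_rescale_q(). The following standalone sketch is not part of this commit; the 1/90000 video time base is assumed purely for illustration.

// Sketch only: how a microsecond start time maps into an assumed stream time base.
#include <cstdio>
extern "C" {
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>
}

int main() {
  int64_t nStartTime = av_gettime();              // microseconds at "event start"
  int64_t startTime  = av_gettime() - nStartTime; // elapsed microseconds, as in the constructor
  AVRational video_tb = {1, 90000};               // assumed output video time base
  int64_t ost_tb_start_time = av_rescale_q(startTime, AV_TIME_BASE_Q, video_tb);
  printf("startTime=%lld us -> %lld ticks at 1/90000\n",
         (long long)startTime, (long long)ost_tb_start_time);
  return 0;
}

Building the sketch needs only libavutil, e.g. g++ sketch.cpp -lavutil.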

VideoStore::~VideoStore(){
  /* Write the trailer before close */
  if ( int rc = av_write_trailer(oc) ) {
    Error("Error writing trailer %s", av_err2str( rc ) );
  } else {
    Debug(3, "Success writing trailer");
  }

  // I wonder if we should be closing the file first.
  // I also wonder if we really need to be doing all the context allocation/de-allocation constantly, or whether we can just re-use it. Just do a file open/close/writeheader/etc.
  // What if we were only doing audio recording?
  if ( video_st ) {
    avcodec_close(video_st->codec);
  }
  if (audio_st) {
    avcodec_close(audio_st->codec);
  }

  // When will we not be using a file?
  if (!(fmt->flags & AVFMT_NOFILE)) {
    /* Close the output file. */
    if ( int rc = avio_close(oc->pb) ) {
      Error("Error closing avio %s", av_err2str( rc ) );
    }
  } else {
    Debug(3, "Not closing avio because we are not writing to a file.");
  }

  /* free the stream */
  avformat_free_context(oc);
}

void VideoStore::dumpPacket( AVPacket *pkt ){
  char b[10240];

  snprintf(b, sizeof(b), " pts: %" PRId64 ", dts: %" PRId64 ", data: %p, size: %d, sindex: %d, dflags: %04x, s-pos: %" PRId64 ", c-duration: %" PRId64 "\n"
           , pkt->pts
           , pkt->dts
           , pkt->data
           , pkt->size
           , pkt->stream_index
           , pkt->flags
           , pkt->pos
           , pkt->convergence_duration
           );
  Info("%s:%d:DEBUG: %s", __FILE__, __LINE__, b);
}

int VideoStore::writeVideoFramePacket(AVPacket *ipkt, AVStream *input_st){//, AVPacket *lastKeyframePkt){

  int64_t ost_tb_start_time = av_rescale_q(startTime, AV_TIME_BASE_Q, video_st->time_base);

  AVPacket opkt, safepkt;
  AVPicture pict;

  av_init_packet(&opkt);

  //Scale the PTS of the outgoing packet to be the correct time base
  if (ipkt->pts != AV_NOPTS_VALUE) {
    opkt.pts = av_rescale_q(ipkt->pts - startPts, input_st->time_base, video_st->time_base) - ost_tb_start_time;
  } else {
    opkt.pts = AV_NOPTS_VALUE;
  }

  //Scale the DTS of the outgoing packet to be the correct time base
  if (ipkt->dts == AV_NOPTS_VALUE) {
    opkt.dts = av_rescale_q(input_st->cur_dts - startDts, AV_TIME_BASE_Q, video_st->time_base);
  } else {
    opkt.dts = av_rescale_q(ipkt->dts - startDts, input_st->time_base, video_st->time_base);
  }

  opkt.dts -= ost_tb_start_time;

  opkt.duration = av_rescale_q(ipkt->duration, input_st->time_base, video_st->time_base);
  opkt.flags = ipkt->flags;
  opkt.pos = -1;

  opkt.data = ipkt->data;
  opkt.size = ipkt->size;
  opkt.stream_index = ipkt->stream_index;
  /*opkt.flags |= AV_PKT_FLAG_KEY;*/

  if (video_st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (fmt->flags & AVFMT_RAWPICTURE)) {
    /* store AVPicture in AVPacket, as expected by the output format */
    avpicture_fill(&pict, opkt.data, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
    opkt.data = (uint8_t *)&pict;
    opkt.size = sizeof(AVPicture);
    opkt.flags |= AV_PKT_FLAG_KEY;
  }

  memcpy(&safepkt, &opkt, sizeof(AVPacket));

  if ((opkt.data == NULL)||(opkt.size < 1)) {
    Warning("%s:%d: Mangled AVPacket: discarding frame", __FILE__, __LINE__ );
    dumpPacket(&opkt);

  } else if ((prevDts > 0) && (prevDts >= opkt.dts)) {
    Warning("%s:%d: DTS out of order: %lld \u226E %lld; discarding frame", __FILE__, __LINE__, prevDts, opkt.dts);
    prevDts = opkt.dts;
    dumpPacket(&opkt);

  } else {
    int ret;

    prevDts = opkt.dts; // Unsure if av_interleaved_write_frame() clobbers opkt.dts when out of order, so storing in advance
    ret = av_interleaved_write_frame(oc, &opkt);
    if (ret < 0) {
      // There's nothing we can really do if the frame is rejected, just drop it and get on with the next
      Warning("%s:%d: Writing frame [av_interleaved_write_frame()] failed: %s(%d) ", __FILE__, __LINE__, av_make_error_string(ret).c_str(), (ret));
      dumpPacket(&safepkt);
    }
  }

  av_free_packet(&opkt);

  return 0;
}

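The prevDts comparison above is a monotonic-DTS guard: muxers such as mp4 typically require the DTS to increase strictly within a stream, so a packet whose DTS does not advance is logged and dropped rather than passed to av_interleaved_write_frame(). A compressed, self-contained sketch of just that guard follows; it is not ZoneMinder code and the sample DTS values are invented.

// Minimal sketch of the DTS-monotonicity check used by writeVideoFramePacket().
#include <cstdint>
#include <cstdio>

static int64_t prev_dts = 0;

// Returns true if a packet with this DTS should be written.
static bool dts_ok(int64_t dts) {
  bool ok = !(prev_dts > 0 && prev_dts >= dts); // out of order or duplicate -> drop
  prev_dts = dts;  // stored whether we write or discard, mirroring the code above
  return ok;
}

int main() {
  const int64_t dts_seq[] = {0, 3000, 6000, 6000, 9000, 4500, 12000};
  for (int64_t d : dts_seq)
    printf("dts=%lld -> %s\n", (long long)d, dts_ok(d) ? "write" : "drop");
  return 0;
}

As in the function above, the previous DTS is stored whether the packet is written or discarded.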

int VideoStore::writeAudioFramePacket(AVPacket *ipkt, AVStream *input_st){

  if (!audio_st) {
    Error("Called writeAudioFramePacket when no audio_st");
    return -1; //FIXME -ve return codes do not free packet in ffmpeg_camera at the moment
  }
  /*if(!keyframeMessage)
    return -1;*/
  //zm_dump_stream_format( oc, ipkt->stream_index, 0, 1 );

  // What is this doing? Getting the time of the start of this video chunk? Does that actually make sense?
  int64_t ost_tb_start_time = av_rescale_q(startTime, AV_TIME_BASE_Q, audio_st->time_base);

  AVPacket opkt;

  av_init_packet(&opkt);
  Debug(3, "after init packet" );

  //Scale the PTS of the outgoing packet to be the correct time base
  if (ipkt->pts != AV_NOPTS_VALUE) {
    Debug(3, "Rescaling output pts");
    opkt.pts = av_rescale_q(ipkt->pts - startPts, input_st->time_base, audio_st->time_base) - ost_tb_start_time;
  } else {
    Debug(3, "Setting output pts to AV_NOPTS_VALUE");
    opkt.pts = AV_NOPTS_VALUE;
  }

  //Scale the DTS of the outgoing packet to be the correct time base
  if (ipkt->dts == AV_NOPTS_VALUE) {
    Debug(4, "ipkt->dts == AV_NOPTS_VALUE %d to %d", AV_NOPTS_VALUE, opkt.dts );
    opkt.dts = av_rescale_q(input_st->cur_dts - startDts, AV_TIME_BASE_Q, audio_st->time_base);
    Debug(4, "ipkt->dts == AV_NOPTS_VALUE %d to %d", AV_NOPTS_VALUE, opkt.dts );
  } else {
    Debug(4, "ipkt->dts != AV_NOPTS_VALUE %d to %d", AV_NOPTS_VALUE, opkt.dts );
    opkt.dts = av_rescale_q(ipkt->dts - startDts, input_st->time_base, audio_st->time_base);
    Debug(4, "ipkt->dts != AV_NOPTS_VALUE %d to %d", AV_NOPTS_VALUE, opkt.dts );
  }
  opkt.dts -= ost_tb_start_time;

  // Seems like it would be really weird for the codec type to NOT be audio
  if (audio_st->codec->codec_type == AVMEDIA_TYPE_AUDIO && ipkt->dts != AV_NOPTS_VALUE) {
    Debug( 4, "codec is audio, dts != AV_NOPTS_VALUE " );
    int duration = av_get_audio_frame_duration(input_st->codec, ipkt->size);
    if (!duration)
      duration = input_st->codec->frame_size;

    //FIXME where to get filter_in_rescale_delta_last
    //FIXME av_rescale_delta doesn't exist in ubuntu vivid libavtools
    opkt.dts = opkt.pts = av_rescale_delta(input_st->time_base, ipkt->dts,
                                           (AVRational){1, input_st->codec->sample_rate}, duration, &filter_in_rescale_delta_last,
                                           audio_st->time_base) - ost_tb_start_time;
  }

  opkt.duration = av_rescale_q(ipkt->duration, input_st->time_base, audio_st->time_base);
  opkt.pos = -1;
  opkt.flags = ipkt->flags;

  opkt.data = ipkt->data;
  opkt.size = ipkt->size;
  opkt.stream_index = ipkt->stream_index;

  int ret;
  ret = av_interleaved_write_frame(oc, &opkt);
  if (ret != 0) {
    Fatal("Error encoding audio frame packet: %s\n", av_make_error_string(ret).c_str());
  }
  Debug(4, "Success writing audio frame" );
  av_free_packet(&opkt);
  return 0;
}
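The av_rescale_delta() call in writeAudioFramePacket() rescales an audio DTS from the input time base to the output time base while carrying the sub-tick remainder in its last argument; that is why the constructor initialises filter_in_rescale_delta_last to AV_NOPTS_VALUE. Below is a standalone sketch of the same call; the time bases, sample rate and packet duration are illustrative assumptions, not values from this commit.

// Sketch only: av_rescale_delta() outside ZoneMinder, with made-up parameters.
#include <cstdio>
extern "C" {
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
}

int main() {
  AVRational in_tb  = {1, 90000};      // assumed input (demuxer) time base
  AVRational out_tb = {1, 8000};       // assumed output audio stream time base
  int sample_rate   = 8000;            // assumed audio sample rate
  int duration      = 160;             // samples per packet (20 ms at 8 kHz)
  int64_t last      = AV_NOPTS_VALUE;  // plays the role of filter_in_rescale_delta_last

  for (int i = 0; i < 3; i++) {
    int64_t in_dts  = (int64_t)i * 1800;   // 20 ms steps expressed in 1/90000 ticks
    int64_t out_dts = av_rescale_delta(in_tb, in_dts,
                                       (AVRational){1, sample_rate}, duration,
                                       &last, out_tb);
    printf("in_dts=%lld -> out_dts=%lld\n", (long long)in_dts, (long long)out_dts);
  }
  return 0;
}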