Take the first PTS into account when calculating the last PTS used to decide which input to read from.

Isaac Connor 2021-04-11 16:35:16 -04:00
parent dab89622af
commit f077ec6145
3 changed files with 16 additions and 3 deletions
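The idea behind the change: the video and audio inputs can start at different timestamp origins, so comparing their raw last-PTS values can permanently favour one input. Recording the first PTS seen on each stream and keeping the last PTS relative to it makes the two values comparable. A minimal sketch of that normalization, using an illustrative StreamClock type rather than the actual ZoneMinder members:

// Hypothetical illustration of the first-PTS normalization; not ZoneMinder code.
#include <cstdint>
extern "C" {
#include <libavutil/avutil.h>  // AV_NOPTS_VALUE
}

struct StreamClock {
  int64_t first_pts = AV_NOPTS_VALUE;  // first PTS observed on this stream
  int64_t last_pts = 0;                // last PTS, relative to first_pts

  void update(int64_t pts) {
    if (pts == AV_NOPTS_VALUE) return;                 // ignore packets without a timestamp
    if (first_pts == AV_NOPTS_VALUE) first_pts = pts;  // record the stream's origin once
    last_pts = pts - first_pts;                        // progress since the stream started
  }
};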

@@ -55,6 +55,8 @@ Camera::Camera(
   mAudioStream(nullptr),
   mFormatContext(nullptr),
   mSecondFormatContext(nullptr),
+  mFirstVideoPTS(AV_NOPTS_VALUE),  // "no PTS seen yet"; Capture() tests against AV_NOPTS_VALUE
+  mFirstAudioPTS(AV_NOPTS_VALUE),
   mLastVideoPTS(0),
   mLastAudioPTS(0),
   bytes(0)

@@ -58,6 +58,8 @@ protected:
   AVStream *mAudioStream;
   AVFormatContext *mFormatContext; // One for video, one for audio
   AVFormatContext *mSecondFormatContext; // One for video, one for audio
+  int64_t mFirstVideoPTS;
+  int64_t mFirstAudioPTS;
   int64_t mLastVideoPTS;
   int64_t mLastAudioPTS;
   unsigned int bytes;

@@ -200,7 +200,10 @@ int FfmpegCamera::Capture(ZMPacket &zm_packet) {
       ) ) {
     // if audio stream is behind video stream, then read from audio, otherwise video
     mFormatContextPtr = mSecondFormatContext;
-    Debug(4, "Using audio input");
+    Debug(4, "Using audio input because audio PTS %" PRId64 " < video PTS %" PRId64,
+        av_rescale_q(mLastAudioPTS, mAudioStream->time_base, AV_TIME_BASE_Q),
+        av_rescale_q(mLastVideoPTS, mVideoStream->time_base, AV_TIME_BASE_Q)
+        );
   } else {
     mFormatContextPtr = mFormatContext;
     Debug(4, "Using video input because %" PRId64 " >= %" PRId64,
@@ -240,9 +243,15 @@ int FfmpegCamera::Capture(ZMPacket &zm_packet) {
   zm_packet.pts = av_rescale_q(packet.pts, stream->time_base, AV_TIME_BASE_Q);
   if ( packet.pts != AV_NOPTS_VALUE ) {
     if ( stream == mVideoStream ) {
-      mLastVideoPTS = packet.pts;
+      if (mFirstVideoPTS == AV_NOPTS_VALUE)
+        mFirstVideoPTS = packet.pts;
+      mLastVideoPTS = packet.pts - mFirstVideoPTS;
     } else {
-      mLastAudioPTS = packet.pts;
+      if (mFirstAudioPTS == AV_NOPTS_VALUE)
+        mFirstAudioPTS = packet.pts;
+      mLastAudioPTS = packet.pts - mFirstAudioPTS;
     }
   }
   zm_av_packet_unref(&packet);
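
For reference, the selection logic that the Capture() hunks feed into compares the two relative last-PTS values after rescaling them to a common time base. A simplified, self-contained sketch of that comparison; use_audio_input() is a hypothetical helper, not ZoneMinder's actual control flow:

// Hypothetical sketch of the input-selection test; not ZoneMinder code.
#include <cstdint>
extern "C" {
#include <libavutil/avutil.h>       // AV_TIME_BASE_Q
#include <libavutil/mathematics.h>  // av_rescale_q
#include <libavformat/avformat.h>   // AVStream
}

// Read from the audio input when its normalized clock is behind the video clock.
// Both last-PTS values are already relative to their stream's first PTS, but still
// in per-stream time bases, so rescale to microseconds before comparing.
static bool use_audio_input(int64_t last_audio_pts, int64_t last_video_pts,
                            const AVStream *audio_stream, const AVStream *video_stream) {
  int64_t audio_us = av_rescale_q(last_audio_pts, audio_stream->time_base, AV_TIME_BASE_Q);
  int64_t video_us = av_rescale_q(last_video_pts, video_stream->time_base, AV_TIME_BASE_Q);
  return audio_us < video_us;
}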