Merge branch 'zma_to_thread' of github.com:ConnorTechnology/ZoneMinder into zma_to_thread

commit 4217d8ec4d
Author: Isaac Connor
Date: 2017-11-17 16:49:31 -05:00
6 changed files with 43 additions and 6 deletions

src/zm_local_camera.cpp

@@ -650,6 +650,9 @@ LocalCamera::LocalCamera(
     }
   } // end if capture and conversion_tye == swscale
 #endif
+  mVideoStreamId = 0;
+  mAudioStreamId = -1;
+  video_stream = NULL;
 } // end LocalCamera::LocalCamera
 
 LocalCamera::~LocalCamera() {
@@ -2107,5 +2110,32 @@ int LocalCamera::PostCapture() {
   }
   return( 0 );
 }
+
+AVStream *LocalCamera::get_VideoStream() {
+  if ( ! video_stream ) {
+    AVFormatContext *oc = avformat_alloc_context();
+    video_stream = avformat_new_stream( oc, NULL );
+    if ( video_stream ) {
+#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
+      video_stream->codecpar->width = width;
+      video_stream->codecpar->height = height;
+      video_stream->codecpar->format = GetFFMPEGPixelFormat(colours,subpixelorder);
+      video_stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+#else
+      video_stream->codec->width = width;
+      video_stream->codec->height = height;
+      video_stream->codec->pix_fmt = GetFFMPEGPixelFormat(colours,subpixelorder);
+      video_stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+#endif
+    } else {
+      Error("Can't create video stream");
+    }
+  } else {
+    Debug(2,"Have videostream");
+  }
+  Debug(2,"Get videoStream");
+  return video_stream;
+}
 
 #endif // ZM_HAS_V4L
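Note: the new get_VideoStream() lazily creates an AVStream describing the camera's geometry and pixel format, parented to a freshly allocated AVFormatContext. Below is a minimal standalone sketch of that pattern using only the FFmpeg calls visible in the hunk; the make_video_stream name and its parameters are illustrative, and only the codecpar branch (libavcodec >= 57.64) is shown.

extern "C" {
#include <libavformat/avformat.h>
}

static AVStream *cached_video_stream = nullptr;

// Hypothetical helper mirroring the lazy initialization in the hunk above.
AVStream *make_video_stream(int width, int height, AVPixelFormat pix_fmt) {
  if ( !cached_video_stream ) {
    AVFormatContext *oc = avformat_alloc_context();      // the context owns the stream
    cached_video_stream = avformat_new_stream(oc, nullptr);
    if ( cached_video_stream ) {
      cached_video_stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
      cached_video_stream->codecpar->width  = width;
      cached_video_stream->codecpar->height = height;
      cached_video_stream->codecpar->format = pix_fmt;   // e.g. AV_PIX_FMT_YUV420P
    }
  }
  return cached_video_stream;
}

Because avformat_new_stream() attaches the stream to the context it was created from, that context has to outlive any caller holding the returned pointer.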

src/zm_local_camera.h

@@ -159,6 +159,7 @@ public:
   int Capture(ZMPacket &p);
   int PostCapture();
   static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose );
+  AVStream* get_VideoStream();
 };
 
 #endif // ZM_HAS_V4L

src/zm_logger.cpp

@@ -498,7 +498,7 @@ void Logger::logPrint( bool hex, const char * const filepath, const int line, co
   va_start( argPtr, fstring );
   if ( hex ) {
     unsigned char *data = va_arg( argPtr, unsigned char * );
-    int len = va_arg( argPtr, int );
+    int len = va_arg( argPtr, int32_t );
     int i;
     logPtr += snprintf( logPtr, sizeof(logString)-(logPtr-logString), "%d:", len );
     for ( i = 0; i < len; i++ ) {
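Note on the int -> int32_t change: both spellings read the same value here, since arguments narrower than int are promoted when passed through "...", and int32_t is typically a typedef for int on the platforms ZoneMinder targets. A self-contained sketch of the hex path logPrint implements; hex_dump and the buffer size are illustrative, not ZoneMinder code.

#include <cstdarg>
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the hex branch of Logger::logPrint(): the variadic
// arguments are a data pointer followed by its length.
void hex_dump(const char *tag, ...) {
  char line[1024];
  char *ptr = line;
  va_list ap;
  va_start(ap, tag);
  unsigned char *data = va_arg(ap, unsigned char *);
  int len = va_arg(ap, int32_t);   // same as va_arg(ap, int) where int32_t == int
  va_end(ap);
  ptr += snprintf(ptr, sizeof(line) - (ptr - line), "%s %d:", tag, len);
  for ( int i = 0; i < len && (size_t)(ptr - line) < sizeof(line); i++ )
    ptr += snprintf(ptr, sizeof(line) - (ptr - line), " %02x", data[i]);
  printf("%s\n", line);
}

A caller would pass the pointer and length in that order, e.g. hex_dump("packet", buf, (int32_t)buf_len);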

src/zm_monitor.cpp

@@ -600,6 +600,7 @@ Monitor::~Monitor() {
   if ( purpose == ANALYSIS ) {
     shared_data->state = state = IDLE;
+    // I think we set it to the count so that it is technically 1 behind capture, which starts at 0
     shared_data->last_read_index = image_buffer_count;
     shared_data->last_read_time = 0;
@@ -1202,10 +1203,13 @@ bool Monitor::Analyse() {
   ZMPacket *snap = &image_buffer[index];
   if ( snap->packet.stream_index != camera->get_VideoStreamId() ) {
+    Debug(2, "Non video packet in analysis (%d) != (%d)", snap->packet.stream_index, camera->get_VideoStreamId() );
     if ( event ) {
       //event->AddFrame( snap_image, *timestamp, score );
       event->AddPacket( snap, 0 );
     }
+    shared_data->last_read_index = index % image_buffer_count;
+    shared_data->last_read_time = now.tv_sec;
     mutex.unlock();
     return false;
   }
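The point of the second hunk is that even when Analyse() skips a non-video packet, it must still advance the shared read pointer, otherwise the capture side would see a stalled reader. A tiny sketch of that bookkeeping; the SharedData struct and function name below are illustrative, not ZoneMinder's definitions.

#include <ctime>

struct SharedData {
  unsigned int last_read_index;   // ring-buffer slot last consumed by analysis
  time_t       last_read_time;    // wall-clock time of that consumption
};

// Publish progress for a packet that was looked at but not analysed.
void publish_read_position(SharedData &shared, unsigned int index,
                           unsigned int image_buffer_count, time_t now) {
  shared.last_read_index = index % image_buffer_count;  // stay inside the ring
  shared.last_read_time  = now;
}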

src/zm_videostore.cpp

@@ -86,8 +86,10 @@ VideoStore::VideoStore(
   video_in_stream_index = video_in_stream->index;
 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
   video_in_ctx = avcodec_alloc_context3(NULL);
+  Debug(2, "copy to context");
   avcodec_parameters_to_context(video_in_ctx,
       video_in_stream->codecpar);
+  Debug(2, "dump to context");
   zm_dump_codecpar( video_in_stream->codecpar );
   //video_in_ctx.codec_id = video_in_stream->codecpar.codec_id;
 #else
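For context, the two new Debug() calls bracket the standard FFmpeg (libavcodec >= 57.64) way of building a codec context from a container stream's AVCodecParameters. A hedged sketch of that step, with the error handling the hunk itself omits; context_from_stream is an illustrative name.

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

// Allocate a codec context and fill it from the stream's AVCodecParameters.
AVCodecContext *context_from_stream(const AVStream *stream) {
  AVCodecContext *ctx = avcodec_alloc_context3(nullptr);
  if ( !ctx )
    return nullptr;
  // Copies codec_id, dimensions, pixel format, extradata, etc.
  if ( avcodec_parameters_to_context(ctx, stream->codecpar) < 0 ) {
    avcodec_free_context(&ctx);
    return nullptr;
  }
  return ctx;
}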

src/zmc.cpp

@@ -317,7 +317,7 @@ int main(int argc, char *argv[]) {
   DELTA_TIMEVAL(delta_time, now, last_capture_times[i], DT_PREC_3);
   long sleep_time = next_delays[i]-delta_time.delta;
   if ( sleep_time > 0 ) {
-    Debug(2,"usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
+    //Debug(2,"usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
     usleep(sleep_time*(DT_MAXGRAN/DT_PREC_3));
   }
   last_capture_times[i] = now;
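The commented-out Debug() sits inside zmc's pacing logic: after each capture the loop sleeps for whatever remains of the monitor's frame interval, with DT_MAXGRAN/DT_PREC_3 converting the stored delta to microseconds. A generic sketch of that idea, assuming a plain microsecond interval rather than ZoneMinder's DT_* fixed-point helpers.

#include <sys/time.h>
#include <unistd.h>

// Sleep for the remainder of the frame interval, if any time is left.
void pace_capture(const struct timeval &last_capture, long frame_interval_us) {
  struct timeval now;
  gettimeofday(&now, nullptr);
  long elapsed_us = (now.tv_sec - last_capture.tv_sec) * 1000000L
                  + (now.tv_usec - last_capture.tv_usec);
  long sleep_us = frame_interval_us - elapsed_us;
  if ( sleep_us > 0 )
    usleep(sleep_us);   // only sleep when we are ahead of schedule
}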