Merge branch 'zma_to_thread' of github.com:ConnorTechnology/ZoneMinder into zma_to_thread

commit c94544dad7
@@ -43,13 +43,11 @@ int Event::pre_alarm_count = 0;

Event::PreAlarmData Event::pre_alarm_data[MAX_PRE_ALARM_FRAMES] = { { 0 } };

-Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string &p_cause, const StringSetMap &p_noteSetMap, bool p_videoEvent ) :
+Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string &p_cause, const StringSetMap &p_noteSetMap ) :
  monitor( p_monitor ),
  start_time( p_start_time ),
  cause( p_cause ),
-  noteSetMap( p_noteSetMap ),
-  videoEvent( p_videoEvent ),
-  videowriter( NULL )
+  noteSetMap( p_noteSetMap )
{
  std::string notes;
@@ -71,7 +69,7 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string

  static char sql[ZM_SQL_MED_BUFSIZ];
  struct tm *stime = localtime( &start_time.tv_sec );
-  snprintf( sql, sizeof(sql), "insert into Events ( MonitorId, StorageId, Name, StartTime, Width, Height, Cause, Notes, StateId, Orientation, Videoed, DefaultVideo, SaveJPEGs ) values ( %d, %d, 'New Event', from_unixtime( %ld ), %d, %d, '%s', '%s', %d, %d, %d, '', %d )",
+  snprintf( sql, sizeof(sql), "INSERT INTO Events ( MonitorId, StorageId, Name, StartTime, Width, Height, Cause, Notes, StateId, Orientation, Videoed, DefaultVideo, SaveJPEGs ) values ( %d, %d, 'New Event', from_unixtime( %ld ), %d, %d, '%s', '%s', %d, %d, %d, '', %d )",
    monitor->Id(),
    storage->Id(),
    start_time.tv_sec,
@@ -81,7 +79,7 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
    notes.c_str(),
    state_id,
    monitor->getOrientation(),
-    videoEvent,
+    ( monitor->GetOptVideoWriter() != 0 ? 1 : 0 ),
    monitor->GetOptSaveJPEGs()
    );
  if ( mysql_query( &dbconn, sql ) ) {
@@ -97,6 +95,7 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
  alarm_frames = 0;
  tot_score = 0;
  max_score = 0;
+  have_video_keyframe = false;

  struct stat statbuf;
  char id_file[PATH_MAX];
@@ -179,7 +178,6 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
  snprintf( video_name, sizeof(video_name), "%d-%s.%s", id, "video", container.c_str() );
  snprintf( video_file, sizeof(video_file), staticConfig.video_file_format, path, video_name );
  Debug(1,"Writing video file to %s", video_file );
-  videowriter = NULL;
  Camera * camera = monitor->getCamera();
  videoStore = new VideoStore(
    video_file,
@@ -192,9 +190,6 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
      delete videoStore;
      videoStore = NULL;
    }
-  } else {
-    /* No video object */
-    videowriter = NULL;
  }

} // Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string &p_cause, const StringSetMap &p_noteSetMap, bool p_videoEvent )
@@ -224,7 +219,7 @@ Event::~Event() {
    Error( "Can't update event: %s", mysql_error( &dbconn ) );
    exit( mysql_errno( &dbconn ) );
  }
-}
+} // ~Event

void Event::createNotes( std::string &notes ) {
  notes.clear();
@@ -261,39 +256,6 @@ Debug(3, "Writing image to %s", event_file );
  return rc;
} // end Event::WriteFrameImage( Image *image, struct timeval timestamp, const char *event_file, bool alarm_frame )

-bool Event::WriteFrameVideo( const Image *image, const struct timeval timestamp, VideoWriter* videow ) {
-  const Image* frameimg = image;
-  Image ts_image;
-
-  /* Checking for invalid parameters */
-  if ( videow == NULL ) {
-    Error("NULL Video object");
-    return false;
-  }
-
-  /* If the image does not contain a timestamp, add the timestamp */
-  if ( !config.timestamp_on_capture ) {
-    ts_image = *image;
-    monitor->TimestampImage( &ts_image, &timestamp );
-    frameimg = &ts_image;
-  }
-
-  /* Calculate delta time */
-  struct DeltaTimeval delta_time3;
-  DELTA_TIMEVAL( delta_time3, timestamp, start_time, DT_PREC_3 );
-  unsigned int timeMS = (delta_time3.sec * delta_time3.prec) + delta_time3.fsec;
-
-  /* Encode and write the frame */
-  if ( videowriter->Encode(frameimg, timeMS) != 0 ) {
-    Error("Failed encoding video frame");
-  }
-
-  /* Add the frame to the timecodes file */
-  fprintf(timecodes_fd, "%u\n", timeMS);
-
-  return( true );
-}

bool Event::WritePacket( ZMPacket &packet ) {

  if ( videoStore->writePacket( &packet ) < 0 )
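With per-frame VideoWriter encoding gone, event video now flows through VideoStore::writePacket(). The truncated WritePacket() above presumably completes along these lines (a sketch only; the guard on videoStore and the error message are assumptions, and writePacket() returning a negative value on failure is inferred from the call site shown):

```cpp
// Sketch, not the committed implementation: forward the packet to the
// VideoStore and report failure. Assumes videoStore may be NULL when
// video storage is disabled for this event.
bool Event::WritePacket( ZMPacket &packet ) {
  if ( !videoStore )
    return false;
  if ( videoStore->writePacket( &packet ) < 0 ) {
    Error( "Failed writing packet to video store" );
    return false;
  }
  return true;
}
```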
@@ -440,9 +402,6 @@ void Event::AddFramesInternal( int n_frames, int start_frame, Image **images, st
      Debug( 1, "Writing pre-capture frame %d", frames );
      WriteFrameImage( images[i], *(timestamps[i]), event_file );
    }
-    if ( videowriter != NULL ) {
-      WriteFrameVideo( images[i], *(timestamps[i]), videowriter );
-    }

    struct DeltaTimeval delta_time;
    DELTA_TIMEVAL( delta_time, *(timestamps[i]), start_time, DT_PREC_2 );
@@ -470,7 +429,9 @@ void Event::AddPacket( ZMPacket *packet, int score, Image *alarm_image ) {
  frames++;

  if ( videoStore ) {
-    videoStore->writePacket( packet );
+    have_video_keyframe = have_video_keyframe || ( packet->codec_type == AVMEDIA_TYPE_VIDEO && ( packet->packet.flags & AV_PKT_FLAG_KEY ) );
+    if ( have_video_keyframe )
+      videoStore->writePacket( packet );
    //FIXME if it fails, we should write a jpeg
  }
  if ( packet->codec_type == AVMEDIA_TYPE_VIDEO ) {
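The new gating in AddPacket() exists because an MP4 has to start with a frame a decoder can reconstruct: inter-coded frames reference earlier data, so anything written before the first keyframe would decode to garbage. Restated with the intermediate steps spelled out (a sketch using the same members as the diff; AV_PKT_FLAG_KEY is FFmpeg's keyframe flag on AVPacket.flags):

```cpp
// Sketch: equivalent logic to the added lines above, spelled out.
bool is_video = ( packet->codec_type == AVMEDIA_TYPE_VIDEO );
bool is_keyframe = is_video && ( packet->packet.flags & AV_PKT_FLAG_KEY );
if ( is_keyframe )
  have_video_keyframe = true;          // latched for the rest of the event
if ( have_video_keyframe )
  videoStore->writePacket( packet );   // audio is also held back until then
```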
@@ -80,20 +80,16 @@ class Event {
  struct timeval end_time;
  std::string cause;
  StringSetMap noteSetMap;
-  bool videoEvent;
  int frames;
  int alarm_frames;
  unsigned int tot_score;
  unsigned int max_score;
  char path[PATH_MAX];
-  VideoWriter* videowriter;
  VideoStore *videoStore;
-  FILE* timecodes_fd;
  char video_name[PATH_MAX];
  char video_file[PATH_MAX];
-  char timecodes_name[PATH_MAX];
-  char timecodes_file[PATH_MAX];
  int last_db_frame;
+  bool have_video_keyframe; // a flag to tell us if we have had a video keyframe when writing an mp4. The first frame SHOULD be a video keyframe.

  void createNotes( std::string &notes );

@@ -101,7 +97,7 @@ class Event {
  static bool OpenFrameSocket( int );
  static bool ValidateFrameSocket( int );

-  Event( Monitor *p_monitor, struct timeval p_start_time, const std::string &p_cause, const StringSetMap &p_noteSetMap, bool p_videoEvent=false );
+  Event( Monitor *p_monitor, struct timeval p_start_time, const std::string &p_cause, const StringSetMap &p_noteSetMap );
  ~Event();

  int Id() const { return( id ); }
@@ -116,7 +112,6 @@ class Event {

  bool SendFrameImage( const Image *image, bool alarm_frame=false );
  bool WriteFrameImage( Image *image, struct timeval timestamp, const char *event_file, bool alarm_frame=false );
-  bool WriteFrameVideo( const Image *image, const struct timeval timestamp, VideoWriter* videow );

  void updateNotes( const StringSetMap &stringSetMap );

@@ -112,22 +112,6 @@ FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::stri
  mOpenStart = 0;
  mReopenThread = 0;

-#if HAVE_LIBSWSCALE
-  mConvertContext = NULL;
-#endif
-  /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
-  if ( colours == ZM_COLOUR_RGB32 ) {
-    subpixelorder = ZM_SUBPIX_ORDER_RGBA;
-    imagePixFormat = AV_PIX_FMT_RGBA;
-  } else if ( colours == ZM_COLOUR_RGB24 ) {
-    subpixelorder = ZM_SUBPIX_ORDER_RGB;
-    imagePixFormat = AV_PIX_FMT_RGB24;
-  } else if ( colours == ZM_COLOUR_GRAY8 ) {
-    subpixelorder = ZM_SUBPIX_ORDER_NONE;
-    imagePixFormat = AV_PIX_FMT_GRAY8;
-  } else {
-    Panic("Unexpected colours: %d",colours);
-  }
} // end FFmpegCamera::FFmpegCamera

FfmpegCamera::~FfmpegCamera() {
@@ -387,16 +371,6 @@ int FfmpegCamera::OpenFfmpeg() {
      }
    }
  }
} else {
#ifdef AV_CODEC_ID_H265
  if ( mVideoCodecContext->codec_id == AV_CODEC_ID_H265 ) {
    Debug( 1, "Input stream appears to be h265. The stored event file may not be viewable in browser." );
  } else {
#endif
    Error( "Input stream is not h264. The stored event file may not be viewable in browser." );
#ifdef AV_CODEC_ID_H265
  }
#endif
} // end if h264
#endif

@@ -427,7 +401,7 @@ int FfmpegCamera::OpenFfmpeg() {
    }
  } // end if success opening codec

-  if (mVideoCodecContext->hwaccel != NULL) {
+  if ( mVideoCodecContext->hwaccel != NULL ) {
    Debug(1, "HWACCEL in use");
  } else {
    Debug(1, "HWACCEL not in use");
@@ -457,54 +431,7 @@ int FfmpegCamera::OpenFfmpeg() {
  } // end if have audio stream

  Debug ( 1, "Opened codec" );
-# if 0
-
-  // Allocate space for the native video frame
-  mRawFrame = zm_av_frame_alloc();
-
-  // Allocate space for the converted video frame
-  mFrame = zm_av_frame_alloc();
-
-  if ( mRawFrame == NULL || mFrame == NULL )
-    Fatal( "Unable to allocate frame for %s", mPath.c_str() );
-
-  Debug ( 1, "Allocated frames" );
-
-#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
-  int pSize = av_image_get_buffer_size( imagePixFormat, width, height,1 );
-#else
-  int pSize = avpicture_get_size( imagePixFormat, width, height );
-#endif
-
-  if ( (unsigned int)pSize != imagesize ) {
-    Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
-  }
-
-  Debug ( 1, "Validated imagesize" );
-
-#if HAVE_LIBSWSCALE
-  Debug ( 1, "Calling sws_isSupportedInput" );
-  if ( !sws_isSupportedInput(mVideoCodecContext->pix_fmt) ) {
-    Fatal("swscale does not support the codec format: %c%c%c%c", (mVideoCodecContext->pix_fmt)&0xff, ((mVideoCodecContext->pix_fmt >> 8)&0xff), ((mVideoCodecContext->pix_fmt >> 16)&0xff), ((mVideoCodecContext->pix_fmt >> 24)&0xff));
-  }
-
-  if ( !sws_isSupportedOutput(imagePixFormat) ) {
-    Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
-  }
-
-  mConvertContext = sws_getContext(mVideoCodecContext->width,
-      mVideoCodecContext->height,
-      mVideoCodecContext->pix_fmt,
-      width, height,
-      imagePixFormat, SWS_BICUBIC, NULL,
-      NULL, NULL);
-  if ( mConvertContext == NULL )
-    Fatal( "Unable to create conversion context for %s", mPath.c_str() );
-#else // HAVE_LIBSWSCALE
-  Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
-#endif // HAVE_LIBSWSCALE
-
-#endif
  if ( (unsigned int)mVideoCodecContext->width != width || (unsigned int)mVideoCodecContext->height != height ) {
    Warning( "Monitor dimensions are %dx%d but camera is sending %dx%d", width, height, mVideoCodecContext->width, mVideoCodecContext->height );
  }
@@ -548,12 +475,6 @@ int FfmpegCamera::CloseFfmpeg() {
    mRawFrame = NULL;
  }

-#if HAVE_LIBSWSCALE
-  if ( mConvertContext ) {
-    sws_freeContext( mConvertContext );
-    mConvertContext = NULL;
-  }
-#endif

  if ( mVideoCodecContext ) {
    avcodec_close(mVideoCodecContext);
@@ -576,7 +497,7 @@ int FfmpegCamera::CloseFfmpeg() {
  }

  return 0;
-}
+} // end int FfmpegCamera::CloseFfmpeg()

int FfmpegCamera::FfmpegInterruptCallback(void *ctx) {
  Debug(3,"FfmpegInterruptCallback");
@@ -590,9 +511,9 @@ int FfmpegCamera::FfmpegInterruptCallback(void *ctx) {
  }

  return 0;
-}
+} // end int FfmpegCamera::FfmpegInterruptCallback(void *ctx)

-void *FfmpegCamera::ReopenFfmpegThreadCallback(void *ctx){
+void *FfmpegCamera::ReopenFfmpegThreadCallback(void *ctx) {
  Debug(3,"FfmpegReopenThreadCallback");
  if ( ctx == NULL ) return NULL;

@@ -614,6 +535,6 @@ void *FfmpegCamera::ReopenFfmpegThreadCallback(void *ctx){
      return NULL;
    }
  }
-}
+} // end void *FfmpegCamera::ReopenFfmpegThreadCallback(void *ctx)

#endif // HAVE_LIBAVFORMAT
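For context, FfmpegInterruptCallback() is the hook that libavformat polls during blocking network I/O; returning non-zero from it aborts the operation, which is what lets a stalled RTSP source be torn down and reopened. Registration looks roughly like this (a sketch; `open_with_interrupt` is a hypothetical helper, and whether the callback is a static member accessible this way is an assumption):

```cpp
#include <libavformat/avformat.h>

// Sketch: register the interrupt callback before any blocking open/read.
// FFmpeg calls it periodically; a non-zero return aborts the I/O instead
// of letting a dead camera connection hang the capture process.
AVFormatContext *open_with_interrupt( const char *url, void *camera ) {
  AVFormatContext *ctx = avformat_alloc_context();
  ctx->interrupt_callback.callback = FfmpegCamera::FfmpegInterruptCallback;
  ctx->interrupt_callback.opaque = camera;  // handed back as the ctx argument
  if ( avformat_open_input( &ctx, url, NULL, NULL ) < 0 )
    return NULL;  // open failed, or the callback requested an abort
  return ctx;
}
```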
@@ -50,7 +50,6 @@ class FfmpegCamera : public Camera {
  AVCodec *mAudioCodec;
  AVFrame *mRawFrame;
  AVFrame *mFrame;
-  _AVPIXELFORMAT imagePixFormat;

  bool hwaccel;
#if HAVE_AVUTIL_HWCONTEXT_H
@@ -80,12 +79,6 @@ class FfmpegCamera : public Camera {
  pthread_t mReopenThread;
#endif // HAVE_LIBAVFORMAT

-#if HAVE_LIBSWSCALE
-  struct SwsContext *mConvertContext;
-#endif

  int64_t startTime;

public:
  FfmpegCamera(
    int p_id,
@@ -241,6 +241,7 @@ Monitor::Monitor(
  int p_orientation,
  unsigned int p_deinterlacing,
  int p_savejpegs,
  int p_colours,
  VideoWriter p_videowriter,
  std::string &p_encoderparams,
  std::string &p_output_codec,
@@ -282,6 +283,7 @@ Monitor::Monitor(
  orientation( (Orientation)p_orientation ),
  deinterlacing( p_deinterlacing ),
  savejpegspref( p_savejpegs ),
  colours( p_colours ),
  videowriter( p_videowriter ),
  encoderparams( p_encoderparams ),
  output_codec( p_output_codec ),
@@ -340,6 +342,49 @@ Monitor::Monitor(

  /* Parse encoder parameters */
  ParseEncoderParameters(encoderparams.c_str(), &encoderparamsvec);
+#if HAVE_LIBSWSCALE
+  mConvertContext = NULL;
+#endif
+  /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
+  if ( colours == ZM_COLOUR_RGB32 ) {
+    subpixelorder = ZM_SUBPIX_ORDER_RGBA;
+    imagePixFormat = AV_PIX_FMT_RGBA;
+  } else if ( colours == ZM_COLOUR_RGB24 ) {
+    subpixelorder = ZM_SUBPIX_ORDER_RGB;
+    imagePixFormat = AV_PIX_FMT_RGB24;
+  } else if ( colours == ZM_COLOUR_GRAY8 ) {
+    subpixelorder = ZM_SUBPIX_ORDER_NONE;
+    imagePixFormat = AV_PIX_FMT_GRAY8;
+  } else {
+    Panic("Unexpected colours: %d",colours);
+  }
+#if HAVE_LIBSWSCALE
+  //FIXME, need to be able to query the camera input for what it is going to be getting, which needs to be called after the camera is open.
+  //Debug ( 1, "Calling sws_isSupportedInput" );
+  //if ( !sws_isSupportedInput(mVideoCodecContext->pix_fmt) ) {
+  //Fatal("swscale does not support the codec format: %c%c%c%c", (mVideoCodecContext->pix_fmt)&0xff, ((mVideoCodecContext->pix_fmt >> 8)&0xff), ((mVideoCodecContext->pix_fmt >> 16)&0xff), ((mVideoCodecContext->pix_fmt >> 24)&0xff));
+  //}
+
+  if ( !sws_isSupportedOutput(imagePixFormat) ) {
+    Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
+  }
+
+  // We don't know this yet, need to open the camera first.
+  //mConvertContext = sws_getContext(mVideoCodecContext->width,
+  //mVideoCodecContext->height,
+  //mVideoCodecContext->pix_fmt,
+  //width, height,
+  //imagePixFormat, SWS_BICUBIC, NULL,
+  //NULL, NULL);
+  //if ( mConvertContext == NULL )
+  //Fatal( "Unable to create conversion context for %s", mPath.c_str() );
+#else // HAVE_LIBSWSCALE
+  //Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
+#endif // HAVE_LIBSWSCALE
+
+  //if ( (unsigned int)mVideoCodecContext->width != width || (unsigned int)mVideoCodecContext->height != height ) {
+  //Warning( "Monitor dimensions are %dx%d but camera is sending %dx%d", width, height, mVideoCodecContext->width, mVideoCodecContext->height );
+  //}

  fps = 0.0;
  event_count = 0;
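The FIXME in the block above is the reason half of it is commented out: the constructor knows the output side (colours, imagePixFormat) but the input side is only known once the camera is open. Once it is, the deferred setup the comments describe would look roughly like this (a sketch; the src_* variables are assumptions standing in for whatever the opened camera reports):

```cpp
#if HAVE_LIBSWSCALE
// Sketch: create the conversion context after the camera is open, when the
// source dimensions and pixel format are finally known. SWS_BICUBIC and the
// destination side mirror the commented-out code above.
mConvertContext = sws_getContext(
    src_width, src_height, src_pix_fmt,  // reported by the opened camera
    width, height, imagePixFormat,       // what zmc/zma will consume
    SWS_BICUBIC, NULL, NULL, NULL );
if ( mConvertContext == NULL )
  Fatal( "Unable to create conversion context" );
#endif // HAVE_LIBSWSCALE
```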
@@ -572,6 +617,12 @@ Monitor::~Monitor() {
    delete videoStore;
    videoStore = NULL;
  }
+#if HAVE_LIBSWSCALE
+  if ( mConvertContext ) {
+    sws_freeContext( mConvertContext );
+    mConvertContext = NULL;
+  }
+#endif
  if ( timestamps ) {
    delete[] timestamps;
    timestamps = 0;
@@ -1132,7 +1183,7 @@ bool Monitor::CheckSignal( const Image *image ) {
        return true;
    }

-  } else if(colours == ZM_COLOUR_RGB32) {
+  } else if ( colours == ZM_COLOUR_RGB32 ) {
    if ( usedsubpixorder == ZM_SUBPIX_ORDER_ARGB || usedsubpixorder == ZM_SUBPIX_ORDER_ABGR) {
      if ( ARGB_ABGR_ZEROALPHA(*(((const Rgb*)buffer)+index)) != ARGB_ABGR_ZEROALPHA(colour_val) )
        return true;
@@ -1183,16 +1234,15 @@ void Monitor::CheckAction() {
  } // end if shared_data->action
}

// Would be nice if this JUST did analysis
bool Monitor::Analyse() {
  // last_write_index is the last capture
  // last_read_index is the last analysis

  if ( shared_data->last_read_index == shared_data->last_write_index ) {
    // I wonder how often this happens. Maybe if this happens we should sleep or something?
    //Debug(3, " shared_data->last_read_index == shared_data->last_write_index " );
    // If analysis is keeping up, then it happens lots
  if ( ! packetqueue.size() ) {
    Debug(2, "Nothing in packetqueue");
    return false;
  }

  if ( ! Enabled() ) {
    Warning("Shouldn't be doing Analyse when not Enabled");
    return false;
@@ -1248,35 +1298,30 @@ bool Monitor::Analyse() {
    // last-write_index is the last frame captured
    skip_index = shared_data->last_write_index%image_buffer_count;
  }
  // Skip non-video frames
  int videostream_id = camera->get_VideoStreamId();
  unsigned int index = ( shared_data->last_read_index + 1 ) % image_buffer_count;
  while (
      ( index != shared_data->last_write_index )
      && (
        ( index < skip_index )
        ||
        ( image_buffer[index].packet.stream_index != videostream_id )
      ) ) {

    Debug(2, "Skipping packet in analysis (%d) != (%d)",
        image_buffer[index].packet.stream_index, camera->get_VideoStreamId() );
    if ( event ) {
      event->AddPacket( &image_buffer[index], 0 );
  // process audio packets, writing them if there is an event.
  ZMPacket *queued_packet;
  while ( packetqueue.size() ) {
    if ( ( queued_packet = packetqueue.popPacket() ) ) {
      if ( queued_packet->packet.stream_index == video_stream_id ) {
        break;
      }
      if ( event ) {
        event->AddPacket( queued_packet );
      }
      delete queued_packet;
      queued_packet = NULL;
    }
    index ++;
    index = index % image_buffer_count;
  }
  // Still looking at audio packets
  if ( image_buffer[index].packet.stream_index != videostream_id ) {
    shared_data->last_read_index = index;

  if ( ! queued_packet ) {
    shared_data->last_read_time = now.tv_sec;
    mutex.unlock();
    return false;
  }

  //Debug(2, "timestamp for index (%d) %s", index, timeval_to_string( *timestamp ) );
-  ZMPacket *snap = &image_buffer[index];
+  ZMPacket *snap = queued_packet;
  struct timeval *timestamp = &snap->timestamp;
  Image *snap_image = snap->image;
@@ -1420,10 +1465,8 @@ Debug(3,"before DetectMotion");

  if ( ! event ) {
    // Create event
-    event = new Event( this, *timestamp, "Continuous", noteSetMap, videoRecording );
+    event = new Event( this, *timestamp, "Continuous", noteSetMap );
    shared_data->last_event_id = event->Id();
    //set up video store data
    snprintf(video_store_data->event_file, sizeof(video_store_data->event_file), "%s", event->getEventFile());
    video_store_data->recording = event->StartTime();

    Info( "%s: %03d - Opening new event %d, section start", name, image_count, event->Id() );
@@ -1448,7 +1491,7 @@ Debug(9, "Score: (%d)", score );
  if ( analysis_fps ) {
    // If analysis fps is set,
    // compute the index for pre event images in the dedicated buffer
-    pre_index = image_count%pre_event_buffer_count;
+    pre_index = image_count % pre_event_buffer_count;
    Debug(3, "Pre Index = (%d) = image_count(%d) %% pre_event_buffer_count (%d)", pre_index, image_count, pre_event_buffer_count );

    // Seek forward the next filled slot in to the buffer (oldest data)
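The modulo here is plain ring-buffer indexing: image_count grows without bound, and `% pre_event_buffer_count` folds it into a fixed set of slots, so each write lands on the oldest entry once the buffer has wrapped. In isolation:

```cpp
// Sketch: ring-buffer indexing as used for the pre-event buffer.
// With 4 slots, successive image_count values map to 0 1 2 3 0 1 2 3 ...
// so the slot being overwritten is always the oldest one.
const int pre_event_buffer_count = 4;  // illustrative size
for ( int image_count = 0; image_count < 10; image_count++ ) {
  int pre_index = image_count % pre_event_buffer_count;
  // pre_event_buffer[pre_index] = current packet/image
}
```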
@@ -1955,6 +1998,7 @@ int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose
      orientation,
      deinterlacing,
      savejpegs,
      colours,
      videowriter,
      encoderparams,
      output_codec,
@@ -2143,6 +2187,7 @@ int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const c
      orientation,
      deinterlacing,
      savejpegs,
      colours,
      videowriter,
      encoderparams,
      output_codec,
@@ -2296,6 +2341,7 @@ int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose pu
      orientation,
      deinterlacing,
      savejpegs,
      colours,
      videowriter,
      encoderparams,
      output_codec,
@@ -2459,6 +2505,7 @@ int Monitor::LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose
      orientation,
      deinterlacing,
      savejpegs,
      colours,
      videowriter,
      encoderparams,
      output_codec,
@@ -2790,6 +2837,7 @@ Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
      orientation,
      deinterlacing,
      savejpegs,
      colours,
      videowriter,
      encoderparams,
      output_codec,
@@ -2893,23 +2941,26 @@ int Monitor::Capture() {
    return -1;
  } else if ( captureResult > 0 ) {

    if ( packet->packet.size && ! packet->in_frame ) {
    // Analysis thread will take care of consuming and emptying the packets.
    packetqueue.queuePacket( packet );

-    if ( packet->packet.stream_index == camera->get_VideoStreamId() ) {
+    if ( packet->packet.stream_index == video_stream_id ) {
      if ( packet->packet.size && ! packet->in_frame ) {
        packet->codec_type = camera->get_VideoStream()->codecpar->codec_type;
        if ( packet->decode( camera->get_VideoCodecContext() ) )
          packet->get_image();
      } else {
        packet->codec_type = camera->get_AudioStream()->codecpar->codec_type;
        packet->decode( camera->get_AudioCodecContext() );
        shared_data->last_write_index = index;
        shared_data->last_write_time = image_buffer[index].timestamp.tv_sec;
        mutex.unlock();
        return 1;
        // If not an AVPacket, then assume video for now.
        packet->codec_type = camera->get_VideoStream()->codecpar->codec_type;
      }
    } else {
      // If not an AVPacket, then assume video for now.
      packet->codec_type = camera->get_VideoStream()->codecpar->codec_type;
    } else { // probably audio
      packet->image = NULL;
      packet->codec_type = camera->get_AudioStream()->codecpar->codec_type;
      packet->decode( camera->get_AudioCodecContext() );
      // Don't update last_write_index because that is used for live streaming
      shared_data->last_write_time = image_buffer[index].timestamp.tv_sec;
      mutex.unlock();
      return 1;
    }

    /* Deinterlacing */
@@ -2952,49 +3003,13 @@ int Monitor::Capture() {
  }

#if 0
  int video_stream_id = camera->get_VideoStreamId();
  //Video recording
  if ( video_store_data->recording.tv_sec ) {
    if ( shared_data->last_event_id != this->GetVideoWriterEventId() ) {
      Debug(2, "Have change of event. last_event(%d), our current (%d)",
          shared_data->last_event_id,
          this->GetVideoWriterEventId()
          );
      if ( videoStore ) {
        Debug(2, "Have videostore already?");
        // I don't know if this is important or not... but I figure we might as well write this last packet out to the store before closing it.
        // Also don't know how much it matters for audio.
        int ret = videoStore->writePacket( packet );
        if ( ret < 0 ) { //Less than zero and we skipped a frame
          Warning("Error writing last packet to videostore.");
        }

        delete videoStore;
        videoStore = NULL;
        this->SetVideoWriterEventId( 0 );
      } // end if videoStore
    } // end if end of recording

    if ( shared_data->last_event_id and ! videoStore ) {
      Debug(2,"New videostore");
      videoStore = new VideoStore(
          (const char *) video_store_data->event_file,
          "mp4",
          camera->get_VideoStream(),
          ( record_audio ? camera->get_AudioStream() : NULL ),
          video_store_data->recording.tv_sec,
          this );

      if ( ! videoStore->open() ) {
        delete videoStore;
        videoStore = NULL;
      } else {
        this->SetVideoWriterEventId(shared_data->last_event_id);

        Debug(2, "Clearing packets");
        // Clear all packets that predate the moment when the recording began
        packetqueue.clear_unwanted_packets(&video_store_data->recording, video_stream_id);
        videoStore->write_packets(packetqueue);
      } // success opening
    } // end if ! was recording
  } else { // Not recording
@@ -3019,7 +3034,6 @@ int Monitor::Capture() {
      }
    } else if ( packet->packet.stream_index == video_stream_id ) {
      if ( packet->keyframe || packetqueue.size() ) // it's a keyframe or we already have something in the queue
        packetqueue.queuePacket( packet );
    } // end if audio or video
  } // end if recording or not

@@ -3363,7 +3377,9 @@ bool Monitor::DumpSettings( char *output, bool verbose ) {
unsigned int Monitor::Colours() const { return( camera->Colours() ); }
unsigned int Monitor::SubpixelOrder() const { return( camera->SubpixelOrder() ); }
int Monitor::PrimeCapture() {
-  return( camera->PrimeCapture() );
+  int ret = camera->PrimeCapture();
+  video_stream_id = ret ? camera->get_VideoStreamId() : -1;
+  return ret;
}
int Monitor::PreCapture() {
  return( camera->PreCapture() );
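Caching the stream id when capture is primed means the hot capture/analysis paths compare against a plain member instead of calling back into the camera for every packet, and -1 serves as a sentinel when priming fails. The consumers shown earlier reduce to:

```cpp
// Sketch: how the cached id is consumed in the capture path (see the
// Monitor::Capture() hunk above). video_stream_id is -1 if priming failed,
// so the comparison also safely routes packets when there is no video stream.
if ( packet->packet.stream_index == video_stream_id ) {
  // video packet: decode and extract an image for analysis
} else {
  // non-video (typically audio): decode and pass straight through
}
```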
@@ -234,11 +234,17 @@ protected:
  bool videoRecording;

  int savejpegspref;
  int colours;
  VideoWriter videowriter;
  std::string encoderparams;
  std::string output_codec;
+  std::string output_container;
  std::vector<EncoderParameter_t> encoderparamsvec;
+  _AVPIXELFORMAT imagePixFormat;
+  unsigned int subpixelorder;
+#if HAVE_LIBSWSCALE
+  struct SwsContext *mConvertContext;
+#endif
  bool record_audio; // Whether to store the audio that we receive

  int brightness; // The statically saved brightness of the camera
@@ -315,8 +321,9 @@ protected:
  ZMPacket next_buffer; /* Used by four field deinterlacing */
  ZMPacket *pre_event_buffer;

-  Camera *camera;
+  int video_stream_id; // will be filled in PrimeCapture

+  Camera *camera;
  Event *event;

  int n_zones;
@@ -347,6 +354,7 @@ public:
    int p_orientation,
    unsigned int p_deinterlacing,
    int p_savejpegs,
    int p_colours,
    VideoWriter p_videowriter,
    std::string &p_encoderparams,
    std::string &p_output_codec,
@@ -25,6 +25,7 @@
#define AUDIO_QUEUESIZE 50

zm_packetqueue::zm_packetqueue(){
+  video_packet_count = 0;

}
@@ -34,14 +35,8 @@ zm_packetqueue::~zm_packetqueue() {

bool zm_packetqueue::queuePacket( ZMPacket* zm_packet ) {
  pktQueue.push_back( zm_packet );

-  return true;
-}
-bool zm_packetqueue::queuePacket( AVPacket* av_packet ) {
-
-  ZMPacket *zm_packet = new ZMPacket( av_packet );
-
-  pktQueue.push_back( zm_packet );
+  if ( zm_packet->codec_type == AVMEDIA_TYPE_VIDEO )
+    video_packet_count += 1;

  return true;
}
@@ -53,6 +48,8 @@ ZMPacket* zm_packetqueue::popPacket( ) {

  ZMPacket *packet = pktQueue.front();
  pktQueue.pop_front();
+  if ( packet->codec_type == AVMEDIA_TYPE_VIDEO )
+    video_packet_count -= 1;

  return packet;
}
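queuePacket()/popPacket() now keep video_packet_count in step with the list so the queue can be bounded by video frames rather than raw packet count; with an audio stream present, packets can vastly outnumber video frames. The bookkeeping in miniature (a self-contained sketch with simplified types):

```cpp
#include <list>

// Sketch: a minimal queue mirroring the video_packet_count bookkeeping.
// Every push/pop of a video packet adjusts the counter, so callers can ask
// "how many video frames are buffered?" in O(1).
struct Pkt { bool is_video; };

class PacketQueue {
  std::list<Pkt *> q;
  unsigned int video_count = 0;

 public:
  void push( Pkt *p ) {
    q.push_back( p );
    if ( p->is_video )
      video_count++;
  }
  Pkt *pop() {
    if ( q.empty() )
      return nullptr;
    Pkt *p = q.front();
    q.pop_front();
    if ( p->is_video )
      video_count--;
    return p;
  }
  unsigned int videoCount() const { return video_count; }
};
```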
@@ -74,21 +71,30 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
    ZMPacket *zm_packet = *it;
    AVPacket *av_packet = &(zm_packet->packet);

-    Debug(4, "Looking at packet with stream index (%d) with keyframe (%d), frames_to_keep is (%d)", av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), frames_to_keep );
+    Debug(4, "Looking at packet with stream index (%d) with keyframe, frames_to_keep is (%d)", av_packet->stream_index, frames_to_keep );

-    // Want frames_to_keep video keyframes. Otherwise, we may not have enough
-    if ( ( av_packet->stream_index == stream_id) && ( av_packet->flags & AV_PKT_FLAG_KEY ) ) {
+    // Want frames_to_keep video frames. Otherwise, we may not have enough
+    if ( av_packet->stream_index == stream_id ) {
      frames_to_keep --;
    }
  }
  // Might not be starting with a keyframe, but should always start with a keyframe

  if ( frames_to_keep ) {
    Debug(3, "Hit end of queue, still need (%d) video keyframes", frames_to_keep );
  } else {
    while ( it != pktQueue.rend() ) {
      AVPacket *av_packet = &( (*it)->packet );
      if ( ( av_packet->stream_index == stream_id ) && ( av_packet->flags & AV_PKT_FLAG_KEY ) )
        break;
      ++it;
    }
  }
  unsigned int delete_count = 0;
  while ( it != pktQueue.rend() ) {
    Debug(4, "Deleting a packet from the front, count is (%d)", delete_count );

    packet = pktQueue.front();
    if ( packet->codec_type == AVMEDIA_TYPE_VIDEO )
      video_packet_count -= 1;
    pktQueue.pop_front();
    delete packet;
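The rewritten clearQueue() works backwards from the newest packet: first it reverse-iterates until it has passed frames_to_keep packets on the video stream, then it keeps walking to the previous video keyframe so the surviving tail starts on a decodable frame, and only then deletes everything older from the front. As a standalone sketch (simplified types; the real code iterates AVPackets, and what to do when no keyframe is found is this sketch's choice, not necessarily the commit's):

```cpp
#include <iterator>
#include <list>

// Sketch of the trimming strategy. Elements are assumed heap-allocated.
struct Pkt { bool is_video; bool is_key; };

unsigned int trim_queue( std::list<Pkt *> &q, unsigned int keep ) {
  auto it = q.rbegin();
  // 1. Walk back over the newest `keep` packets on the video stream.
  for ( ; it != q.rend() && keep; ++it )
    if ( (*it)->is_video )
      keep--;
  // 2. Keep walking to the previous video keyframe, so the kept tail
  //    begins on a frame a decoder can reconstruct.
  while ( it != q.rend() && !( (*it)->is_video && (*it)->is_key ) )
    ++it;
  if ( it == q.rend() )
    return 0;  // not enough history yet: keep everything (sketch's choice)
  // 3. Delete from the front up to, but not including, that keyframe.
  auto first_kept = std::prev( it.base() );  // forward iterator to the keyframe
  unsigned int deleted = 0;
  while ( q.begin() != first_kept ) {
    delete q.front();
    q.pop_front();
    ++deleted;
  }
  return deleted;
}
```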
@@ -105,12 +111,16 @@ void zm_packetqueue::clearQueue() {
    pktQueue.pop_front();
    delete packet;
  }
+  video_packet_count = 0;
}

unsigned int zm_packetqueue::size() {
  return pktQueue.size();
}

+unsigned int zm_packetqueue::get_video_packet_count() {
+  return video_packet_count;
+}

void zm_packetqueue::clear_unwanted_packets( timeval *recording_started, int mVideoStreamId ) {
  // Need to find the keyframe <= recording_started. Can get rid of audio packets.
@@ -34,19 +34,16 @@ class zm_packetqueue {
public:
  zm_packetqueue();
  virtual ~zm_packetqueue();
-  bool queuePacket( AVPacket* packet, struct timeval *timestamp );
  bool queuePacket( ZMPacket* packet );
-  bool queuePacket( AVPacket* packet );
  ZMPacket * popPacket( );
  bool popVideoPacket(ZMPacket* packet);
  bool popAudioPacket(ZMPacket* packet);
  unsigned int clearQueue( unsigned int video_frames_to_keep, int stream_id );
  void clearQueue( );
  unsigned int size();
+  unsigned int get_video_packet_count();
  void clear_unwanted_packets( timeval *recording, int mVideoStreamId );
private:
  std::list<ZMPacket *> pktQueue;

+  unsigned int video_packet_count; // keep track of how many video packets we have, because we shouldn't have more than image_buffer_count
};

#endif /* ZM_PACKETQUEUE_H */