wip

commit 50d3b168fa
parent 3dafb5c225
@@ -29,10 +29,13 @@ int AnalysisThread::run() {
    if ( analysis_update_delay ) {
      cur_time = time( 0 );
      if ( (unsigned int)( cur_time - last_analysis_update_time ) > analysis_update_delay ) {
        Debug(4, "Updating " );
        analysis_rate = monitor->GetAnalysisRate();
        monitor->UpdateAdaptiveSkip();
        last_analysis_update_time = cur_time;
      }
    } else {
      Debug(4, "Not Updating " );
    }

    if ( !monitor->Analyse() ) {
@@ -44,6 +47,6 @@ Debug(4, "Sleeping for %d", analysis_rate);
      }

      //sigprocmask(SIG_UNBLOCK, &block_set, 0);
    }
  } // end while ! terminate
  return 0;
} // end in AnalysisThread::run()
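The AnalysisThread hunks above add a throttled refresh of the analysis settings: the analysis rate and adaptive-skip values are re-read from the monitor at most once every analysis_update_delay seconds. A minimal standalone sketch of that throttle pattern follows; the Monitor type here is a pared-down stand-in for illustration, not the real ZoneMinder class.

#include <ctime>

// Pared-down stand-in for the real Monitor class, for illustration only.
struct Monitor {
  int GetAnalysisRate() { return 0; }
  void UpdateAdaptiveSkip() {}
};

// Refresh analysis settings at most once every 'analysis_update_delay' seconds,
// mirroring the time-based guard added in AnalysisThread::run().
void maybe_update_analysis_settings(Monitor *monitor,
                                    unsigned int analysis_update_delay,
                                    time_t &last_analysis_update_time,
                                    int &analysis_rate) {
  if ( analysis_update_delay ) {
    time_t cur_time = time( 0 );
    if ( (unsigned int)( cur_time - last_analysis_update_time ) > analysis_update_delay ) {
      analysis_rate = monitor->GetAnalysisRate();
      monitor->UpdateAdaptiveSkip();
      last_analysis_update_time = cur_time;
    }
  }
}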
@@ -1898,14 +1898,14 @@ int LocalCamera::PrimeCapture() {
}

int LocalCamera::PreCapture() {
  Debug( 2, "Pre-capturing" );
  Debug( 4, "Pre-capturing" );
  return( 0 );
}

int LocalCamera::Capture( ZMPacket &zm_packet ) {

  // We assume that the avpacket is allocated, and just needs to be filled
  Debug( 3, "Capturing" );
  Debug( 2, "Capturing" );
  static uint8_t* buffer = NULL;
  static uint8_t* directbuffer = NULL;
  static int capture_frame = -1;
@@ -2000,7 +2000,7 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
#if HAVE_LIBSWSCALE
  if ( conversion_type == 1 ) {

    Debug( 9, "Calling sws_scale to perform the conversion" );
    Debug( 9, "Setting up a frame" );
    /* Use swscale to convert the image directly into the shared memory */
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
    av_image_fill_arrays(tmpPicture->data,
@@ -2010,6 +2010,7 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
    avpicture_fill( (AVPicture *)tmpPicture, directbuffer,
        imagePixFormat, width, height );
#endif
    Debug( 9, "Calling sws_scale to perform the conversion" );
    sws_scale(
        imgConversionContext,
        capturePictures[capture_frame]->data,
@@ -2019,6 +2020,7 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
        tmpPicture->data,
        tmpPicture->linesize
        );
    Debug( 9, "Done sws_scale to perform the conversion" );
  } else
#endif
  if ( conversion_type == 2 ) {
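The hunks above only reorder the Debug statements around the libswscale conversion path, but the pattern itself is worth spelling out: point a destination picture at a preallocated buffer, then let sws_scale convert the captured frame into it. Below is a hedged, self-contained sketch of that pattern using the public FFmpeg API; the function name, pixel formats, and buffer are placeholders, not ZoneMinder's actual members.

// Hypothetical standalone version of the conversion done in LocalCamera::Capture:
// scale/convert a captured frame into a caller-supplied buffer with libswscale.
extern "C" {
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}

// Convert 'src_data' (srcFormat, width x height) into 'dst_buffer' (dstFormat, same size).
// Returns 0 on success, -1 on failure. All names here are illustrative.
static int convert_frame(const uint8_t *const src_data[4], const int src_linesize[4],
                         AVPixelFormat srcFormat, AVPixelFormat dstFormat,
                         int width, int height, uint8_t *dst_buffer) {
  SwsContext *ctx = sws_getContext(width, height, srcFormat,
                                   width, height, dstFormat,
                                   SWS_BICUBIC, nullptr, nullptr, nullptr);
  if ( !ctx )
    return -1;

  uint8_t *dst_data[4];
  int dst_linesize[4];
  // Point the destination plane pointers at the target buffer, mirroring the
  // av_image_fill_arrays()/avpicture_fill() call in the diff.
  av_image_fill_arrays(dst_data, dst_linesize, dst_buffer, dstFormat, width, height, 1);

  // Perform the conversion over the full height of the frame.
  sws_scale(ctx, src_data, src_linesize, 0, height, dst_data, dst_linesize);

  sws_freeContext(ctx);
  return 0;
}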
@@ -2071,7 +2073,7 @@ int LocalCamera::PostCapture() {
      } else {
        Error( "Unable to requeue buffer due to not v4l2_data" )
      }
    }
  } else
#endif // ZM_HAS_V4L2
#if ZM_HAS_V4L1
  if ( v4l_version == 1 ) {
@@ -1149,6 +1149,7 @@ bool Monitor::Analyse() {
    //Debug(3, " shared_data->last_read_index == shared_data->last_write_index " );
    return false;
  }
  Debug(3, "ANal");

  struct timeval now;
  gettimeofday( &now, NULL );
@@ -2870,6 +2871,7 @@ int Monitor::Capture() {
      }
    } else {
      captureResult = camera->Capture(packet);
      Debug(2,"Capture result (%d)", captureResult );
      if ( captureResult < 0 ) {
        // Unable to capture image for temporary reason
        // Fake a signal loss image
@@ -2882,13 +2884,15 @@ int Monitor::Capture() {
    }

    int video_stream_id = camera->get_VideoStreamId();
    Debug(2,"Video stream is (%d)", video_stream_id );

    //Video recording
    if ( video_store_data->recording.tv_sec ) {
      if ( shared_data->last_event_id != this->GetVideoWriterEventId() ) {
      Debug(2,"Recording since (%d)", video_store_data->recording.tv_sec );
      if ( shared_data->last_event_id != video_store_data->current_event ) {
        Debug(2, "Have change of event. last_event(%d), our current (%d)",
            shared_data->last_event_id,
            this->GetVideoWriterEventId()
            video_store_data->current_event
            );
        if ( videoStore ) {
          Debug(2, "Have videostore already?");
@@ -2903,6 +2907,8 @@ Debug(2, "Have videostore already?");
          videoStore = NULL;
          this->SetVideoWriterEventId( 0 );
        } // end if videoStore
      } else {
        Debug(2, "No change of event");
      } // end if end of recording

      if ( shared_data->last_event_id and ! videoStore ) {
@@ -2933,27 +2939,34 @@ Debug(2,"New videostore");
          delete videoStore;
          videoStore = NULL;
          this->SetVideoWriterEventId( 0 );
        } else {
          Debug(2,"Not recording");
        }

        // Buffer video packets, since we are not recording.
        // All audio packets are keyframes, so only if it's a video keyframe
        if ( ( packet.packet.stream_index == video_stream_id ) && ( packet.keyframe ) ) {
        if ( packet.packet.stream_index == video_stream_id ) {
          if ( packet.keyframe ) {
            packetqueue.clearQueue( this->GetPreEventCount(), video_stream_id );
            packetqueue.queuePacket( &packet );
          } else if ( packetqueue.size() ) {
            // it's a keyframe or we already have something in the queue
            packetqueue.queuePacket( &packet );
          }
        } else if ( packet.packet.stream_index == camera->get_AudioStreamId() ) {
        // The following lines should ensure that the queue always begins with a video keyframe
        if ( packet.packet.stream_index == camera->get_AudioStreamId() ) {
          //Debug(2, "Have audio packet, reocrd_audio is (%d) and packetqueue.size is (%d)", record_audio, packetqueue.size() );
          if ( record_audio && packetqueue.size() ) {
            // if it's audio, and we are doing audio, and there is already something in the queue
            packetqueue.queuePacket( &packet );
          }
        } else if ( packet.packet.stream_index == video_stream_id ) {
          if ( packet.keyframe || packetqueue.size() ) // it's a keyframe or we already have something in the queue
            packetqueue.queuePacket( &packet );
        } else {
          Debug(2,"Unknown stream");
        } // end if audio or video
      } // end if recording or not

      if ( videoStore ) {
        Debug(2, "Writing packet");
        //Write the packet to our video store, it will be smart enough to know what to do
        int ret = videoStore->writePacket( &packet );
        if ( ret < 0 ) { //Less than zero and we skipped a frame
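The restructured block above routes packets into the pre-event packet queue by stream type. As a reading aid, here is a hedged standalone sketch of that routing: PacketQueue and Packet are simplified stand-ins for ZoneMinder's zm_packetqueue/ZMPacket types, and it follows the keyframe-clears-then-queue variant visible in the hunk; only the branch structure is taken from the diff.

#include <deque>

// Simplified stand-ins for ZMPacket / zm_packetqueue, for illustration only.
struct Packet { int stream_index; bool keyframe; };

struct PacketQueue {
  std::deque<Packet> q;
  size_t size() const { return q.size(); }
  void queuePacket(const Packet &p) { q.push_back(p); }
  // Toy version: drop old packets so roughly 'pre_event_count' packets remain.
  void clearQueue(size_t pre_event_count, int /*video_stream_id*/) {
    while ( q.size() > pre_event_count ) q.pop_front();
  }
};

// Mirrors the branch structure in the hunk: audio is queued only when audio is
// being recorded and the queue already has content (so audio never leads the queue);
// a video keyframe trims the queue to the pre-event window and is then queued;
// non-keyframe video is queued only if something is already buffered.
void buffer_packet(PacketQueue &packetqueue, const Packet &packet,
                   int video_stream_id, int audio_stream_id,
                   bool record_audio, size_t pre_event_count) {
  if ( packet.stream_index == audio_stream_id ) {
    if ( record_audio && packetqueue.size() )
      packetqueue.queuePacket(packet);
  } else if ( packet.stream_index == video_stream_id ) {
    if ( packet.keyframe ) {
      packetqueue.clearQueue(pre_event_count, video_stream_id);
      packetqueue.queuePacket(packet);
    } else if ( packetqueue.size() ) {
      packetqueue.queuePacket(packet);
    }
  } // other streams are ignored
}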
@@ -2962,6 +2975,8 @@ Debug(2,"New videostore");
      }
    } // end if de-interlacing or not

    if ( deinterlacing_value ) {
      Debug(2,"Deinterlace");
      /* Deinterlacing */
      if ( deinterlacing_value == 1 ) {
        capture_image->Deinterlace_Discard();
@@ -2974,8 +2989,10 @@ Debug(2,"New videostore");
      } else if ( deinterlacing_value == 5 ) {
        capture_image->Deinterlace_Blend_CustomRatio( (deinterlacing>>8)&0xff );
      }
    }

    if ( orientation != ROTATE_0 ) {
      Debug(2,"Rotate");
      switch ( orientation ) {
        case ROTATE_0 : {
          // No action required
@@ -3006,13 +3023,17 @@ Debug(2,"New videostore");
        }
      }

      if ( privacy_bitmask )
      if ( privacy_bitmask ) {
        Debug(2,"privacy");
        capture_image->MaskPrivacy( privacy_bitmask );
      }

      gettimeofday( image_buffer[index].timestamp, NULL );
      if ( config.timestamp_on_capture ) {
        Debug(2,"Timestamping");
        TimestampImage( capture_image, image_buffer[index].timestamp );
      }
      Debug(2,"Check signal");
      shared_data->signal = CheckSignal(capture_image);
      shared_data->last_write_index = index;
      shared_data->last_write_time = image_buffer[index].timestamp->tv_sec;
@@ -3039,7 +3060,7 @@ Debug(2,"New videostore");
        Error( "Can't run query: %s", mysql_error( &dbconn ) );
      }
    }
    }
    } // end if report fps

    // Icon: I'm not sure these should be here. They have nothing to do with capturing
    if ( shared_data->action & GET_SETTINGS ) {
src/zmc.cpp (11 changes)
@@ -269,7 +269,8 @@ int main(int argc, char *argv[]) {
  struct timeval now;
  struct DeltaTimeval delta_time;
  while ( !zm_terminate ) {
    sigprocmask(SIG_BLOCK, &block_set, 0);
    Debug(2,"blocking");
    //sigprocmask(SIG_BLOCK, &block_set, 0);
    for ( int i = 0; i < n_monitors; i++ ) {
      long min_delay = MAXINT;
@@ -316,14 +317,18 @@ int main(int argc, char *argv[]) {
          DELTA_TIMEVAL(delta_time, now, last_capture_times[i], DT_PREC_3);
          long sleep_time = next_delays[i]-delta_time.delta;
          if ( sleep_time > 0 ) {
            Debug(2,"usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
            usleep(sleep_time*(DT_MAXGRAN/DT_PREC_3));
          }
        }
        last_capture_times[i] = now;
      } else {
        gettimeofday(&(last_capture_times[i]), NULL);
      }
    } // end if next_delay <= min_delay || next_delays[i] <= 0 )

  } // end foreach n_monitors
  sigprocmask(SIG_UNBLOCK, &block_set, 0);
  Debug(2,"unblocking");
  //sigprocmask(SIG_UNBLOCK, &block_set, 0);
  } // end while ! zm_terminate
  for ( int i = 0; i < n_monitors; i++ ) {
    if ( analysis_threads[i] ) {
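The zmc.cpp hunk above paces the capture loop: it measures how long the last capture took and sleeps off the remainder of the per-monitor delay before the next one. A simplified sketch of that pacing idea using std::chrono follows; it is an illustration only and does not use ZoneMinder's DELTA_TIMEVAL/DT_* macros, whose units are assumed here to be milliseconds.

#include <chrono>
#include <thread>

// Illustrative pacing loop: if capturing took less than 'frame_delay',
// sleep off the difference so captures run at the configured rate.
// 'capture_one_frame' is a placeholder for the per-monitor capture call.
void paced_capture_loop(std::chrono::milliseconds frame_delay,
                        bool &terminate,
                        void (*capture_one_frame)()) {
  auto last_capture = std::chrono::steady_clock::now();
  while ( !terminate ) {
    capture_one_frame();
    auto now = std::chrono::steady_clock::now();
    auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(now - last_capture);
    auto sleep_time = frame_delay - elapsed;   // analogous to next_delays[i] - delta_time.delta
    if ( sleep_time > std::chrono::milliseconds(0) )
      std::this_thread::sleep_for(sleep_time); // analogous to the usleep() in the diff
    last_capture = std::chrono::steady_clock::now();
  }
}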