Merge branch 'storageareas' of github.com:connortechnology/ZoneMinder into storageareas

Isaac Connor 2017-11-14 02:40:12 -05:00
commit 65e7fb0014
23 changed files with 283 additions and 683 deletions

View File

@@ -115,7 +115,7 @@ BEGIN {
, $Config{ZM_DB_USER}
, $Config{ZM_DB_PASS}
) or croak( "Can't connect to db" );
my $sql = 'select * from Config';
my $sql = 'SELECT Name,Value FROM Config';
my $sth = $dbh->prepare_cached( $sql ) or croak( "Can't prepare '$sql': ".$dbh->errstr() );
my $res = $sth->execute() or croak( "Can't execute: ".$sth->errstr() );
while( my $config = $sth->fetchrow_hashref() ) {

View File

@@ -1,25 +0,0 @@
snprintf( swap_path, sizeof(swap_path), "%s/zmswap-m%d/zmswap-q%06d", staticConfig.PATH_SWAP.c_str(), monitor->Id(), connkey );
int len = snprintf(NULL, 0, "/zmswap-m%d", monitor->Id());
int swap_path_length = strlen(staticConfig.PATH_SWAP.c_str()) + snprintf(NULL, 0, "/zmswap-m%d", monitor->Id() ) + snprintf(NULL, 0, "/zmswap-q%06d", connkey ) + 1; // +1 for NULL terminator
if ( connkey && playback_buffer > 0 ) {
if ( swap_path_length + max_swap_len_suffix > PATH_MAX ) {
Error( "Swap Path is too long. %d > %d ", swap_path_length+max_swap_len_suffix, PATH_MAX );
} else {
swap_path = (char *)malloc( swap_path_length+max_swap_len_suffix );
Debug( 3, "Checking swap image path %s", staticConfig.PATH_SWAP.c_str() );
strncpy( swap_path, staticConfig.PATH_SWAP.c_str(), swap_path_length );
if ( checkSwapPath( swap_path, false ) ) {
snprintf( &(swap_path[swap_path_length]), max_swap_len_suffix, "/zmswap-m%d", monitor->Id() );
if ( checkSwapPath( swap_path, true ) ) {
snprintf( &(swap_path[swap_path_length]), max_swap_len_suffix, "/zmswap-q%06d", connkey );
if ( checkSwapPath( swap_path, true ) ) {
buffered_playback = true;
}
}
}
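For context, a minimal sketch (not from this commit) of the snprintf(NULL, 0, ...) sizing idiom the new code relies on: a first call with a NULL buffer only measures the formatted length, which is then used to allocate exactly enough space before formatting for real. The names base, monitor_id and connkey are placeholders for the values used above.

#include <stdio.h>
#include <stdlib.h>

/* Hedged sketch; 'base', 'monitor_id' and 'connkey' stand in for
 * staticConfig.PATH_SWAP.c_str(), monitor->Id() and connkey above. */
static char *build_swap_path(const char *base, int monitor_id, int connkey) {
  int len = snprintf(NULL, 0, "%s/zmswap-m%d/zmswap-q%06d", base, monitor_id, connkey);
  char *path = (char *)malloc(len + 1);   /* +1 for the terminating NUL */
  if ( path )
    snprintf(path, len + 1, "%s/zmswap-m%d/zmswap-q%06d", base, monitor_id, connkey);
  return path;                            /* caller frees */
}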

View File

@@ -133,6 +133,7 @@ int av_dict_parse_string(AVDictionary **pm, const char *str,
#endif // HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE
#if HAVE_LIBAVUTIL
#if LIBAVUTIL_VERSION_CHECK(56, 0, 0, 17, 100)
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb){
int64_t a, b, this_thing;
@@ -156,6 +157,7 @@ simple_round:
return av_rescale_q(this_thing, fs_tb, out_tb);
}
#endif
#endif
int hacked_up_context2_for_older_ffmpeg(AVFormatContext **avctx, AVOutputFormat *oformat, const char *format, const char *filename) {
AVFormatContext *s = avformat_alloc_context();

View File

@@ -458,7 +458,9 @@ int FfmpegCamera::OpenFfmpeg() {
// Stolen from ispy
//this fixes issues with rtsp streams!! woot.
//mVideoCodecContext->flags2 |= CODEC_FLAG2_FAST | CODEC_FLAG2_CHUNKS | CODEC_FLAG_LOW_DELAY; // Enable faster H264 decode.
#ifdef CODEC_FLAG2_FAST
mVideoCodecContext->flags2 |= CODEC_FLAG2_FAST | CODEC_FLAG_LOW_DELAY;
#endif
#if HAVE_AVUTIL_HWCONTEXT_H
if ( mVideoCodecContext->codec_id == AV_CODEC_ID_H264 ) {

View File

@@ -1534,34 +1534,28 @@ bool LocalCamera::GetCurrentSettings( const char *device, char *output, int vers
return( false );
}
if ( verbose )
{
if ( verbose ) {
sprintf( output+strlen(output), " Input %d\n", input.index );
sprintf( output+strlen(output), " Name: %s\n", input.name );
sprintf( output+strlen(output), " Type: %s\n", input.type==V4L2_INPUT_TYPE_TUNER?"Tuner":(input.type==V4L2_INPUT_TYPE_CAMERA?"Camera":"Unknown") );
sprintf( output+strlen(output), " Audioset: %08x\n", input.audioset );
sprintf( output+strlen(output), " Standards: 0x%llx\n", input.std );
}
else
{
} else {
sprintf( output+strlen(output), "i%d:%s|", input.index, input.name );
sprintf( output+strlen(output), "i%dT:%s|", input.index, input.type==V4L2_INPUT_TYPE_TUNER?"Tuner":(input.type==V4L2_INPUT_TYPE_CAMERA?"Camera":"Unknown") );
sprintf( output+strlen(output), "i%dS:%llx|", input.index, input.std );
}
if ( verbose )
{
if ( verbose ) {
sprintf( output+strlen(output), " %s", capString( input.status&V4L2_IN_ST_NO_POWER, "Power ", "off", "on", " (X)" ) );
sprintf( output+strlen(output), " %s", capString( input.status&V4L2_IN_ST_NO_SIGNAL, "Signal ", "not detected", "detected", " (X)" ) );
sprintf( output+strlen(output), " %s", capString( input.status&V4L2_IN_ST_NO_COLOR, "Colour Signal ", "not detected", "detected", "" ) );
sprintf( output+strlen(output), " %s", capString( input.status&V4L2_IN_ST_NO_H_LOCK, "Horizontal Lock ", "not detected", "detected", "" ) );
}
else
{
sprintf( output+strlen(output), "i%dSP:%d|", input.index, input.status&V4L2_IN_ST_NO_POWER?0:1 );
sprintf( output+strlen(output), "i%dSS:%d|", input.index, input.status&V4L2_IN_ST_NO_SIGNAL?0:1 );
sprintf( output+strlen(output), "i%dSC:%d|", input.index, input.status&V4L2_IN_ST_NO_COLOR?0:1 );
sprintf( output+strlen(output), "i%dHP:%d|", input.index, input.status&V4L2_IN_ST_NO_H_LOCK?0:1 );
} else {
sprintf( output+strlen(output), "i%dSP:%d|", input.index, (input.status&V4L2_IN_ST_NO_POWER)?0:1 );
sprintf( output+strlen(output), "i%dSS:%d|", input.index, (input.status&V4L2_IN_ST_NO_SIGNAL)?0:1 );
sprintf( output+strlen(output), "i%dSC:%d|", input.index, (input.status&V4L2_IN_ST_NO_COLOR)?0:1 );
sprintf( output+strlen(output), "i%dHP:%d|", input.index, (input.status&V4L2_IN_ST_NO_H_LOCK)?0:1 );
}
}
while ( inputIndex++ >= 0 );
@@ -1570,12 +1564,10 @@ bool LocalCamera::GetCurrentSettings( const char *device, char *output, int vers
}
#endif // ZM_HAS_V4L2
#if ZM_HAS_V4L1
if ( version == 1 )
{
if ( version == 1 ) {
struct video_capability vid_cap;
memset( &vid_cap, 0, sizeof(video_capability) );
if ( ioctl( vid_fd, VIDIOCGCAP, &vid_cap ) < 0 )
{
if ( ioctl( vid_fd, VIDIOCGCAP, &vid_cap ) < 0 ) {
Error( "Failed to get video capabilities: %s", strerror(errno) );
if ( verbose )
sprintf( output, "Error, failed to get video capabilities %s: %s\n", queryDevice, strerror(errno) );
@@ -1583,8 +1575,7 @@ bool LocalCamera::GetCurrentSettings( const char *device, char *output, int vers
sprintf( output, "error%d\n", errno );
return( false );
}
if ( verbose )
{
if ( verbose ) {
sprintf( output+strlen(output), "Video Capabilities\n" );
sprintf( output+strlen(output), " Name: %s\n", vid_cap.name );
sprintf( output+strlen(output), " Type: %d\n%s%s%s%s%s%s%s%s%s%s%s%s%s%s", vid_cap.type,
@@ -2047,9 +2038,8 @@ int LocalCamera::PreCapture() {
int LocalCamera::Capture( Image &image ) {
Debug( 3, "Capturing" );
static uint8_t* buffer = NULL;
static uint8_t* directbuffer = NULL;
static int capture_frame = -1;
int buffer_bytesused = 0;
int capture_frame = -1;
int captures_per_frame = 1;
if ( channel_count > 1 )
@@ -2059,7 +2049,6 @@ int LocalCamera::Capture( Image &image ) {
Warning( "Invalid Captures Per Frame setting: %d", captures_per_frame );
}
// Do the capture, unless we are the second or subsequent camera on a channel, in which case just reuse the buffer
if ( channel_prime ) {
#if ZM_HAS_V4L2
@@ -2135,7 +2124,7 @@ int LocalCamera::Capture( Image &image ) {
Debug( 3, "Performing format conversion" );
/* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
uint8_t* directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if ( directbuffer == NULL ) {
Error("Failed requesting writeable buffer for the captured image.");
return -1;
@@ -2153,7 +2142,13 @@ int LocalCamera::Capture( Image &image ) {
avpicture_fill( (AVPicture *)tmpPicture, directbuffer,
imagePixFormat, width, height );
#endif
sws_scale( imgConversionContext, capturePictures[capture_frame]->data, capturePictures[capture_frame]->linesize, 0, height, tmpPicture->data, tmpPicture->linesize );
sws_scale( imgConversionContext,
capturePictures[capture_frame]->data,
capturePictures[capture_frame]->linesize,
0,
height,
tmpPicture->data,
tmpPicture->linesize );
}
#endif
if ( conversion_type == 2 ) {
@@ -2174,7 +2169,7 @@ int LocalCamera::Capture( Image &image ) {
}
return 1;
}
} // end int LocalCamera::Capture()
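As a reference for the reformatted sws_scale() call above, a hedged sketch of the conversion step; the frame pointers, dimensions and pixel formats are placeholders for the members used in LocalCamera::Capture().

extern "C" {
#include <libavutil/frame.h>
#include <libswscale/swscale.h>
}

// Minimal sketch: convert one captured frame into the format requested by the
// target Image buffer. Names other than the libswscale calls are illustrative.
static int convert_frame(AVFrame *src, AVFrame *dst, int width, int height,
                         AVPixelFormat src_fmt, AVPixelFormat dst_fmt) {
  SwsContext *ctx = sws_getCachedContext(NULL,
      width, height, src_fmt,      // capture geometry and pixel format
      width, height, dst_fmt,      // same geometry, target pixel format
      SWS_BICUBIC, NULL, NULL, NULL);
  if ( !ctx )
    return -1;
  sws_scale(ctx, src->data, src->linesize, 0, height, dst->data, dst->linesize);
  sws_freeContext(ctx);
  return 0;
}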
int LocalCamera::PostCapture()
{

View File

@@ -211,39 +211,52 @@ void VideoStream::SetupCodec( int colours, int subpixelorder, int width, int hei
Debug( 1, "Allocated stream" );
AVCodecContext *c = ost->codec;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
codec_context = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(codec_context, ost->codecpar);
#else
codec_context = ost->codec;
#endif
c->codec_id = codec->id;
c->codec_type = codec->type;
codec_context->codec_id = codec->id;
codec_context->codec_type = codec->type;
c->pix_fmt = strcmp( "mjpeg", ofc->oformat->name ) == 0 ? AV_PIX_FMT_YUVJ422P : AV_PIX_FMT_YUV420P;
codec_context->pix_fmt = strcmp( "mjpeg", ofc->oformat->name ) == 0 ? AV_PIX_FMT_YUVJ422P : AV_PIX_FMT_YUV420P;
if ( bitrate <= 100 ) {
// Quality based bitrate control (VBR). Scale is 1..31 where 1 is best.
// This gets rid of artifacts in the beginning of the movie; and well, even quality.
c->flags |= CODEC_FLAG_QSCALE;
c->global_quality = FF_QP2LAMBDA * (31 - (31 * (bitrate / 100.0)));
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
codec_context->flags |= AV_CODEC_FLAG_QSCALE;
#else
codec_context->flags |= CODEC_FLAG_QSCALE;
#endif
codec_context->global_quality = FF_QP2LAMBDA * (31 - (31 * (bitrate / 100.0)));
} else {
c->bit_rate = bitrate;
codec_context->bit_rate = bitrate;
}
/* resolution must be a multiple of two */
c->width = width;
c->height = height;
codec_context->width = width;
codec_context->height = height;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
c->time_base.den = frame_rate;
c->time_base.num = 1;
codec_context->time_base.den = frame_rate;
codec_context->time_base.num = 1;
Debug( 1, "Will encode in %d fps.", c->time_base.den );
Debug( 1, "Will encode in %d fps.", codec_context->time_base.den );
/* emit one intra frame every second */
c->gop_size = frame_rate;
codec_context->gop_size = frame_rate;
// some formats want stream headers to be separate
if ( of->flags & AVFMT_GLOBALHEADER )
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
} else {
Fatal( "of->video_codec == AV_CODEC_ID_NONE" );
}
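For readers following the AVStream::codec to codec_context migration above, a hedged sketch of the pattern using the stock FFmpeg version macros rather than ZoneMinder's LIBAVCODEC_VERSION_CHECK wrapper:

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

// Sketch only: on newer FFmpeg the stream keeps AVCodecParameters, so a separate
// AVCodecContext is allocated and filled from them; older builds reuse the embedded context.
static AVCodecContext *context_for_stream(AVStream *stream) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 64, 0)
  AVCodecContext *ctx = avcodec_alloc_context3(NULL);
  if ( ctx )
    avcodec_parameters_to_context(ctx, stream->codecpar);
  return ctx;   // caller configures and later avcodec_free_context()s it
#else
  return stream->codec;   // deprecated embedded context on older FFmpeg
#endif
}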
@@ -278,13 +291,11 @@ void VideoStream::OpenStream( ) {
/* now that all the parameters are set, we can open the
video codecs and allocate the necessary encode buffers */
if ( ost ) {
AVCodecContext *c = ost->codec;
/* open the codec */
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
if ( (avRet = avcodec_open( c, codec )) < 0 )
if ( (avRet = avcodec_open( codec_context, codec )) < 0 )
#else
if ( (avRet = avcodec_open2( c, codec, 0 )) < 0 )
if ( (avRet = avcodec_open2( codec_context, codec, 0 )) < 0 )
#endif
{
Fatal( "Could not open codec. Error code %d \"%s\"", avRet, av_err2str( avRet ) );
@@ -293,19 +304,15 @@ void VideoStream::OpenStream( ) {
Debug( 1, "Opened codec" );
/* allocate the encoded raw picture */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
opicture = av_frame_alloc( );
#else
opicture = avcodec_alloc_frame( );
#endif
opicture = zm_av_frame_alloc( );
if ( !opicture ) {
Panic( "Could not allocate opicture" );
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
int size = av_image_get_buffer_size( c->pix_fmt, c->width, c->height, 1 );
int size = av_image_get_buffer_size( codec_context->pix_fmt, codec_context->width, codec_context->height, 1 );
#else
int size = avpicture_get_size( c->pix_fmt, c->width, c->height );
int size = avpicture_get_size( codec_context->pix_fmt, codec_context->width, codec_context->height );
#endif
uint8_t *opicture_buf = (uint8_t *)av_malloc( size );
@@ -315,17 +322,17 @@ void VideoStream::OpenStream( ) {
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(opicture->data, opicture->linesize,
opicture_buf, c->pix_fmt, c->width, c->height, 1);
opicture_buf, codec_context->pix_fmt, codec_context->width, codec_context->height, 1);
#else
avpicture_fill( (AVPicture *)opicture, opicture_buf, c->pix_fmt,
c->width, c->height );
avpicture_fill( (AVPicture *)opicture, opicture_buf, codec_context->pix_fmt,
codec_context->width, codec_context->height );
#endif
/* if the output format is not identical to the input format, then a temporary
picture is needed too. It is then converted to the required
output format */
tmp_opicture = NULL;
if ( c->pix_fmt != pf ) {
if ( codec_context->pix_fmt != pf ) {
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
tmp_opicture = av_frame_alloc( );
#else
@@ -335,9 +342,9 @@ void VideoStream::OpenStream( ) {
Panic( "Could not allocate tmp_opicture" );
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
int size = av_image_get_buffer_size( pf, c->width, c->height,1 );
int size = av_image_get_buffer_size( pf, codec_context->width, codec_context->height,1 );
#else
int size = avpicture_get_size( pf, c->width, c->height );
int size = avpicture_get_size( pf, codec_context->width, codec_context->height );
#endif
uint8_t *tmp_opicture_buf = (uint8_t *)av_malloc( size );
if ( !tmp_opicture_buf ) {
@@ -347,10 +354,10 @@ void VideoStream::OpenStream( ) {
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(tmp_opicture->data,
tmp_opicture->linesize, tmp_opicture_buf, pf,
c->width, c->height, 1);
codec_context->width, codec_context->height, 1);
#else
avpicture_fill( (AVPicture *)tmp_opicture,
tmp_opicture_buf, pf, c->width, c->height );
tmp_opicture_buf, pf, codec_context->width, codec_context->height );
#endif
}
}
@@ -375,7 +382,12 @@ void VideoStream::OpenStream( ) {
}
video_outbuf = NULL;
#if LIBAVFORMAT_VERSION_CHECK(57, 0, 0, 0, 0)
if (codec_context->codec_type == AVMEDIA_TYPE_VIDEO &&
codec_context->codec_id == AV_CODEC_ID_RAWVIDEO) {
#else
if ( !(of->flags & AVFMT_RAWPICTURE) ) {
#endif
/* allocate output buffer */
/* XXX: API change will be done */
// TODO: Make buffer dynamic.
@@ -446,6 +458,8 @@ VideoStream::VideoStream( const char *in_filename, const char *in_format, int bi
if ( pthread_mutex_init( buffer_copy_lock, NULL ) != 0 ) {
Fatal("pthread_mutex_init failed");
}
codec_context = NULL;
}
VideoStream::~VideoStream( ) {
@@ -481,7 +495,7 @@ VideoStream::~VideoStream( ) {
/* close each codec */
if ( ost ) {
avcodec_close( ost->codec );
avcodec_close( codec_context );
av_free( opicture->data[0] );
av_frame_free( &opicture );
if ( tmp_opicture ) {
@@ -564,17 +578,15 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
static struct SwsContext *img_convert_ctx = 0;
#endif // HAVE_LIBSWSCALE
AVCodecContext *c = ost->codec;
if ( c->pix_fmt != pf ) {
if ( codec_context->pix_fmt != pf ) {
memcpy( tmp_opicture->data[0], buffer, buffer_size );
#ifdef HAVE_LIBSWSCALE
if ( !img_convert_ctx ) {
img_convert_ctx = sws_getCachedContext( NULL, c->width, c->height, pf, c->width, c->height, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
img_convert_ctx = sws_getCachedContext( NULL, codec_context->width, codec_context->height, pf, codec_context->width, codec_context->height, codec_context->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
if ( !img_convert_ctx )
Panic( "Unable to initialise image scaling context" );
}
sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, c->height, opicture->data, opicture->linesize );
sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, codec_context->height, opicture->data, opicture->linesize );
#else // HAVE_LIBSWSCALE
Fatal( "swscale is required for MPEG mode" );
#endif // HAVE_LIBSWSCALE
@@ -586,7 +598,13 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
AVPacket *pkt = packet_buffers[packet_index];
av_init_packet( pkt );
int got_packet = 0;
#if LIBAVFORMAT_VERSION_CHECK(57, 0, 0, 0, 0)
if (codec_context->codec_type == AVMEDIA_TYPE_VIDEO &&
codec_context->codec_id == AV_CODEC_ID_RAWVIDEO) {
#else
if ( of->flags & AVFMT_RAWPICTURE ) {
#endif
#if LIBAVCODEC_VERSION_CHECK(52, 30, 2, 30, 2)
pkt->flags |= AV_PKT_FLAG_KEY;
#else
@@ -597,19 +615,34 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
pkt->size = sizeof (AVPicture);
got_packet = 1;
} else {
opicture_ptr->pts = c->frame_number;
opicture_ptr->quality = c->global_quality;
opicture_ptr->pts = codec_context->frame_number;
opicture_ptr->quality = codec_context->global_quality;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// Put encoder into flushing mode
avcodec_send_frame(codec_context, opicture_ptr);
int ret = avcodec_receive_packet(codec_context, pkt);
if ( ret < 0 ) {
if ( AVERROR_EOF != ret ) {
Error("Error encoding video (%d) (%s)", ret,
av_err2str(ret));
}
} else {
got_packet = 1;
}
#else
#if LIBAVFORMAT_VERSION_CHECK(54, 1, 0, 2, 100)
int ret = avcodec_encode_video2( c, pkt, opicture_ptr, &got_packet );
int ret = avcodec_encode_video2( codec_context, pkt, opicture_ptr, &got_packet );
if ( ret != 0 ) {
Fatal( "avcodec_encode_video2 failed with errorcode %d \"%s\"", ret, av_err2str( ret ) );
}
#else
int out_size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, opicture_ptr );
int out_size = avcodec_encode_video( codec_context, video_outbuf, video_outbuf_size, opicture_ptr );
got_packet = out_size > 0 ? 1 : 0;
pkt->data = got_packet ? video_outbuf : NULL;
pkt->size = got_packet ? out_size : 0;
#endif
#endif
if ( got_packet ) {
// if ( c->coded_frame->key_frame )
@@ -622,12 +655,12 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
// }
if ( pkt->pts != (int64_t)AV_NOPTS_VALUE ) {
pkt->pts = av_rescale_q( pkt->pts, c->time_base, ost->time_base );
pkt->pts = av_rescale_q( pkt->pts, codec_context->time_base, ost->time_base );
}
if ( pkt->dts != (int64_t)AV_NOPTS_VALUE ) {
pkt->dts = av_rescale_q( pkt->dts, c->time_base, ost->time_base );
pkt->dts = av_rescale_q( pkt->dts, codec_context->time_base, ost->time_base );
}
pkt->duration = av_rescale_q( pkt->duration, c->time_base, ost->time_base );
pkt->duration = av_rescale_q( pkt->duration, codec_context->time_base, ost->time_base );
pkt->stream_index = ost->index;
}
}
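A condensed sketch of the send/receive encoding API adopted above (avcodec_send_frame()/avcodec_receive_packet()); ZoneMinder's timestamp rescaling and packet buffering are left out and the helper name is illustrative.

extern "C" {
#include <libavcodec/avcodec.h>
}

// Returns a negative AVERROR on failure; *got_packet is set when pkt holds output.
static int encode_one(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, int *got_packet) {
  *got_packet = 0;
  int ret = avcodec_send_frame(ctx, frame);   // frame == NULL puts the encoder in flush mode
  if ( ret < 0 )
    return ret;
  ret = avcodec_receive_packet(ctx, pkt);
  if ( ret == AVERROR(EAGAIN) || ret == AVERROR_EOF )
    return 0;                                 // nothing to emit yet; not an error
  if ( ret < 0 )
    return ret;
  *got_packet = 1;
  return 0;
}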
@@ -658,8 +691,12 @@ void *VideoStream::StreamingThreadCallback(void *ctx){
VideoStream* videoStream = reinterpret_cast<VideoStream*>(ctx);
const uint64_t nanosecond_multiplier = 1000000000;
uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->ost->codec->time_base.num) / (videoStream->ost->codec->time_base.den) );
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->codec_context->time_base.num) / (videoStream->codec_context->time_base.den) );
#else
uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->codec_context->time_base.num) / (videoStream->codec_context->time_base.den) );
#endif
uint64_t frame_count = 0;
timespec start_time;
clock_gettime(CLOCK_MONOTONIC, &start_time);

View File

@@ -46,6 +46,7 @@ protected:
AVOutputFormat *of;
AVFormatContext *ofc;
AVStream *ost;
AVCodecContext *codec_context;
AVCodec *codec;
AVFrame *opicture;
AVFrame *tmp_opicture;

View File

@@ -103,6 +103,7 @@ int RemoteCamera::Read( int fd, char *buf, int size ) {
int bytes_to_recv = size - ReceivedBytes;
if ( SOCKET_BUF_SIZE < bytes_to_recv )
bytes_to_recv = SOCKET_BUF_SIZE;
//Debug(3, "Aiming to receive %d of %d bytes", bytes_to_recv, size );
bytes = recv(fd, &buf[ReceivedBytes], bytes_to_recv, 0); //socket, buffer, len, flags
if ( bytes <= 0 ) {
Error("RemoteCamera::Read Recv error. Closing Socket\n");

View File

@@ -87,7 +87,8 @@ public:
virtual void Terminate() = 0;
virtual int Connect() = 0;
virtual int Disconnect() = 0;
virtual int PreCapture() = 0;
virtual int PreCapture() { return 0; };
virtual int PrimeCapture() { return 0; };
virtual int Capture( Image &image ) = 0;
virtual int PostCapture() = 0;
virtual int CaptureAndRecord( Image &image, timeval recording, char* event_directory )=0;

View File

@@ -67,6 +67,7 @@ RemoteCameraNVSocket::RemoteCameraNVSocket(
timeout.tv_sec = 0;
timeout.tv_usec = 0;
subpixelorder = ZM_SUBPIX_ORDER_BGR;
if ( capture ) {
Initialise();
@@ -97,43 +98,39 @@ void RemoteCameraNVSocket::Initialise() {
}
int RemoteCameraNVSocket::Connect() {
int port_num = atoi(port.c_str());
//struct addrinfo *p;
struct sockaddr_in servaddr;
bzero( &servaddr, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htons(INADDR_ANY);
servaddr.sin_port = htons(atoi(port.c_str()));
struct sockaddr_in servaddr;
bzero( &servaddr, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htons(INADDR_ANY);
servaddr.sin_port = htons(port_num);
sd = socket(AF_INET, SOCK_STREAM, 0);
sd = socket(AF_INET, SOCK_STREAM, 0);
//for(p = hp; p != NULL; p = p->ai_next) {
//sd = socket( p->ai_family, p->ai_socktype, p->ai_protocol );
if ( sd < 0 ) {
Warning("Can't create socket: %s", strerror(errno) );
//continue;
return -1;
}
//if ( connect( sd, p->ai_addr, p->ai_addrlen ) < 0 ) {
if ( connect( sd, (struct sockaddr *)&servaddr , sizeof(servaddr) ) < 0 ) {
close(sd);
sd = -1;
Warning("Can't connect to socket mid: %d : %s", monitor_id, strerror(errno) );
return -1;
//continue;
//}
/* If we got here, we must have connected successfully */
//break;
//sd = socket( p->ai_family, p->ai_socktype, p->ai_protocol );
if ( sd < 0 ) {
Warning("Can't create socket: %s", strerror(errno) );
//continue;
return -1;
}
//if ( p == NULL ) {
//Error("Unable to connect to the remote camera, aborting");
//return( -1 );
//}
//if ( connect( sd, p->ai_addr, p->ai_addrlen ) < 0 ) {
if ( connect( sd, (struct sockaddr *)&servaddr , sizeof(servaddr) ) < 0 ) {
close(sd);
sd = -1;
Debug( 3, "Connected to host, socket = %d", sd );
return( sd );
Warning("Can't connect to socket mid: %d : %s", monitor_id, strerror(errno) );
return -1;
}
//if ( p == NULL ) {
//Error("Unable to connect to the remote camera, aborting");
//return( -1 );
//}
Debug( 3, "Connected to host:%d, socket = %d", port_num, sd );
return sd;
}
int RemoteCameraNVSocket::Disconnect() {
@@ -144,132 +141,33 @@ int RemoteCameraNVSocket::Disconnect() {
}
int RemoteCameraNVSocket::SendRequest( std::string request ) {
Debug( 2, "Sending request: %s", request.c_str() );
Debug( 4, "Sending request: %s", request.c_str() );
if ( write( sd, request.data(), request.length() ) < 0 ) {
Error( "Can't write: %s", strerror(errno) );
Disconnect();
return( -1 );
}
Debug( 3, "Request sent" );
Debug( 4, "Request sent" );
return( 0 );
}
/* Return codes are as follows:
* -1 means there was an error
* 0 means no bytes were returned but there wasn't actually an error.
* > 0 is the # of bytes read.
*/
int RemoteCameraNVSocket::ReadData( Buffer &buffer, unsigned int bytes_expected ) {
fd_set rfds;
FD_ZERO(&rfds);
FD_SET(sd, &rfds);
struct timeval temp_timeout = timeout;
int n_found = select(sd+1, &rfds, NULL, NULL, &temp_timeout);
if ( n_found == 0 ) {
Debug( 4, "Select timed out timeout was %d secs %d usecs", temp_timeout.tv_sec, temp_timeout.tv_usec );
int error = 0;
socklen_t len = sizeof(error);
int retval = getsockopt(sd, SOL_SOCKET, SO_ERROR, &error, &len);
if ( retval != 0 ) {
Debug(1, "error getting socket error code %s", strerror(retval));
}
if ( error != 0 ) {
return -1;
}
// Why are we disconnecting? It's just a timeout, meaning that data wasn't available.
//Disconnect();
return 0;
} else if ( n_found < 0 ) {
Error("Select error: %s", strerror(errno));
return -1;
}
unsigned int total_bytes_to_read = 0;
if ( bytes_expected ) {
total_bytes_to_read = bytes_expected;
} else {
if ( ioctl( sd, FIONREAD, &total_bytes_to_read ) < 0 ) {
Error( "Can't ioctl(): %s", strerror(errno) );
return( -1 );
}
if ( total_bytes_to_read == 0 ) {
if ( mode == SINGLE_IMAGE ) {
int error = 0;
socklen_t len = sizeof (error);
int retval = getsockopt( sd, SOL_SOCKET, SO_ERROR, &error, &len );
if(retval != 0 ) {
Debug( 1, "error getting socket error code %s", strerror(retval) );
}
if (error != 0) {
return -1;
}
// Case where we are grabbing a single jpg, but no content-length was given, so the expectation is that we read until close.
return( 0 );
}
// If socket is closed locally, then select will fail, but if it is closed remotely
// then we have an exception on our socket.. but no data.
Debug( 3, "Socket closed remotely" );
//Disconnect(); // Disconnect is done outside of ReadData now.
return( -1 );
}
// There can be lots of bytes available. I've seen 4MB or more. This will vastly inflate our buffer size unnecessarily.
if ( total_bytes_to_read > ZM_NETWORK_BUFSIZ ) {
total_bytes_to_read = ZM_NETWORK_BUFSIZ;
Debug(3, "Just getting 32K" );
} else {
Debug(3, "Just getting %d", total_bytes_to_read );
}
} // end if bytes_expected or not
Debug( 3, "Expecting %d bytes", total_bytes_to_read );
int total_bytes_read = 0;
do {
int bytes_read = buffer.read_into( sd, total_bytes_to_read );
if ( bytes_read < 0 ) {
Error( "Read error: %s", strerror(errno) );
return( -1 );
} else if ( bytes_read == 0 ) {
Debug( 2, "Socket closed" );
//Disconnect(); // Disconnect is done outside of ReadData now.
return( -1 );
} else if ( (unsigned int)bytes_read < total_bytes_to_read ) {
Error( "Incomplete read, expected %d, got %d", total_bytes_to_read, bytes_read );
return( -1 );
}
Debug( 3, "Read %d bytes", bytes_read );
total_bytes_read += bytes_read;
total_bytes_to_read -= bytes_read;
} while ( total_bytes_to_read );
Debug( 4, buffer );
return( total_bytes_read );
}
int RemoteCameraNVSocket::PreCapture() {
int RemoteCameraNVSocket::PrimeCapture() {
if ( sd < 0 ) {
Connect();
if ( sd < 0 ) {
Error( "Unable to connect to camera" );
return( -1 );
}
mode = SINGLE_IMAGE;
buffer.clear();
}
struct image_def {
uint16_t width;
uint16_t height;
uint16_t type;
};
struct image_def image_def;
buffer.clear();
struct image_def {
uint16_t width;
uint16_t height;
uint16_t type;
};
struct image_def image_def;
if ( SendRequest("GetImageParams") < 0 ) {
if ( SendRequest("GetImageParams\n") < 0 ) {
Error( "Unable to send request" );
Disconnect();
return -1;
@@ -289,20 +187,28 @@ struct image_def image_def;
}
int RemoteCameraNVSocket::Capture( Image &image ) {
if ( SendRequest("GetNextImage") < 0 ) {
if ( SendRequest("GetNextImage\n") < 0 ) {
Warning( "Unable to capture image, retrying" );
return( 1 );
return 0;
}
if ( Read( sd, buffer, imagesize ) < imagesize ) {
Warning( "Unable to capture image, retrying" );
return( 1 );
return 0;
}
uint32_t end;
if ( Read(sd, (char *) &end , sizeof(end)) < 0 ) {
Warning( "Unable to capture image, retrying" );
return 0;
}
if ( end != 0xFFFFFFFF) {
Warning("End Bytes Failed\n");
return 0;
}
image.Assign( width, height, colours, subpixelorder, buffer, imagesize );
return( 0 );
return 1;
}
int RemoteCameraNVSocket::PostCapture()
{
int RemoteCameraNVSocket::PostCapture() {
return( 0 );
}
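To summarise the new PrimeCapture()/Capture() flow above, a hedged sketch of one GetNextImage round-trip on the already-connected socket: send the request line, read imagesize bytes of pixels, then read a 4-byte 0xFFFFFFFF end marker. Short reads are not retried here and the function name is illustrative.

#include <cstdint>
#include <unistd.h>

// Sketch only; 'sd' is the connected socket and 'imagesize' comes from the
// earlier GetImageParams exchange. Returns true when a full frame was read.
static bool fetch_next_image(int sd, uint8_t *buffer, ssize_t imagesize) {
  static const char request[] = "GetNextImage\n";
  if ( write(sd, request, sizeof(request) - 1) < 0 )
    return false;
  if ( read(sd, buffer, imagesize) != imagesize )
    return false;
  uint32_t end = 0;
  if ( read(sd, &end, sizeof(end)) != (ssize_t)sizeof(end) )
    return false;
  return end == 0xFFFFFFFF;   // trailing sentinel sent by the NV server
}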

View File

@@ -67,7 +67,7 @@ bool p_record_audio );
int SendRequest( std::string );
int ReadData( Buffer &buffer, unsigned int bytes_expected=0 );
int GetResponse();
int PreCapture();
int PrimeCapture();
int Capture( Image &image );
int PostCapture();
int CaptureAndRecord( Image &image, timeval recording, char* event_directory ) {return(0);};

View File

@@ -379,21 +379,31 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
stream->id = i;
#endif
AVCodecContext *codec_context = NULL;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
codec_context = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(codec_context, stream->codecpar);
#else
codec_context = stream->codec;
#endif
Debug( 1, "Looking for codec for %s payload type %d / %s", mediaDesc->getType().c_str(), mediaDesc->getPayloadType(), mediaDesc->getPayloadDesc().c_str() );
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
if ( mediaDesc->getType() == "video" )
stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
else if ( mediaDesc->getType() == "audio" )
stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
codec_context->codec_type = AVMEDIA_TYPE_AUDIO;
else if ( mediaDesc->getType() == "application" )
stream->codec->codec_type = AVMEDIA_TYPE_DATA;
codec_context->codec_type = AVMEDIA_TYPE_DATA;
#else
if ( mediaDesc->getType() == "video" )
stream->codec->codec_type = CODEC_TYPE_VIDEO;
codec_context->codec_type = CODEC_TYPE_VIDEO;
else if ( mediaDesc->getType() == "audio" )
stream->codec->codec_type = CODEC_TYPE_AUDIO;
codec_context->codec_type = CODEC_TYPE_AUDIO;
else if ( mediaDesc->getType() == "application" )
stream->codec->codec_type = CODEC_TYPE_DATA;
codec_context->codec_type = CODEC_TYPE_DATA;
#endif
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
@@ -410,31 +420,27 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
codec_name = std::string( smStaticPayloads[i].payloadName );
#else
strncpy( stream->codec->codec_name, smStaticPayloads[i].payloadName, sizeof(stream->codec->codec_name) );;
strncpy( codec_context->codec_name, smStaticPayloads[i].payloadName, sizeof(codec_context->codec_name) );;
#endif
stream->codec->codec_type = smStaticPayloads[i].codecType;
stream->codec->codec_id = smStaticPayloads[i].codecId;
stream->codec->sample_rate = smStaticPayloads[i].clockRate;
codec_context->codec_type = smStaticPayloads[i].codecType;
codec_context->codec_id = smStaticPayloads[i].codecId;
codec_context->sample_rate = smStaticPayloads[i].clockRate;
break;
}
}
}
else
{
} else {
// Look in dynamic table
for ( unsigned int i = 0; i < (sizeof(smDynamicPayloads)/sizeof(*smDynamicPayloads)); i++ )
{
if ( smDynamicPayloads[i].payloadName == mediaDesc->getPayloadDesc() )
{
for ( unsigned int i = 0; i < (sizeof(smDynamicPayloads)/sizeof(*smDynamicPayloads)); i++ ) {
if ( smDynamicPayloads[i].payloadName == mediaDesc->getPayloadDesc() ) {
Debug( 1, "Got dynamic payload type %d, %s", mediaDesc->getPayloadType(), smDynamicPayloads[i].payloadName );
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
codec_name = std::string( smStaticPayloads[i].payloadName );
#else
strncpy( stream->codec->codec_name, smDynamicPayloads[i].payloadName, sizeof(stream->codec->codec_name) );;
strncpy( codec_context->codec_name, smDynamicPayloads[i].payloadName, sizeof(codec_context->codec_name) );;
#endif
stream->codec->codec_type = smDynamicPayloads[i].codecType;
stream->codec->codec_id = smDynamicPayloads[i].codecId;
stream->codec->sample_rate = mediaDesc->getClock();
codec_context->codec_type = smDynamicPayloads[i].codecType;
codec_context->codec_id = smDynamicPayloads[i].codecId;
codec_context->sample_rate = mediaDesc->getClock();
break;
}
}
@@ -450,14 +456,13 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
//return( 0 );
}
if ( mediaDesc->getWidth() )
stream->codec->width = mediaDesc->getWidth();
codec_context->width = mediaDesc->getWidth();
if ( mediaDesc->getHeight() )
stream->codec->height = mediaDesc->getHeight();
if ( stream->codec->codec_id == AV_CODEC_ID_H264 && mediaDesc->getSprops().size())
{
codec_context->height = mediaDesc->getHeight();
if ( codec_context->codec_id == AV_CODEC_ID_H264 && mediaDesc->getSprops().size()) {
uint8_t start_sequence[]= { 0, 0, 1 };
stream->codec->extradata_size= 0;
stream->codec->extradata= NULL;
codec_context->extradata_size= 0;
codec_context->extradata= NULL;
char pvalue[1024], *value = pvalue;
strcpy(pvalue, mediaDesc->getSprops().c_str());
@@ -482,22 +487,33 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
if (packet_size) {
uint8_t *dest =
(uint8_t *)av_malloc(packet_size + sizeof(start_sequence) +
stream->codec->extradata_size +
FF_INPUT_BUFFER_PADDING_SIZE);
codec_context->extradata_size +
#if LIBAVCODEC_VERSION_CHECK(57, 0, 0, 0, 0)
AV_INPUT_BUFFER_PADDING_SIZE
#else
FF_INPUT_BUFFER_PADDING_SIZE
#endif
);
if(dest) {
if(stream->codec->extradata_size) {
if(codec_context->extradata_size) {
// av_realloc?
memcpy(dest, stream->codec->extradata, stream->codec->extradata_size);
av_free(stream->codec->extradata);
memcpy(dest, codec_context->extradata, codec_context->extradata_size);
av_free(codec_context->extradata);
}
memcpy(dest+stream->codec->extradata_size, start_sequence, sizeof(start_sequence));
memcpy(dest+stream->codec->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
memset(dest+stream->codec->extradata_size+sizeof(start_sequence)+
packet_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
memcpy(dest+codec_context->extradata_size, start_sequence, sizeof(start_sequence));
memcpy(dest+codec_context->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
memset(dest+codec_context->extradata_size+sizeof(start_sequence)+
packet_size, 0,
#if LIBAVCODEC_VERSION_CHECK(57, 0, 0, 0, 0)
AV_INPUT_BUFFER_PADDING_SIZE
#else
FF_INPUT_BUFFER_PADDING_SIZE
#endif
);
stream->codec->extradata= dest;
stream->codec->extradata_size+= sizeof(start_sequence)+packet_size;
codec_context->extradata= dest;
codec_context->extradata_size+= sizeof(start_sequence)+packet_size;
// } else {
// av_log(codec, AV_LOG_ERROR, "Unable to allocate memory for extradata!");
// return AVERROR(ENOMEM);
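A compact sketch of the extradata handling rewritten above: each decoded sprop parameter set is appended behind an Annex-B start code, and the buffer is re-allocated with FFmpeg's required input padding (AV_INPUT_BUFFER_PADDING_SIZE on current releases, FF_INPUT_BUFFER_PADDING_SIZE before, as the version guards show). The helper name and arguments are illustrative.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
}
#include <cstring>

// Sketch for current FFmpeg; 'nal'/'nal_size' stand in for one decoded sprop set.
static int append_parameter_set(AVCodecContext *ctx, const uint8_t *nal, int nal_size) {
  static const uint8_t start_sequence[] = { 0, 0, 1 };
  int new_size = ctx->extradata_size + (int)sizeof(start_sequence) + nal_size;
  uint8_t *dest = (uint8_t *)av_malloc(new_size + AV_INPUT_BUFFER_PADDING_SIZE);
  if ( !dest )
    return AVERROR(ENOMEM);
  if ( ctx->extradata_size )
    memcpy(dest, ctx->extradata, ctx->extradata_size);
  memcpy(dest + ctx->extradata_size, start_sequence, sizeof(start_sequence));
  memcpy(dest + ctx->extradata_size + sizeof(start_sequence), nal, nal_size);
  memset(dest + new_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
  av_free(ctx->extradata);
  ctx->extradata = dest;
  ctx->extradata_size = new_size;
  return 0;
}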

View File

@@ -31,13 +31,11 @@
#include <string>
#include <vector>
class SessionDescriptor
{
class SessionDescriptor {
protected:
enum { PAYLOAD_TYPE_DYNAMIC=96 };
struct StaticPayloadDesc
{
struct StaticPayloadDesc {
int payloadType;
const char payloadName[6];
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
@@ -50,8 +48,7 @@ protected:
int autoChannels;
};
struct DynamicPayloadDesc
{
struct DynamicPayloadDesc {
const char payloadName[32];
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
AVMediaType codecType;
@@ -65,8 +62,7 @@ protected:
};
public:
class ConnInfo
{
class ConnInfo {
protected:
std::string mNetworkType;
std::string mAddressType;
@@ -78,8 +74,7 @@ public:
ConnInfo( const std::string &connInfo );
};
class BandInfo
{
class BandInfo {
protected:
std::string mType;
int mValue;
@@ -88,8 +83,7 @@ public:
BandInfo( const std::string &bandInfo );
};
class MediaDescriptor
{
class MediaDescriptor {
protected:
std::string mType;
int mPort;

View File

@@ -167,7 +167,11 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
video_out_ctx->time_base.den);
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
Monitor::Orientation orientation = monitor->getOrientation();
@@ -274,7 +278,11 @@ VideoStore::VideoStore(const char *filename_in, const char *format_in,
if (audio_out_stream) {
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
audio_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
audio_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
audio_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
}
} // end if audio_in_stream
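The same version split appears twice in this file; where a feature test is preferable to a version number, the flag macro itself can be probed, analogous to the #ifdef CODEC_FLAG2_FAST guard added earlier in this commit (a sketch, not ZoneMinder's code):

extern "C" {
#include <libavcodec/avcodec.h>
}

// AV_CODEC_FLAG_GLOBAL_HEADER is a macro, so its presence can be tested directly.
static void request_global_header(AVCodecContext *ctx) {
#ifdef AV_CODEC_FLAG_GLOBAL_HEADER
  ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
  ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}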

View File

@@ -204,7 +204,6 @@ bool Zone::CheckAlarms( const Image *delta_image ) {
int diff_width = diff_image->Width();
uint8_t* diff_buff = (uint8_t*)diff_image->Buffer();
uint8_t* pdiff;
const uint8_t* ppoly;
unsigned int pixel_diff_count = 0;
@@ -267,6 +266,7 @@ bool Zone::CheckAlarms( const Image *delta_image ) {
int bx1 = bx-1;
int by1 = by-1;
Debug( 5, "Checking for filtered pixels" );
if ( bx > 1 || by > 1 ) {
// Now remove any pixels smaller than our filter size
@@ -679,7 +679,7 @@ bool Zone::CheckAlarms( const Image *delta_image ) {
}
}
ppoly = pg_image->Buffer( lo_x2, y );
const uint8_t* ppoly = pg_image->Buffer( lo_x2, y );
for ( int x = lo_x2; x <= hi_x2; x++, pdiff++, ppoly++ ) {
if ( !*ppoly ) {
*pdiff = BLACK;

View File

@@ -161,7 +161,7 @@ int main(int argc, char *argv[]) {
Usage();
}
int modes = (device[0]?1:0 + host[0]?1:0 + file[0]?1:0 + (monitor_id > 0 ? 1 : 0));
int modes = ( (device[0]?1:0) + (host[0]?1:0) + (file[0]?1:0) + (monitor_id > 0 ? 1 : 0));
if ( modes > 1 ) {
fprintf(stderr, "Only one of device, host/port/path, file or monitor id allowed\n");
Usage();
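The change to the modes calculation above is a precedence fix: the conditional operator binds more loosely than +, so without parentheses the old expression collapsed to a single 0-or-1 value instead of a count. A tiny illustration with hypothetical flags:

#include <stdio.h>

int main(void) {
  int device = 1, host = 1;                         // pretend both options were given
  int wrong = device ? 1 : 0 + host ? 1 : 0;        // parses as device ? 1 : ((0 + host) ? 1 : 0) -> 1
  int right = (device ? 1 : 0) + (host ? 1 : 0);    // counts each mode independently -> 2
  printf("wrong=%d right=%d\n", wrong, right);
  return 0;
}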

View File

@@ -1,353 +0,0 @@
//
// ZoneMinder Image File Writer Implementation, $Date$, $Revision$
// Copyright (C) 2001-2008 Philip Coombes
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
//
/*
=head1 NAME
zmf - The ZoneMinder Frame daemon
=head1 SYNOPSIS
zmf -m <monitor_id>
zmf --monitor <monitor_id>
zmf -h
zmf --help
zmf -v
zmf --version
=head1 DESCRIPTION
This is an optional daemon that can run in concert with the Analysis daemon and
whose function it is to actually write captured frames to disk. This frees up
the Analysis daemon to do more analysis (!) and so keep up with the Capture
daemon better. If it isn't running or dies then the Analysis daemon just writes
them itself.
=head1 OPTIONS
-m, --monitor_id - ID of the monitor to use
-h, --help - Display usage information
-v, --version - Print the installed version of ZoneMinder
=cut
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <getopt.h>
#include <signal.h>
#include "zm.h"
#include "zm_db.h"
#include "zm_signal.h"
#include "zm_monitor.h"
#include "zmf.h"
int OpenSocket( int monitor_id )
{
int sd = socket( AF_UNIX, SOCK_STREAM, 0);
if ( sd < 0 )
{
Error( "Can't create socket: %s", strerror(errno) );
return( -1 );
}
char sock_path[PATH_MAX] = "";
snprintf( sock_path, sizeof(sock_path), "%s/zmf-%d.sock", config.path_socks, monitor_id );
if ( unlink( sock_path ) < 0 )
{
Warning( "Can't unlink '%s': %s", sock_path, strerror(errno) );
}
struct sockaddr_un addr;
strncpy( addr.sun_path, sock_path, sizeof(addr.sun_path) );
addr.sun_family = AF_UNIX;
if ( bind( sd, (struct sockaddr *)&addr, strlen(addr.sun_path)+sizeof(addr.sun_family)) < 0 )
{
Error( "Can't bind: %s", strerror(errno) );
exit( -1 );
}
if ( listen( sd, SOMAXCONN ) < 0 )
{
Error( "Can't listen: %s", strerror(errno) );
return( -1 );
}
struct sockaddr_un rem_addr;
socklen_t rem_addr_len = sizeof(rem_addr);
int new_sd = -1;
if ( (new_sd = accept( sd, (struct sockaddr *)&rem_addr, &rem_addr_len )) < 0 )
{
Error( "Can't accept: %s", strerror(errno) );
exit( -1 );
}
close( sd );
sd = new_sd;
Info( "Frame server socket open, awaiting images" );
return( sd );
}
int ReopenSocket( int &sd, int monitor_id )
{
close( sd );
return( sd = OpenSocket( monitor_id ) );
}
void Usage()
{
fprintf( stderr, "zmf -m <monitor_id>\n" );
fprintf( stderr, "Options:\n" );
fprintf( stderr, " -m, --monitor <monitor_id> : Specify which monitor to use\n" );
fprintf( stderr, " -h, --help : This screen\n" );
fprintf( stderr, " -v, --version : Report the installed version of ZoneMinder\n" );
exit( 0 );
}
int main( int argc, char *argv[] )
{
self = argv[0];
srand( getpid() * time( 0 ) );
int id = -1;
static struct option long_options[] = {
{"monitor", 1, 0, 'm'},
{"help", 0, 0, 'h'},
{"version", 0, 0, 'v'},
{0, 0, 0, 0}
};
while (1)
{
int option_index = 0;
int c = getopt_long (argc, argv, "m:h:v", long_options, &option_index);
if (c == -1)
{
break;
}
switch (c)
{
case 'm':
id = atoi(optarg);
break;
case 'h':
case '?':
Usage();
break;
case 'v':
std::cout << ZM_VERSION << "\n";
exit(0);
default:
//fprintf( stderr, "?? getopt returned character code 0%o ??\n", c );
break;
}
}
if (optind < argc)
{
fprintf( stderr, "Extraneous options, " );
while (optind < argc)
printf ("%s ", argv[optind++]);
printf ("\n");
Usage();
}
if ( id < 0 )
{
fprintf( stderr, "Bogus monitor %d\n", id );
Usage();
exit( 0 );
}
char log_id_string[16];
snprintf( log_id_string, sizeof(log_id_string), "m%d", id );
zmLoadConfig();
logInit( "zmf" );
hwcaps_detect();
Monitor *monitor = Monitor::Load( id, false, Monitor::QUERY );
if ( !monitor )
{
fprintf( stderr, "Can't find monitor with id of %d\n", id );
exit( -1 );
}
Storage *Storage = monitor->getStorage();
char capt_path[PATH_MAX];
char anal_path[PATH_MAX];
snprintf( capt_path, sizeof(capt_path), "%s/%d/%%s/%%0%dd-capture.jpg", Storage->Path(), monitor->Id(), config.event_image_digits );
snprintf( anal_path, sizeof(anal_path), "%s/%d/%%s/%%0%dd-analyse.jpg", Storage->Path(), monitor->Id(), config.event_image_digits );
zmSetDefaultTermHandler();
zmSetDefaultDieHandler();
sigset_t block_set;
sigemptyset( &block_set );
int sd = OpenSocket( monitor->Id() );
FrameHeader frame_header = { 0, 0, false, 0 };
//unsigned char *image_data = 0;
fd_set rfds;
struct timeval timeout;
timeout.tv_sec = 1;
timeout.tv_usec = 0;
while( 1 )
{
struct timeval temp_timeout = timeout;
FD_ZERO(&rfds);
FD_SET(sd, &rfds);
int n_found = select( sd+1, &rfds, NULL, NULL, &temp_timeout );
if( n_found == 0 )
{
Debug( 1, "Select timed out" );
continue;
}
else if ( n_found < 0)
{
Error( "Select error: %s", strerror(errno) );
ReopenSocket( sd, monitor->Id() );
continue;
}
sigprocmask( SIG_BLOCK, &block_set, 0 );
int n_bytes = read( sd, &frame_header, sizeof(frame_header) );
if ( n_bytes != sizeof(frame_header) )
{
if ( n_bytes < 0 )
{
Error( "Can't read frame header: %s", strerror(errno) );
}
else if ( n_bytes > 0 )
{
Error( "Incomplete read of frame header, %d bytes only", n_bytes );
}
else
{
Warning( "Socket closed at remote end" );
}
ReopenSocket( sd, monitor->Id() );
continue;
}
Debug( 1, "Read frame header, expecting %ld bytes of image", frame_header.image_length );
static unsigned char image_data[ZM_MAX_IMAGE_SIZE];
// Read for pipe and loop until bytes expected have been read or an error occurs
int bytes_read = 0;
do
{
n_bytes = read( sd, image_data+bytes_read, frame_header.image_length-bytes_read );
if (n_bytes < 0) break; // break on error
if (n_bytes < (int)frame_header.image_length)
{
// print some informational messages
if (bytes_read == 0)
{
Debug(4,"Image read : Short read %d bytes of %d expected bytes",n_bytes,frame_header.image_length);
}
else if (bytes_read+n_bytes == (int)frame_header.image_length)
{
Debug(5,"Image read : Read rest of short read: %d bytes read total of %d bytes",n_bytes,frame_header.image_length);
}
else
{
Debug(6,"Image read : continuing, read %d bytes (%d so far)", n_bytes, bytes_read+n_bytes);
}
}
bytes_read+= n_bytes;
} while (n_bytes>0 && (bytes_read < (ssize_t)frame_header.image_length) );
// Print errors if there was a problem
if ( n_bytes < 1 )
{
Error( "Only read %d bytes of %d\n", bytes_read, frame_header.image_length);
if ( n_bytes < 0 )
{
Error( "Can't read frame image data: %s", strerror(errno) );
}
else
{
Warning( "Socket closed at remote end" );
}
ReopenSocket( sd, monitor->Id() );
continue;
}
static char subpath[PATH_MAX] = "";
if ( config.use_deep_storage )
{
struct tm *time = localtime( &frame_header.event_time );
snprintf( subpath, sizeof(subpath), "%02d/%02d/%02d/%02d/%02d/%02d", time->tm_year-100, time->tm_mon+1, time->tm_mday, time->tm_hour, time->tm_min, time->tm_sec );
}
else
{
snprintf( subpath, sizeof(subpath), "%ld", frame_header.event_id );
}
static char path[PATH_MAX] = "";
snprintf( path, sizeof(path), frame_header.alarm_frame?anal_path:capt_path, subpath, frame_header.frame_id );
Debug( 1, "Got image, writing to %s", path );
FILE *fd = 0;
if ( (fd = fopen( path, "w" )) < 0 )
{
Error( "Can't fopen '%s': %s", path, strerror(errno) );
exit( -1 );
}
if ( 0 == fwrite( image_data, frame_header.image_length, 1, fd ) )
{
Error( "Can't fwrite image data: %s", strerror(errno) );
exit( -1 );
}
fclose( fd );
sigprocmask( SIG_UNBLOCK, &block_set, 0 );
}
logTerm();
zmDbClose();
}

View File

@@ -34,17 +34,13 @@ public function beforeFilter() {
public function index() {
$this->Event->recursive = -1;
$allowedMonitors=preg_split ('@,@', $this->Session->Read('allowedMonitors'),NULL, PREG_SPLIT_NO_EMPTY);
if (!empty($allowedMonitors))
{
$mon_options = array('Event.MonitorId' => $allowedMonitors);
}
else
{
$mon_options='';
}
$allowedMonitors=preg_split ('@,@', $this->Session->Read('allowedMonitors'),NULL, PREG_SPLIT_NO_EMPTY);
if (!empty($allowedMonitors)) {
$mon_options = array('Event.MonitorId' => $allowedMonitors);
} else {
$mon_options='';
}
if ($this->request->params['named']) {
$this->FilterComponent = $this->Components->load('Filter');
@@ -81,7 +77,6 @@ public function beforeFilter() {
//$thumbData = $this->createThumbnail($value['Event']['Id']);
$thumbData = "";
$events[$key]['thumbData'] = $thumbData;
}
$this->set(compact('events'));

View File

@@ -35,22 +35,24 @@ public function beforeFilter() {
* @return void
*/
public function index() {
$this->Monitor->recursive = 0;
$allowedMonitors=preg_split ('@,@', $this->Session->Read('allowedMonitors'),NULL, PREG_SPLIT_NO_EMPTY);
$this->Monitor->recursive = 0;
if (!empty($allowedMonitors))
{
$options = array('conditions'=>array('Monitor.Id'=> $allowedMonitors));
if ($this->request->params['named']) {
$this->FilterComponent = $this->Components->load('Filter');
$conditions = $this->FilterComponent->buildFilter($this->request->params['named']);
} else {
$conditions = array();
}
else
{
$options='';
$allowedMonitors=preg_split ('@,@', $this->Session->Read('allowedMonitors'),NULL, PREG_SPLIT_NO_EMPTY);
if (!empty($allowedMonitors)) {
$conditions['Monitor.Id' ] = $allowedMonitors;
}
$monitors = $this->Monitor->find('all',$options);
$this->set(array(
'monitors' => $monitors,
'_serialize' => array('monitors')
));
$monitors = $this->Monitor->find('all',array('conditions'=>$conditions));
$this->set(array(
'monitors' => $monitors,
'_serialize' => array('monitors')
));
}
/**

View File

@@ -0,0 +1,14 @@
<input type="hidden" name="newMonitor[Method]" value="<?php echo validHtmlStr($monitor->Method()) ?>"/>
<tr><td><?php echo translate('HostName') ?></td><td><input type="text" name="newMonitor[Host]" value="<?php echo validHtmlStr($monitor->Host()) ?>" size="36"/></td></tr>
<tr><td><?php echo translate('Port') ?></td><td><input type="number" name="newMonitor[Port]" value="<?php echo validHtmlStr($monitor->Port()) ?>" size="6"/></td></tr>
<tr><td><?php echo translate('Path') ?></td><td><input type="text" name="newMonitor[Path]" value="<?php echo validHtmlStr($monitor->Path()) ?>" size="36"/></td></tr>
<input type="hidden" name="newMonitor[User]" value="<?php echo validHtmlStr($monitor->User()) ?>"/>
<input type="hidden" name="newMonitor[Pass]" value="<?php echo validHtmlStr($monitor->Pass()) ?>"/>
<input type="hidden" name="newMonitor[Options]" value="<?php echo validHtmlStr($monitor->Options()) ?>"/>
<tr><td><?php echo translate('TargetColorspace') ?></td><td><select name="newMonitor[Colours]"><?php foreach ( $Colours as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $monitor->Colours()) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
<tr><td><?php echo translate('CaptureWidth') ?> (<?php echo translate('Pixels') ?>)</td><td><input type="number" name="newMonitor[Width]" value="<?php echo validHtmlStr($monitor->Width()) ?>" size="4" onkeyup="updateMonitorDimensions(this);"/></td></tr>
<tr><td><?php echo translate('CaptureHeight') ?> (<?php echo translate('Pixels') ?>)</td><td><input type="number" name="newMonitor[Height]" value="<?php echo validHtmlStr($monitor->Height()) ?>" size="4" onkeyup="updateMonitorDimensions(this);"/></td></tr>
<tr><td><?php echo translate('PreserveAspect') ?></td><td><input type="checkbox" name="preserveAspectRatio" value="1"/></td></tr>
<tr><td><?php echo translate('Orientation') ?></td><td><?php echo htmlselect( 'newMonitor[Orientation]', $orientations, $monitor->Orientation() );?></td></tr>
<input type="hidden" name="newMonitor[Deinterlacing]" value="<?php echo validHtmlStr($monitor->Deinterlacing()) ?>"/>
<input type="hidden" name="newMonitor[RTSPDescribe]" value="<?php echo validHtmlStr($monitor->RTSPDescribe()) ?>"/>

View File

@@ -207,11 +207,11 @@ for( $monitor_i = 0; $monitor_i < count($displayMonitors); $monitor_i += 1 ) {
<?php
if ( ZM_WEB_ID_ON_CONSOLE ) {
?>
<td class="colId"><?php echo makePopupLink( '?view=watch&amp;mid='.$monitor['Id'], 'zmWatch'.$monitor['Id'], array( 'watch', reScale( $monitor['Width'], $scale ), reScale( $monitor['Height'], $scale ) ), $monitor['Id'], $running && ($monitor['Function'] != 'None') && canView('Stream') ) ?></td>
<td class="colId"><?php echo makePopupLink( '?view=watch&amp;mid='.$monitor['Id'], 'zmWatch'.$monitor['Id'], array( 'watch', reScale( $monitor['Width'], $scale ), reScale( $monitor['Height'], $scale ) ), $monitor['Id'], ($monitor['Function'] != 'None') && canView('Stream') ) ?></td>
<?php
}
?>
<td class="colName"><?php echo makePopupLink( '?view=watch&amp;mid='.$monitor['Id'], 'zmWatch'.$monitor['Id'], array( 'watch', reScale( $monitor['Width'], $scale ), reScale( $monitor['Height'], $scale ) ), $monitor['Name'], $running && ($monitor['Function'] != 'None') && canView('Stream') ) ?></td>
<td class="colName"><?php echo makePopupLink( '?view=watch&amp;mid='.$monitor['Id'], 'zmWatch'.$monitor['Id'], array( 'watch', reScale( $monitor['Width'], $scale ), reScale( $monitor['Height'], $scale ) ), $monitor['Name'], ($monitor['Function'] != 'None') && canView('Stream') ) ?></td>
<td class="colFunction"><?php echo makePopupLink( '?view=function&amp;mid='.$monitor['Id'], 'zmFunction', 'function', '<span class="'.$fclass.'">'.translate('Fn'.$monitor['Function']).( empty($monitor['Enabled']) ? ', disabled' : '' ) .'</span>', canEdit( 'Monitors' ) ) ?></td>
<?php
if ( count($servers) ) { ?>

View File

@@ -827,7 +827,7 @@ switch ( $tab ) {
<?php
} else if ( $monitor->Type() == 'NVSocket' ) {
include('monitor_source_nvsocket.php');
include('_monitor_source_nvsocket.php');
} else if ( $monitor->Type() == 'Remote' ) {
?>
<tr><td><?php echo translate('RemoteProtocol') ?></td><td><?php echo htmlSelect( "newMonitor[Protocol]", $remoteProtocols, $monitor->Protocol(), "updateMethods( this );if(this.value=='rtsp'){\$('RTSPDescribe').setStyle('display','table-row');}else{\$('RTSPDescribe').hide();}" ); ?></td></tr>

View File

@@ -41,6 +41,10 @@ if ( ! empty($_REQUEST['eid'] ) ) {
$Event = new Event( $_REQUEST['eid'] );
$path = $Event->Path().'/'.$Event->DefaultVideo();
Logger::Debug("Path: $path");
} else if ( ! empty($_REQUEST['event_id'] ) ) {
$Event = new Event( $_REQUEST['event_id'] );
$path = $Event->Path().'/'.$Event->DefaultVideo();
Logger::Debug("Path: $path");
} else {
$errorText = 'No video path';
}