merge all the commits from the messed up iconnor_video branch

Isaac Connor 2016-04-05 17:14:46 -04:00
parent f73d8be765
commit 9a795432cf
39 changed files with 4487 additions and 4284 deletions


@ -4,7 +4,7 @@
#
cmake_minimum_required (VERSION 2.6)
project (zoneminder)
set(zoneminder_VERSION "1.29.0")
set(zoneminder_VERSION "1.30.0")
# make API version a minor of ZM version
set(zoneminder_API_VERSION "${zoneminder_VERSION}.1")


@ -3,7 +3,7 @@ ZoneMinder H264 Patch
[![Build Status](https://travis-ci.org/ZoneMinder/ZoneMinder.png?branch=feature-h264-videostorage)](https://travis-ci.org/ZoneMinder/ZoneMinder) [![Bountysource](https://api.bountysource.com/badge/team?team_id=204&style=bounties_received)](https://www.bountysource.com/teams/zoneminder/issues?utm_source=ZoneMinder&utm_medium=shield&utm_campaign=bounties_received)
##Feature-h264-videostorage Branch Details
This branch supports direct recording of h264 cameras into MP4 format using the h264 Passthrough option, but only with FFMPEG Monitors currently. It also provides h264 encoding from jpegs of any other monitor type and local or mpeg cameras. If you encounter any issues, please open an issue on GitHub and attach it to the h264 milestone. But do remember this is bleeding edge, so it will have problems.
This branch supports direct recording of h264 cameras into MP4 format using the h264 Passthrough option. It also provides h264 encoding for local or mpeg cameras. If you encounter any issues, please open an issue on GitHub and attach it to the h264 milestone. But do remember this is bleeding edge, so it will have problems.
Thanks to @chriswiggins and @mastertheknife for their work, @SteveGilvarry is now maintaining this branch and welcomes any assistance.
**The following SQL changes are required; they will be merged into zmupdate once we are ready to merge this branch to master.**


@ -193,6 +193,7 @@ CREATE TABLE `Events` (
`Length` decimal(10,2) NOT NULL default '0.00',
`Frames` int(10) unsigned default NULL,
`AlarmFrames` int(10) unsigned default NULL,
`DefaultVideo` VARCHAR( 64 ) NOT NULL,
`TotScore` int(10) unsigned NOT NULL default '0',
`AvgScore` smallint(5) unsigned default '0',
`MaxScore` smallint(5) unsigned default '0',
@ -344,6 +345,10 @@ CREATE TABLE `Monitors` (
`Palette` int(10) unsigned NOT NULL default '0',
`Orientation` enum('0','90','180','270','hori','vert') NOT NULL default '0',
`Deinterlacing` int(10) unsigned NOT NULL default '0',
`SaveJPEGs` TINYINT NOT NULL DEFAULT '3' ,
`VideoWriter` TINYINT NOT NULL DEFAULT '0',
`EncoderParameters` TEXT NOT NULL,
`RecordAudio` TINYINT NOT NULL DEFAULT '0',
`RTSPDescribe` tinyint(1) unsigned NOT NULL default '0',
`Brightness` mediumint(7) NOT NULL default '-1',
`Contrast` mediumint(7) NOT NULL default '-1',


@ -1,3 +1,9 @@
zoneminder (1.29.0+h264-wily-SNAPSHOT2016033101) wily; urgency=medium
*
-- Isaac Connor <iconnor@connortechnology.com> Thu, 31 Mar 2016 10:29:41 -0400
zoneminder (1.28.1+1-vivid-SNAPSHOT2015081701) vivid; urgency=medium
* include api, switch to cmake build


@ -21,6 +21,7 @@ Build-Depends: debhelper (>= 9), dh-systemd, python-sphinx | python3-sphinx, apa
,libphp-serialization-perl
,libsys-mmap-perl [!hurd-any]
,libwww-perl
,libx264-dev, libmp4v2-dev
# Unbundled (dh_linktree):
,libjs-jquery
,libjs-mootools
@ -61,6 +62,7 @@ Depends: ${shlibs:Depends}, ${misc:Depends}, ${perl:Depends}
,policykit-1
,rsyslog | system-log-daemon
,zip
,libmp4v2-2
Recommends: ${misc:Recommends}
,libapache2-mod-php5 | php5-fpm
,mysql-server | virtual-mysql-server


@ -20,7 +20,7 @@
#include "zm.h"
#include "zm_camera.h"
Camera::Camera( int p_id, SourceType p_type, int p_width, int p_height, int p_colours, int p_subpixelorder, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture ) :
Camera::Camera( int p_id, SourceType p_type, int p_width, int p_height, int p_colours, int p_subpixelorder, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
id( p_id ),
type( p_type ),
width( p_width),
@ -31,7 +31,8 @@ Camera::Camera( int p_id, SourceType p_type, int p_width, int p_height, int p_co
hue( p_hue ),
colour( p_colour ),
contrast( p_contrast ),
capture( p_capture )
capture( p_capture ),
record_audio( p_record_audio )
{
pixels = width * height;
imagesize = pixels * colours;

7 src/zm_camera.h Executable file → Normal file

@ -47,9 +47,10 @@ protected:
int colour;
int contrast;
bool capture;
bool record_audio;
public:
Camera( int p_id, SourceType p_type, int p_width, int p_height, int p_colours, int p_subpixelorder, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture );
Camera( int p_id, SourceType p_type, int p_width, int p_height, int p_colours, int p_subpixelorder, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
virtual ~Camera();
int getId() const { return( id ); }
@ -74,13 +75,13 @@ public:
bool CanCapture() const { return( capture ); }
bool SupportsNativeVideo() const { return( type == FFMPEG_SRC ); }
bool SupportsNativeVideo() const { return( (type == FFMPEG_SRC )||(type == REMOTE_SRC)); }
virtual int PrimeCapture() { return( 0 ); }
virtual int PreCapture()=0;
virtual int Capture( Image &image )=0;
virtual int PostCapture()=0;
virtual int CaptureAndRecord( Image &image, bool recording, char* event_directory)=0;
virtual int CaptureAndRecord( Image &image, bool recording, char* event_directory)=0;
};
#endif // ZM_CAMERA_H
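
Every camera subclass touched in this commit follows the same pattern: the constructor grows a trailing bool p_record_audio, forwards it to the Camera base class, and the protected record_audio member is then available to CaptureAndRecord(). A minimal sketch with a hypothetical subclass (not part of this commit) shows that shape once, using only the declarations above:

#include "zm_camera.h"

// Hypothetical subclass for illustration only; the real ones follow below.
class ExampleCamera : public Camera {
public:
  ExampleCamera( int p_id, int p_width, int p_height, int p_colours,
                 int p_brightness, int p_contrast, int p_hue, int p_colour,
                 bool p_capture, bool p_record_audio ) :
    Camera( p_id, FILE_SRC, p_width, p_height, p_colours,
            ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR( p_colours ),
            p_brightness, p_contrast, p_hue, p_colour,
            p_capture, p_record_audio )  // new flag passed straight through to the base class
  {
  }
  int PreCapture() { return( 0 ); }
  int Capture( Image &image ) { return( 0 ); }
  int PostCapture() { return( 0 ); }
  int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {
    // record_audio (a protected Camera member) decides whether audio packets are kept.
    return( 0 );
  }
};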


@ -30,8 +30,8 @@ const char* content_type_match = "Content-Type:";
size_t content_length_match_len;
size_t content_type_match_len;
cURLCamera::cURLCamera( int p_id, const std::string &p_path, const std::string &p_user, const std::string &p_pass, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture ) :
Camera( p_id, CURL_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture ),
cURLCamera::cURLCamera( int p_id, const std::string &p_path, const std::string &p_user, const std::string &p_pass, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
Camera( p_id, CURL_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
mPath( p_path ), mUser( p_user ), mPass ( p_pass ), bTerminate( false ), bReset( false ), mode ( MODE_UNSET )
{
@ -313,6 +313,7 @@ int cURLCamera::PostCapture()
int cURLCamera::CaptureAndRecord( Image &image, bool recording, char* event_directory )
{
Error("Capture and Record not implemented for the cURL camera type");
// Nothing to do here
return( 0 );
}


@ -65,7 +65,7 @@ protected:
pthread_cond_t request_complete_cond;
public:
cURLCamera( int p_id, const std::string &path, const std::string &username, const std::string &password, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture );
cURLCamera( int p_id, const std::string &path, const std::string &username, const std::string &password, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~cURLCamera();
const std::string &Path() const { return( mPath ); }

0 src/zm_event.cpp Executable file → Normal file

0 src/zm_event.h Executable file → Normal file

134 src/zm_ffmpeg_camera.cpp Executable file → Normal file

@ -36,8 +36,8 @@ extern "C"{
#include <pthread.h>
#endif
FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture ) :
Camera( p_id, FFMPEG_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture ),
FfmpegCamera::FfmpegCamera( int p_id, const std::string &p_path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
Camera( p_id, FFMPEG_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
mPath( p_path ),
mMethod( p_method ),
mOptions( p_options )
@ -168,7 +168,7 @@ int FfmpegCamera::Capture( Image &image )
(avResult == -110)
)
{
Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf);
Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf );
ReopenFfmpeg();
}
@ -176,6 +176,7 @@ int FfmpegCamera::Capture( Image &image )
return( -1 );
}
Debug( 5, "Got packet from stream %d", packet.stream_index );
// What about audio stream? Maybe someday we could do sound detection...
if ( packet.stream_index == mVideoStreamId )
{
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
@ -187,37 +188,38 @@ int FfmpegCamera::Capture( Image &image )
Debug( 4, "Decoded video packet at frame %d", frameCount );
if ( frameComplete )
{
if ( frameComplete ) {
Debug( 3, "Got frame %d", frameCount );
avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
#if HAVE_LIBSWSCALE
if(mConvertContext == NULL) {
mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
if(mConvertContext == NULL) {
mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
if(mConvertContext == NULL)
Fatal( "Unable to create conversion context for %s", mPath.c_str() );
}
if(mConvertContext == NULL)
Fatal( "Unable to create conversion context for %s", mPath.c_str() );
}
if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE
frameCount++;
}
}
} // end if frameComplete
} else {
Debug( 4, "Different stream_index %d", packet.stream_index );
} // end if packet.stream_index == mVideoStreamId
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
av_packet_unref( &packet);
#else
av_free_packet( &packet );
#endif
}
} // end while ! frameComplete
return (0);
}
} // FfmpegCamera::Capture
int FfmpegCamera::PostCapture()
{
@ -312,6 +314,7 @@ int FfmpegCamera::OpenFfmpeg() {
// Find first video stream present
mVideoStreamId = -1;
mAudioStreamId = -1;
for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
{
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
@ -320,23 +323,32 @@ int FfmpegCamera::OpenFfmpeg() {
if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
{
mVideoStreamId = i;
break;
if ( mVideoStreamId == -1 ) {
mVideoStreamId = i;
// if we break, then we won't find the audio stream
continue;
} else {
Debug(2, "Have another video stream." );
}
}
if(mAudioStreamId == -1) //FIXME best way to copy all other streams?
{
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
#else
if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
#endif
{
{
if ( mAudioStreamId == -1 ) {
mAudioStreamId = i;
}
} else {
Debug(2, "Have another audio stream." );
}
}
}
if ( mVideoStreamId == -1 )
Fatal( "Unable to locate video stream in %s", mPath.c_str() );
if ( mAudioStreamId == -1 )
Debug( 3, "Unable to locate audio stream in %s", mPath.c_str() );
Debug ( 1, "Found video stream" );
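
Condensed, the intent of the stream-selection change above is: remember the first video stream and the first audio stream, keep scanning instead of breaking out of the loop, and merely log any additional streams. A sketch of that logic using the same old-style libav accessors as the diff (the LIBAVCODEC version #ifdefs are omitted, and this is not a drop-in replacement):

// Sketch of the stream-selection intent in FfmpegCamera::OpenFfmpeg().
mVideoStreamId = -1;
mAudioStreamId = -1;
for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ ) {
  AVMediaType codec_type = mFormatContext->streams[i]->codec->codec_type;
  if ( codec_type == AVMEDIA_TYPE_VIDEO ) {
    if ( mVideoStreamId == -1 )
      mVideoStreamId = i;   // no break: the audio stream may come later
    else
      Debug( 2, "Have another video stream." );
  } else if ( codec_type == AVMEDIA_TYPE_AUDIO ) {
    if ( mAudioStreamId == -1 )
      mAudioStreamId = i;
    else
      Debug( 2, "Have another audio stream." );
  }
}
if ( mVideoStreamId == -1 )
  Fatal( "Unable to locate video stream in %s", mPath.c_str() );
if ( mAudioStreamId == -1 )
  Debug( 3, "Unable to locate audio stream in %s", mPath.c_str() );
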
@ -521,17 +533,15 @@ int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_fi
/* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if(directbuffer == NULL) {
if( directbuffer == NULL ) {
Error("Failed requesting writeable buffer for the captured image.");
return (-1);
}
int frameComplete = false;
while ( !frameComplete )
{
while ( !frameComplete ) {
int avResult = av_read_frame( mFormatContext, &packet );
if ( avResult < 0 )
{
if ( avResult < 0 ) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
av_strerror(avResult, errbuf, AV_ERROR_MAX_STRING_SIZE);
if (
@ -539,8 +549,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_fi
(avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
// Check for Connection failure.
(avResult == -110)
)
{
) {
Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf);
ReopenFfmpeg();
}
@ -549,8 +558,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_fi
return( -1 );
}
Debug( 5, "Got packet from stream %d", packet.stream_index );
if ( packet.stream_index == mVideoStreamId )
{
if ( packet.stream_index == mVideoStreamId ) {
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 )
#else
@ -560,8 +568,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_fi
Debug( 4, "Decoded video packet at frame %d", frameCount );
if ( frameComplete )
{
if ( frameComplete ) {
Debug( 3, "Got frame %d", frameCount );
avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
@ -572,21 +579,23 @@ int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_fi
//TODO I think we need to store the key frame location for seeking as part of the event
//Video recording
if(recording && !wasRecording){
if ( recording && !wasRecording ) {
//Instantiate the video storage module
videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime);
wasRecording = true;
strcpy(oldDirectory, event_file);
}else if(!recording && wasRecording && videoStore){
} else if ( ( ! recording ) && wasRecording && videoStore ) {
Info("Deleting videoStore instance");
delete videoStore;
videoStore = NULL;
}
//The directory we are recording to is no longer tied to the current event. Need to re-init the videostore with the correct directory and start recording again
if(recording && wasRecording && (strcmp(oldDirectory, event_file)!=0) && (packet.flags & AV_PKT_FLAG_KEY) ){ //don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?...if we store our key frame location with the event will that be enough?
if ( recording && wasRecording && (strcmp(oldDirectory, event_file) != 0 ) && (packet.flags & AV_PKT_FLAG_KEY) ) {
// don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?...
// if we store our key frame location with the event will that be enough?
Info("Re-starting video storage module");
if(videoStore){
delete videoStore;
@ -597,7 +606,7 @@ int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_fi
strcpy(oldDirectory, event_file);
}
if(videoStore && recording){
if ( videoStore && recording ) {
//Write the packet to our video store
int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt);
if(ret<0){//Less than zero and we skipped a frame
@ -607,32 +616,37 @@ int FfmpegCamera::CaptureAndRecord( Image &image, bool recording, char* event_fi
}
#if HAVE_LIBSWSCALE
if(mConvertContext == NULL) {
mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
if(mConvertContext == NULL)
Fatal( "Unable to create conversion context for %s", mPath.c_str() );
}
if ( mConvertContext == NULL ) {
mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
if ( mConvertContext == NULL )
Fatal( "Unable to create conversion context for %s", mPath.c_str() );
}
if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE
frameCount++;
}
}else if(packet.stream_index == mAudioStreamId){//FIXME best way to copy all other streams
if(videoStore && recording){
//Write the packet to our video store
int ret = videoStore->writeAudioFramePacket(&packet, mFormatContext->streams[packet.stream_index]); //FIXME no relevance of last key frame
if(ret<0){//Less than zero and we skipped a frame
av_free_packet( &packet );
return 0;
}
frameCount++;
} // end if frameComplete
} else if ( packet.stream_index == mAudioStreamId ) { //FIXME best way to copy all other streams
if ( videoStore && recording ) {
if ( record_audio ) {
Debug(4, "Recording audio packet" );
//Write the packet to our video store
int ret = videoStore->writeAudioFramePacket(&packet, mFormatContext->streams[packet.stream_index]); //FIXME no relevance of last key frame
if ( ret < 0 ) {//Less than zero and we skipped a frame
av_free_packet( &packet );
return 0;
}
} else {
Debug(4, "Not recording audio packet" );
}
}
}
av_free_packet( &packet );
}
} // end while ! frameComplete
return (frameCount);
}
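
Stripped of the decoding and scaling details, the recording half of CaptureAndRecord() above is a small state machine around the videoStore pointer. An outline of the per-packet decisions, using the names from the diff (the re-open path falls partly between the hunks shown, so treat this as a summary rather than the exact code):

// Outline of the recording decisions made for each video packet.
if ( recording && !wasRecording ) {
  // An event just started: open a VideoStore for its directory.
  videoStore = new VideoStore( (const char *)event_file, "mp4",
      mFormatContext->streams[mVideoStreamId],
      mAudioStreamId == -1 ? NULL : mFormatContext->streams[mAudioStreamId],
      startTime );
  wasRecording = true;
  strcpy( oldDirectory, event_file );
} else if ( !recording && wasRecording && videoStore ) {
  // Recording stopped: close out the store.
  delete videoStore;
  videoStore = NULL;
}

// Still recording, but the event directory changed: re-open, and only on a
// key frame so the new file starts with a decodable picture.
if ( recording && wasRecording && ( strcmp( oldDirectory, event_file ) != 0 )
     && ( packet.flags & AV_PKT_FLAG_KEY ) ) {
  Info( "Re-starting video storage module" );
  if ( videoStore ) {
    delete videoStore;
    videoStore = NULL;
  }
  // ...a fresh VideoStore for event_file is then created (those lines sit in
  // unchanged context between the hunks above), and the directory is remembered:
  strcpy( oldDirectory, event_file );
}

if ( videoStore && recording )
  videoStore->writeVideoFramePacket( &packet, mFormatContext->streams[mVideoStreamId] );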

4 src/zm_ffmpeg_camera.h Executable file → Normal file

@ -73,7 +73,7 @@ protected:
int64_t startTime;
public:
FfmpegCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture );
FfmpegCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~FfmpegCamera();
const std::string &Path() const { return( mPath ); }
@ -86,7 +86,7 @@ public:
int PrimeCapture();
int PreCapture();
int Capture( Image &image );
int CaptureAndRecord( Image &image, bool recording, char* event_directory );
int CaptureAndRecord( Image &image, bool recording, char* event_directory );
int PostCapture();
};


@ -34,7 +34,7 @@
#include "zm.h"
#include "zm_file_camera.h"
FileCamera::FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture ) : Camera( p_id, FILE_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture )
FileCamera::FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) : Camera( p_id, FILE_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio )
{
strncpy( path, p_path, sizeof(path) );
if ( capture )


@ -36,7 +36,7 @@ protected:
char path[PATH_MAX];
public:
FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture );
FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~FileCamera();
const char *Path() const { return( path ); }
@ -46,7 +46,7 @@ public:
int PreCapture();
int Capture( Image &image );
int PostCapture();
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
};
#endif // ZM_FILE_CAMERA_H


@ -61,8 +61,8 @@ void LibvlcUnlockBuffer(void* opaque, void* picture, void *const *planes)
}
}
LibvlcCamera::LibvlcCamera( int p_id, const std::string &p_path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture ) :
Camera( p_id, LIBVLC_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture ),
LibvlcCamera::LibvlcCamera( int p_id, const std::string &p_path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
Camera( p_id, LIBVLC_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
mPath( p_path ),
mMethod( p_method ),
mOptions( p_options )


@ -57,7 +57,7 @@ protected:
libvlc_media_player_t *mLibvlcMediaPlayer;
public:
LibvlcCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture );
LibvlcCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~LibvlcCamera();
const std::string &Path() const { return( mPath ); }
@ -70,7 +70,7 @@ public:
int PrimeCapture();
int PreCapture();
int Capture( Image &image );
int CaptureAndRecord( Image &image, bool recording, char* event_directory );
int CaptureAndRecord( Image &image, bool recording, char* event_directory );
int PostCapture();
};


@ -286,8 +286,26 @@ AVFrame **LocalCamera::capturePictures = 0;
LocalCamera *LocalCamera::last_camera = NULL;
LocalCamera::LocalCamera( int p_id, const std::string &p_device, int p_channel, int p_standard, bool p_v4l_multi_buffer, unsigned int p_v4l_captures_per_frame, const std::string &p_method, int p_width, int p_height, int p_colours, int p_palette, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, unsigned int p_extras) :
Camera( p_id, LOCAL_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture ),
LocalCamera::LocalCamera(
int p_id,
const std::string &p_device,
int p_channel,
int p_standard,
bool p_v4l_multi_buffer,
unsigned int p_v4l_captures_per_frame,
const std::string &p_method,
int p_width,
int p_height,
int p_colours,
int p_palette,
int p_brightness,
int p_contrast,
int p_hue,
int p_colour,
bool p_capture,
bool p_record_audio,
unsigned int p_extras) :
Camera( p_id, LOCAL_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
device( p_device ),
channel( p_channel ),
standard( p_standard ),


@ -116,7 +116,25 @@ protected:
static LocalCamera *last_camera;
public:
LocalCamera( int p_id, const std::string &device, int p_channel, int p_format, bool v4lmultibuffer, unsigned int v4lcapturesperframe, const std::string &p_method, int p_width, int p_height, int p_colours, int p_palette, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, unsigned int p_extras = 0);
LocalCamera(
int p_id,
const std::string &device,
int p_channel,
int p_format,
bool v4lmultibuffer,
unsigned int v4lcapturesperframe,
const std::string &p_method,
int p_width,
int p_height,
int p_colours,
int p_palette,
int p_brightness,
int p_contrast,
int p_hue,
int p_colour,
bool p_capture,
bool p_record_audio,
unsigned int p_extras = 0);
~LocalCamera();
void Initialise();
@ -138,7 +156,7 @@ public:
int PreCapture();
int Capture( Image &image );
int PostCapture();
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose );
};

7414 src/zm_monitor.cpp Executable file → Normal file

File diff suppressed because it is too large


@ -87,7 +87,7 @@ public:
} State;
protected:
typedef std::set<Zone *> ZoneSet;
typedef std::set<Zone *> ZoneSet;
typedef enum { GET_SETTINGS=0x1, SET_SETTINGS=0x2, RELOAD=0x4, SUSPEND=0x10, RESUME=0x20 } Action;
@ -96,36 +96,36 @@ protected:
/* sizeof(SharedData) expected to be 336 bytes on 32bit and 64bit */
typedef struct
{
uint32_t size; /* +0 */
uint32_t last_write_index; /* +4 */
uint32_t last_read_index; /* +8 */
uint32_t state; /* +12 */
uint32_t last_event; /* +16 */
uint32_t action; /* +20 */
int32_t brightness; /* +24 */
int32_t hue; /* +28 */
int32_t colour; /* +32 */
int32_t contrast; /* +36 */
int32_t alarm_x; /* +40 */
int32_t alarm_y; /* +44 */
uint8_t valid; /* +48 */
uint8_t active; /* +49 */
uint8_t signal; /* +50 */
uint8_t format; /* +51 */
uint32_t imagesize; /* +52 */
uint32_t epadding1; /* +56 */
uint32_t epadding2; /* +60 */
uint32_t size; /* +0 */
uint32_t last_write_index; /* +4 */
uint32_t last_read_index; /* +8 */
uint32_t state; /* +12 */
uint32_t last_event; /* +16 */
uint32_t action; /* +20 */
int32_t brightness; /* +24 */
int32_t hue; /* +28 */
int32_t colour; /* +32 */
int32_t contrast; /* +36 */
int32_t alarm_x; /* +40 */
int32_t alarm_y; /* +44 */
uint8_t valid; /* +48 */
uint8_t active; /* +49 */
uint8_t signal; /* +50 */
uint8_t format; /* +51 */
uint32_t imagesize; /* +52 */
uint32_t epadding1; /* +56 */
uint32_t epadding2; /* +60 */
/*
** This keeps 32bit time_t and 64bit time_t identical and compatible as long as time is before 2038.
** Shared memory layout should be identical for both 32bit and 64bit and is multiples of 16.
*/
union { /* +64 */
time_t last_write_time;
uint64_t extrapad1;
union { /* +64 */
time_t last_write_time;
uint64_t extrapad1;
};
union { /* +72 */
time_t last_read_time;
uint64_t extrapad2;
union { /* +72 */
time_t last_read_time;
uint64_t extrapad2;
};
uint8_t control_state[256]; /* +80 */
@ -134,8 +134,7 @@ protected:
typedef enum { TRIGGER_CANCEL, TRIGGER_ON, TRIGGER_OFF } TriggerState;
/* sizeof(TriggerData) expected to be 560 on 32bit & and 64bit */
typedef struct
{
typedef struct {
uint32_t size;
uint32_t trigger_state;
uint32_t trigger_score;
@ -146,29 +145,26 @@ protected:
} TriggerData;
/* sizeof(Snapshot) expected to be 16 bytes on 32bit and 32 bytes on 64bit */
struct Snapshot
{
struct Snapshot {
struct timeval *timestamp;
Image *image;
void* padding;
};
//TODO: Technically we can't exclude this struct when people don't have avformat as the Memory.pm module doesn't know about avformat
//TODO: Technically we can't exclude this struct when people don't have avformat as the Memory.pm module doesn't know about avformat
#if 1
//sizeOf(VideoStoreData) expected to be 4104 bytes on 32bit and 64bit
typedef struct
{
uint32_t size;
char event_file[4096];
uint32_t recording; //bool arch dependent so use uint32 instead
//uint32_t frameNumber;
//sizeOf(VideoStoreData) expected to be 4104 bytes on 32bit and 64bit
typedef struct {
uint32_t size;
char event_file[4096];
uint32_t recording; //bool arch dependent so use uint32 instead
//uint32_t frameNumber;
} VideoStoreData;
} VideoStoreData;
#endif // HAVE_LIBAVFORMAT
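
The offset comments and size notes above (SharedData expected at 336 bytes, VideoStoreData at 4104) are load-bearing: zms and the Perl Memory.pm module read this shared memory by fixed layout, which is also why the TODO above worries about excluding VideoStoreData. Hypothetical compile-time checks, assuming nothing follows control_state in SharedData beyond what is shown here, could pin those expectations down:

// Hypothetical layout checks (C++11), not part of this commit. SharedData and
// VideoStoreData are protected members of Monitor, so the asserts would sit
// inside the class body, where static_assert is permitted.
#include <cstddef>   // offsetof

static_assert( sizeof( SharedData ) == 336,
    "SharedData must stay 336 bytes on both 32bit and 64bit" );
static_assert( offsetof( SharedData, imagesize ) == 52,
    "imagesize expected at offset +52" );
static_assert( offsetof( SharedData, control_state ) == 80,
    "control_state expected at offset +80" );
static_assert( sizeof( VideoStoreData ) == 4104,
    "VideoStoreData must stay 4104 bytes" );
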
class MonitorLink
{
class MonitorLink {
protected:
unsigned int id;
char name[64];
@ -178,16 +174,16 @@ protected:
#if ZM_MEM_MAPPED
int map_fd;
char mem_file[PATH_MAX];
char mem_file[PATH_MAX];
#else // ZM_MEM_MAPPED
int shm_id;
int shm_id;
#endif // ZM_MEM_MAPPED
off_t mem_size;
off_t mem_size;
unsigned char *mem_ptr;
volatile SharedData *shared_data;
volatile TriggerData *trigger_data;
volatile VideoStoreData *video_store_data;
volatile VideoStoreData *video_store_data;
int last_state;
int last_event;
@ -196,21 +192,17 @@ protected:
MonitorLink( int p_id, const char *p_name );
~MonitorLink();
inline int Id() const
{
inline int Id() const {
return( id );
}
inline const char *Name() const
{
inline const char *Name() const {
return( name );
}
inline bool isConnected() const
{
inline bool isConnected() const {
return( connected );
}
inline time_t getLastConnectTime() const
{
inline time_t getLastConnectTime() const {
return( last_connect_time );
}
@ -227,58 +219,59 @@ protected:
unsigned int id;
char name[64];
unsigned int server_id;
Function function; // What the monitor is doing
bool enabled; // Whether the monitor is enabled or asleep
unsigned int width; // Normally the same as the camera, but not if partly rotated
unsigned int height; // Normally the same as the camera, but not if partly rotated
Function function; // What the monitor is doing
bool enabled; // Whether the monitor is enabled or asleep
unsigned int width; // Normally the same as the camera, but not if partly rotated
unsigned int height; // Normally the same as the camera, but not if partly rotated
bool v4l_multi_buffer;
unsigned int v4l_captures_per_frame;
Orientation orientation; // Whether the image has to be rotated at all
Orientation orientation; // Whether the image has to be rotated at all
unsigned int deinterlacing;
int savejpegspref;
int videowriterpref;
std::string encoderparams;
std::vector<EncoderParameter_t> encoderparamsvec;
bool record_audio; // Whether to store the audio that we receive
int brightness; // The statically saved brightness of the camera
int contrast; // The statically saved contrast of the camera
int hue; // The statically saved hue of the camera
int colour; // The statically saved colour of the camera
char event_prefix[64]; // The prefix applied to event names as they are created
char label_format[64]; // The format of the timestamp on the images
Coord label_coord; // The coordinates of the timestamp on the images
int label_size; // Size of the timestamp on the images
int image_buffer_count; // Size of circular image buffer, at least twice the size of the pre_event_count
int pre_event_buffer_count; // Size of dedicated circular pre event buffer used when analysis is not performed at capturing framerate,
// value is pre_event_count + alarm_frame_count - 1
int warmup_count; // How many images to process before looking for events
int pre_event_count; // How many images to hold and prepend to an alarm event
int post_event_count; // How many unalarmed images must occur before the alarm state is reset
int brightness; // The statically saved brightness of the camera
int contrast; // The statically saved contrast of the camera
int hue; // The statically saved hue of the camera
int colour; // The statically saved colour of the camera
char event_prefix[64]; // The prefix applied to event names as they are created
char label_format[64]; // The format of the timestamp on the images
Coord label_coord; // The coordinates of the timestamp on the images
int label_size; // Size of the timestamp on the images
int image_buffer_count; // Size of circular image buffer, at least twice the size of the pre_event_count
int pre_event_buffer_count; // Size of dedicated circular pre event buffer used when analysis is not performed at capturing framerate,
// value is pre_event_count + alarm_frame_count - 1
int warmup_count; // How many images to process before looking for events
int pre_event_count; // How many images to hold and prepend to an alarm event
int post_event_count; // How many unalarmed images must occur before the alarm state is reset
int stream_replay_buffer; // How many frames to store to support DVR functions, IGNORED from this object, passed directly into zms now
int section_length; // How long events should last in continuous modes
bool adaptive_skip; // Whether to use the newer adaptive algorithm for this monitor
int frame_skip; // How many frames to skip in continuous modes
int motion_frame_skip; // How many frames to skip in motion detection
double analysis_fps; // Target framerate for video analysis
unsigned int analysis_update_delay; // How long we wait before updating analysis parameters
int capture_delay; // How long we wait between capture frames
int alarm_capture_delay; // How long we wait between capture frames when in alarm state
int alarm_frame_count; // How many alarm frames are required before an event is triggered
int fps_report_interval; // How many images should be captured/processed between reporting the current FPS
int ref_blend_perc; // Percentage of new image going into reference image.
int alarm_ref_blend_perc; // Percentage of new image going into reference image during alarm.
bool track_motion; // Whether this monitor tries to track detected motion
Rgb signal_check_colour; // The colour that the camera will emit when no video signal detected
bool embed_exif; // Whether to embed Exif data into each image frame or not
int section_length; // How long events should last in continuous modes
bool adaptive_skip; // Whether to use the newer adaptive algorithm for this monitor
int frame_skip; // How many frames to skip in continuous modes
int motion_frame_skip; // How many frames to skip in motion detection
double analysis_fps; // Target framerate for video analysis
unsigned int analysis_update_delay; // How long we wait before updating analysis parameters
int capture_delay; // How long we wait between capture frames
int alarm_capture_delay; // How long we wait between capture frames when in alarm state
int alarm_frame_count; // How many alarm frames are required before an event is triggered
int fps_report_interval; // How many images should be captured/processed between reporting the current FPS
int ref_blend_perc; // Percentage of new image going into reference image.
int alarm_ref_blend_perc; // Percentage of new image going into reference image during alarm.
bool track_motion; // Whether this monitor tries to track detected motion
Rgb signal_check_colour; // The colour that the camera will emit when no video signal detected
bool embed_exif; // Whether to embed Exif data into each image frame or not
double fps;
Image delta_image;
Image ref_image;
Image alarm_image; // Used in creating analysis images, will be initialized in Analysis
Image write_image; // Used when creating snapshot images
Image alarm_image; // Used in creating analysis images, will be initialized in Analysis
Image write_image; // Used when creating snapshot images
Purpose purpose; // What this monitor has been created to do
Purpose purpose; // What this monitor has been created to do
int event_count;
int image_count;
int ready_count;
@ -290,22 +283,22 @@ protected:
time_t start_time;
time_t last_fps_time;
time_t auto_resume_time;
unsigned int last_motion_score;
unsigned int last_motion_score;
EventCloseMode event_close_mode;
EventCloseMode event_close_mode;
#if ZM_MEM_MAPPED
int map_fd;
char mem_file[PATH_MAX];
char mem_file[PATH_MAX];
#else // ZM_MEM_MAPPED
int shm_id;
int shm_id;
#endif // ZM_MEM_MAPPED
off_t mem_size;
unsigned char *mem_ptr;
SharedData *shared_data;
TriggerData *trigger_data;
VideoStoreData *video_store_data;
VideoStoreData *video_store_data;
Snapshot *image_buffer;
Snapshot next_buffer; /* Used by four field deinterlacing */
@ -329,54 +322,85 @@ protected:
public:
// OurCheckAlarms seems to be unused. Check it on zm_monitor.cpp for more info.
//bool OurCheckAlarms( Zone *zone, const Image *pImage );
Monitor( int p_id, const char *p_name, unsigned int p_server_id, int p_function, bool p_enabled, const char *p_linked_monitors, Camera *p_camera, int p_orientation, unsigned int p_deinterlacing, int p_savejpegs, int p_videowriter, std::string p_encoderparams, const char *p_event_prefix, const char *p_label_format, const Coord &p_label_coord, int label_size, int p_image_buffer_count, int p_warmup_count, int p_pre_event_count, int p_post_event_count, int p_stream_replay_buffer, int p_alarm_frame_count, int p_section_length, int p_frame_skip, int p_motion_frame_skip, double p_analysis_fps, unsigned int p_analysis_update_delay, int p_capture_delay, int p_alarm_capture_delay, int p_fps_report_interval, int p_ref_blend_perc, int p_alarm_ref_blend_perc, bool p_track_motion, Rgb p_signal_check_colour, bool p_embed_exif, Purpose p_purpose, int p_n_zones=0, Zone *p_zones[]=0 );
Monitor(
int p_id,
const char *p_name,
unsigned int p_server_id,
int p_function,
bool p_enabled,
const char *p_linked_monitors,
Camera *p_camera,
int p_orientation,
unsigned int p_deinterlacing,
int p_savejpegs,
int p_videowriter,
std::string p_encoderparams,
bool p_record_audio,
const char *p_event_prefix,
const char *p_label_format,
const Coord &p_label_coord,
int label_size,
int p_image_buffer_count,
int p_warmup_count,
int p_pre_event_count,
int p_post_event_count,
int p_stream_replay_buffer,
int p_alarm_frame_count,
int p_section_length,
int p_frame_skip,
int p_motion_frame_skip,
double p_analysis_fps,
unsigned int p_analysis_update_delay,
int p_capture_delay,
int p_alarm_capture_delay,
int p_fps_report_interval,
int p_ref_blend_perc,
int p_alarm_ref_blend_perc,
bool p_track_motion,
Rgb p_signal_check_colour,
bool p_embed_exif,
Purpose p_purpose,
int p_n_zones=0,
Zone *p_zones[]=0
);
~Monitor();
void AddZones( int p_n_zones, Zone *p_zones[] );
void AddPrivacyBitmask( Zone *p_zones[] );
bool connect();
inline int ShmValid() const
{
inline int ShmValid() const {
return( shared_data->valid );
}
inline int Id() const
{
inline int Id() const {
return( id );
}
inline const char *Name() const
{
inline const char *Name() const {
return( name );
}
inline Function GetFunction() const
{
inline Function GetFunction() const {
return( function );
}
inline bool Enabled()
{
inline bool Enabled() {
if ( function <= MONITOR )
return( false );
return( enabled );
}
inline const char *EventPrefix() const
{
inline const char *EventPrefix() const {
return( event_prefix );
}
inline bool Ready()
{
inline bool Ready() {
if ( function <= MONITOR )
return( false );
return( image_count > ready_count );
}
inline bool Active()
{
inline bool Active() {
if ( function <= MONITOR )
return( false );
return( enabled && shared_data->active );
}
inline bool Exif()
{
inline bool Exif() {
return( embed_exif );
}
@ -417,17 +441,14 @@ public:
int actionColour( int p_colour=-1 );
int actionContrast( int p_contrast=-1 );
inline int PrimeCapture()
{
inline int PrimeCapture() {
return( camera->PrimeCapture() );
}
inline int PreCapture()
{
inline int PreCapture() {
return( camera->PreCapture() );
}
int Capture();
int PostCapture()
{
int PostCapture() {
return( camera->PostCapture() );
}
@ -456,7 +477,7 @@ public:
static int LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose purpose );
#endif // HAVE_LIBAVFORMAT
static Monitor *Load( unsigned int id, bool load_zones, Purpose purpose );
//void writeStreamImage( Image *image, struct timeval *timestamp, int scale, int mag, int x, int y );
//void writeStreamImage( Image *image, struct timeval *timestamp, int scale, int mag, int x, int y );
//void StreamImages( int scale=100, int maxfps=10, time_t ttl=0, int msq_id=0 );
//void StreamImagesRaw( int scale=100, int maxfps=10, time_t ttl=0 );
//void StreamImagesZip( int scale=100, int maxfps=10, time_t ttl=0 );
@ -470,53 +491,48 @@ public:
#define MOD_ADD( var, delta, limit ) (((var)+(limit)+(delta))%(limit))
class MonitorStream : public StreamBase
{
class MonitorStream : public StreamBase {
protected:
typedef struct SwapImage {
bool valid;
struct timeval timestamp;
char file_name[PATH_MAX];
} SwapImage;
typedef struct SwapImage {
bool valid;
struct timeval timestamp;
char file_name[PATH_MAX];
} SwapImage;
private:
SwapImage *temp_image_buffer;
int temp_image_buffer_count;
int temp_read_index;
int temp_write_index;
SwapImage *temp_image_buffer;
int temp_image_buffer_count;
int temp_read_index;
int temp_write_index;
protected:
time_t ttl;
time_t ttl;
protected:
int playback_buffer;
bool delayed;
int playback_buffer;
bool delayed;
int frame_count;
int frame_count;
protected:
bool checkSwapPath( const char *path, bool create_path );
bool checkSwapPath( const char *path, bool create_path );
bool sendFrame( const char *filepath, struct timeval *timestamp );
bool sendFrame( Image *image, struct timeval *timestamp );
void processCommand( const CmdMsg *msg );
bool sendFrame( const char *filepath, struct timeval *timestamp );
bool sendFrame( Image *image, struct timeval *timestamp );
void processCommand( const CmdMsg *msg );
public:
MonitorStream() : playback_buffer( 0 ), delayed( false ), frame_count( 0 )
{
}
void setStreamBuffer( int p_playback_buffer )
{
playback_buffer = p_playback_buffer;
}
void setStreamTTL( time_t p_ttl )
{
ttl = p_ttl;
}
bool setStreamStart( int monitor_id )
{
return loadMonitor( monitor_id );
}
MonitorStream() : playback_buffer( 0 ), delayed( false ), frame_count( 0 ) {
}
void setStreamBuffer( int p_playback_buffer ) {
playback_buffer = p_playback_buffer;
}
void setStreamTTL( time_t p_ttl ) {
ttl = p_ttl;
}
bool setStreamStart( int monitor_id ) {
return loadMonitor( monitor_id );
}
void runStream();
};


@ -21,8 +21,23 @@
#include "zm_utils.h"
RemoteCamera::RemoteCamera( int p_id, const std::string &p_protocol, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture ) :
Camera( p_id, REMOTE_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture ),
RemoteCamera::RemoteCamera(
int p_id,
const std::string &p_protocol,
const std::string &p_host,
const std::string &p_port,
const std::string &p_path,
int p_width,
int p_height,
int p_colours,
int p_brightness,
int p_contrast,
int p_hue,
int p_colour,
bool p_capture,
bool p_record_audio
) :
Camera( p_id, REMOTE_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
protocol( p_protocol ),
host( p_host ),
port( p_port ),


@ -55,7 +55,22 @@ protected:
struct addrinfo *hp;
public:
RemoteCamera( int p_id, const std::string &p_proto, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture );
RemoteCamera(
int p_id,
const std::string &p_proto,
const std::string &p_host,
const std::string &p_port,
const std::string &p_path,
int p_width,
int p_height,
int p_colours,
int p_brightness,
int p_contrast,
int p_hue,
int p_colour,
bool p_capture,
bool p_record_audio
);
virtual ~RemoteCamera();
const std::string &Protocol() const { return( protocol ); }
@ -73,7 +88,7 @@ public:
virtual int PreCapture() = 0;
virtual int Capture( Image &image ) = 0;
virtual int PostCapture() = 0;
virtual int CaptureAndRecord( Image &image, bool recording, char* event_directory )=0;
virtual int CaptureAndRecord( Image &image, bool recording, char* event_directory )=0;
};
#endif // ZM_REMOTE_CAMERA_H


@ -31,8 +31,8 @@
#include <sys/filio.h> // FIONREAD and friends
#endif
RemoteCameraHttp::RemoteCameraHttp( int p_id, const std::string &p_method, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture ) :
RemoteCamera( p_id, "http", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture )
RemoteCameraHttp::RemoteCameraHttp( int p_id, const std::string &p_method, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
RemoteCamera( p_id, "http", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio )
{
sd = -1;


@ -45,7 +45,7 @@ protected:
enum { SIMPLE, REGEXP } method;
public:
RemoteCameraHttp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture );
RemoteCameraHttp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~RemoteCameraHttp();
void Initialise();
@ -58,7 +58,7 @@ public:
int PreCapture();
int Capture( Image &image );
int PostCapture();
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
};
#endif // ZM_REMOTE_CAMERA_HTTP_H


@ -28,22 +28,22 @@
#include <sys/types.h>
#include <sys/socket.h>
RemoteCameraRtsp::RemoteCameraRtsp( int p_id, const std::string &p_method, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture ) :
RemoteCamera( p_id, "rtsp", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture ),
rtsp_describe( p_rtsp_describe ),
rtspThread( 0 )
RemoteCameraRtsp::RemoteCameraRtsp( int p_id, const std::string &p_method, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
RemoteCamera( p_id, "rtsp", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
rtsp_describe( p_rtsp_describe ),
rtspThread( 0 )
{
if ( p_method == "rtpUni" )
method = RtspThread::RTP_UNICAST;
else if ( p_method == "rtpMulti" )
method = RtspThread::RTP_MULTICAST;
else if ( p_method == "rtpRtsp" )
method = RtspThread::RTP_RTSP;
else if ( p_method == "rtpRtspHttp" )
method = RtspThread::RTP_RTSP_HTTP;
else
Fatal( "Unrecognised method '%s' when creating RTSP camera %d", p_method.c_str(), id );
if ( p_method == "rtpUni" )
method = RtspThread::RTP_UNICAST;
else if ( p_method == "rtpMulti" )
method = RtspThread::RTP_MULTICAST;
else if ( p_method == "rtpRtsp" )
method = RtspThread::RTP_RTSP;
else if ( p_method == "rtpRtspHttp" )
method = RtspThread::RTP_RTSP_HTTP;
else
Fatal( "Unrecognised method '%s' when creating RTSP camera %d", p_method.c_str(), id );
if ( capture )
{
@ -52,11 +52,14 @@ RemoteCameraRtsp::RemoteCameraRtsp( int p_id, const std::string &p_method, const
mFormatContext = NULL;
mVideoStreamId = -1;
mAudioStreamId = -1;
mCodecContext = NULL;
mCodec = NULL;
mRawFrame = NULL;
mFrame = NULL;
frameCount = 0;
wasRecording = false;
startTime=0;
#if HAVE_LIBSWSCALE
mConvertContext = NULL;
@ -80,26 +83,26 @@ RemoteCameraRtsp::RemoteCameraRtsp( int p_id, const std::string &p_method, const
RemoteCameraRtsp::~RemoteCameraRtsp()
{
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
av_frame_free( &mFrame );
av_frame_free( &mRawFrame );
av_frame_free( &mFrame );
av_frame_free( &mRawFrame );
#else
av_freep( &mFrame );
av_freep( &mRawFrame );
av_freep( &mFrame );
av_freep( &mRawFrame );
#endif
#if HAVE_LIBSWSCALE
if ( mConvertContext )
{
sws_freeContext( mConvertContext );
mConvertContext = NULL;
}
if ( mConvertContext )
{
sws_freeContext( mConvertContext );
mConvertContext = NULL;
}
#endif
if ( mCodecContext )
{
avcodec_close( mCodecContext );
mCodecContext = NULL; // Freed by avformat_free_context in the destructor of RtspThread class
}
if ( mCodecContext )
{
avcodec_close( mCodecContext );
mCodecContext = NULL; // Freed by avformat_free_context in the destructor of RtspThread class
}
if ( capture )
{
@ -109,106 +112,109 @@ RemoteCameraRtsp::~RemoteCameraRtsp()
void RemoteCameraRtsp::Initialise()
{
RemoteCamera::Initialise();
RemoteCamera::Initialise();
int max_size = width*height*colours;
// This allocates a buffer able to hold a raw frame, which is a little arbitrary. Might be nice to get some
// decent data on how large a buffer is really needed.
buffer.size( max_size );
if ( logDebugging() )
av_log_set_level( AV_LOG_DEBUG );
else
av_log_set_level( AV_LOG_QUIET );
if ( logDebugging() )
av_log_set_level( AV_LOG_DEBUG );
else
av_log_set_level( AV_LOG_QUIET );
av_register_all();
av_register_all();
Connect();
Connect();
}
void RemoteCameraRtsp::Terminate()
{
Disconnect();
Disconnect();
}
int RemoteCameraRtsp::Connect()
{
rtspThread = new RtspThread( id, method, protocol, host, port, path, auth, rtsp_describe );
rtspThread = new RtspThread( id, method, protocol, host, port, path, auth, rtsp_describe );
rtspThread->start();
rtspThread->start();
return( 0 );
return( 0 );
}
int RemoteCameraRtsp::Disconnect()
{
if ( rtspThread )
{
rtspThread->stop();
rtspThread->join();
delete rtspThread;
rtspThread = 0;
}
return( 0 );
if ( rtspThread )
{
rtspThread->stop();
rtspThread->join();
delete rtspThread;
rtspThread = 0;
}
return( 0 );
}
int RemoteCameraRtsp::PrimeCapture()
{
Debug( 2, "Waiting for sources" );
for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ )
{
usleep( 100000 );
}
if ( !rtspThread->hasSources() )
Fatal( "No RTSP sources" );
Debug( 2, "Waiting for sources" );
for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ )
{
usleep( 100000 );
}
if ( !rtspThread->hasSources() )
Fatal( "No RTSP sources" );
Debug( 2, "Got sources" );
Debug( 2, "Got sources" );
mFormatContext = rtspThread->getFormatContext();
mFormatContext = rtspThread->getFormatContext();
// Find first video stream present
mVideoStreamId = -1;
// Find first video stream present
mVideoStreamId = -1;
for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ )
// Find the first video stream.
for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ )
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
{
mVideoStreamId = i;
break;
}
if ( mVideoStreamId == -1 )
Fatal( "Unable to locate video stream" );
{
mVideoStreamId = i;
break;
}
if ( mVideoStreamId == -1 )
Fatal( "Unable to locate video stream" );
// Get a pointer to the codec context for the video stream
mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
// Get a pointer to the codec context for the video stream
mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
// Find the decoder for the video stream
mCodec = avcodec_find_decoder( mCodecContext->codec_id );
if ( mCodec == NULL )
Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id );
// Find the decoder for the video stream
mCodec = avcodec_find_decoder( mCodecContext->codec_id );
if ( mCodec == NULL )
Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id );
// Open codec
// Open codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
if ( avcodec_open( mCodecContext, mCodec ) < 0 )
if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
Panic( "Can't open codec" );
Panic( "Can't open codec" );
// Allocate space for the native video frame
// Allocate space for the native video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
mRawFrame = av_frame_alloc();
mRawFrame = av_frame_alloc();
#else
mRawFrame = avcodec_alloc_frame();
mRawFrame = avcodec_alloc_frame();
#endif
// Allocate space for the converted video frame
// Allocate space for the converted video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
mFrame = av_frame_alloc();
mFrame = av_frame_alloc();
#else
mFrame = avcodec_alloc_frame();
mFrame = avcodec_alloc_frame();
#endif
if(mRawFrame == NULL || mFrame == NULL)
@ -229,27 +235,25 @@ int RemoteCameraRtsp::PrimeCapture()
}
#else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
*/
return( 0 );
return( 0 );
}
int RemoteCameraRtsp::PreCapture()
{
if ( !rtspThread->isRunning() )
return( -1 );
if ( !rtspThread->hasSources() )
{
Error( "Cannot precapture, no RTP sources" );
return( -1 );
}
return( 0 );
int RemoteCameraRtsp::PreCapture() {
if ( !rtspThread->isRunning() )
return( -1 );
if ( !rtspThread->hasSources() )
{
Error( "Cannot precapture, no RTP sources" );
return( -1 );
}
return( 0 );
}
int RemoteCameraRtsp::Capture( Image &image )
{
int RemoteCameraRtsp::Capture( Image &image ) {
AVPacket packet;
uint8_t* directbuffer;
int frameComplete = false;
@ -261,111 +265,279 @@ int RemoteCameraRtsp::Capture( Image &image )
return (-1);
}
while ( true )
{
buffer.clear();
if ( !rtspThread->isRunning() )
return (-1);
while ( true ) {
buffer.clear();
if ( !rtspThread->isRunning() )
return (-1);
if ( rtspThread->getFrame( buffer ) )
{
Debug( 3, "Read frame %d bytes", buffer.size() );
Debug( 4, "Address %p", buffer.head() );
Hexdump( 4, buffer.head(), 16 );
if ( rtspThread->getFrame( buffer ) ) {
Debug( 3, "Read frame %d bytes", buffer.size() );
Debug( 4, "Address %p", buffer.head() );
Hexdump( 4, buffer.head(), 16 );
if ( !buffer.size() )
return( -1 );
if ( !buffer.size() )
return( -1 );
if(mCodecContext->codec_id == AV_CODEC_ID_H264)
{
// SPS and PPS frames should be saved and appended to IDR frames
int nalType = (buffer.head()[3] & 0x1f);
if(mCodecContext->codec_id == AV_CODEC_ID_H264) {
// SPS and PPS frames should be saved and appended to IDR frames
int nalType = (buffer.head()[3] & 0x1f);
// SPS
if(nalType == 7)
{
lastSps = buffer;
continue;
}
// PPS
else if(nalType == 8)
{
lastPps = buffer;
continue;
}
// IDR
else if(nalType == 5)
{
buffer += lastSps;
buffer += lastPps;
}
}
// SPS The SPS NAL unit contains parameters that apply to a series of consecutive coded video pictures
if(nalType == 7)
{
lastSps = buffer;
continue;
}
// PPS The PPS NAL unit contains parameters that apply to the decoding of one or more individual pictures inside a coded video sequence
else if(nalType == 8)
{
lastPps = buffer;
continue;
}
// IDR
else if(nalType == 5)
{
buffer += lastSps;
buffer += lastPps;
}
}
av_init_packet( &packet );
av_init_packet( &packet );
while ( !frameComplete && buffer.size() > 0 )
{
packet.data = buffer.head();
packet.size = buffer.size();
while ( !frameComplete && buffer.size() > 0 ) {
packet.data = buffer.head();
packet.size = buffer.size();
// So I think this is the magic decode step. Result is a raw image?
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
#else
int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
#endif
if ( len < 0 ) {
Error( "Error while decoding frame %d", frameCount );
Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
buffer.clear();
continue;
}
Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
//if ( buffer.size() < 400 )
//Hexdump( 0, buffer.head(), buffer.size() );
buffer -= len;
}
// At this point we either have a complete frame or have exhausted the buffer; if the buffer runs out without a complete frame, the outer loop fetches the next frame from the RTSP thread.
if ( frameComplete ) {
Debug( 3, "Got frame %d", frameCount );
avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height );
#if HAVE_LIBSWSCALE
if(mConvertContext == NULL) {
mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
if(mConvertContext == NULL)
Fatal( "Unable to create conversion context");
}
if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
frameCount++;
} /* frame complete */
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
av_packet_unref( &packet );
#else
av_free_packet( &packet );
#endif
} /* getFrame() */
if(frameComplete)
return (0);
} // end while true
// can never get here.
return (0) ;
}
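// Illustrative sketch (not part of this patch): how the NAL unit type tested above is
// obtained. With the 3-byte Annex-B start code (00 00 01) prepended by the RTP layer,
// the NAL header is the 4th byte of the frame and its low 5 bits carry the type; SPS (7)
// and PPS (8) are cached so they can be prepended to the next IDR slice (5), keeping
// every key frame self-decodable. The names below are hypothetical helpers, not
// ZoneMinder symbols.
#include <stdint.h>
enum HypotheticalNalType { NAL_IDR_SLICE = 5, NAL_SPS = 7, NAL_PPS = 8 };
static inline int hypothetical_nal_type( const uint8_t *annexb_frame ) {
  // annexb_frame is assumed to begin with the 3-byte start code 00 00 01
  return annexb_frame[3] & 0x1f;
}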
//Function to handle capture and store
int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* event_file ) {
AVPacket packet;
uint8_t* directbuffer;
int frameComplete = false;
/* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if(directbuffer == NULL) {
Error("Failed requesting writeable buffer for the captured image.");
return (-1);
}
while ( true ) {
buffer.clear();
if ( !rtspThread->isRunning() )
return (-1);
if ( rtspThread->getFrame( buffer ) ) {
Debug( 3, "Read frame %d bytes", buffer.size() );
Debug( 4, "Address %p", buffer.head() );
Hexdump( 4, buffer.head(), 16 );
if ( !buffer.size() )
return( -1 );
if(mCodecContext->codec_id == AV_CODEC_ID_H264) {
// SPS and PPS frames should be saved and appended to IDR frames
int nalType = (buffer.head()[3] & 0x1f);
// SPS
if(nalType == 7) {
lastSps = buffer;
continue;
}
// PPS
else if(nalType == 8) {
lastPps = buffer;
continue;
}
// IDR
else if(nalType == 5) {
buffer += lastSps;
buffer += lastPps;
}
} // end if H264, what about other codecs?
av_init_packet( &packet );
// Why are we checking for it being the video stream
if ( packet.stream_index == mVideoStreamId ) {
while ( !frameComplete && buffer.size() > 0 ) {
packet.data = buffer.head();
packet.size = buffer.size();
// So this does the decode
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
#else
int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
#endif
if ( len < 0 )
{
Error( "Error while decoding frame %d", frameCount );
Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
buffer.clear();
continue;
}
Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
//if ( buffer.size() < 400 )
//Hexdump( 0, buffer.head(), buffer.size() );
if ( len < 0 ) {
Error( "Error while decoding frame %d", frameCount );
Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
buffer.clear();
continue;
}
Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
//if ( buffer.size() < 400 )
//Hexdump( 0, buffer.head(), buffer.size() );
buffer -= len;
} // end while get & decode a frame
}
if ( frameComplete ) {
Debug( 3, "Got frame %d", frameCount );
avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height );
//Video recording
if ( recording && !wasRecording ) {
//Instantiate the video storage module
videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime);
wasRecording = true;
strcpy(oldDirectory, event_file);
} else if ( !recording && wasRecording && videoStore ) {
// Why are we deleting the videostore? Because for some reason we are no longer recording. How does that happen?
Info("Deleting videoStore instance");
delete videoStore;
videoStore = NULL;
}
//The directory we are recording to is no longer tied to the current event. Need to re-init the videostore with the correct directory and start recording again
if ( recording && wasRecording && (strcmp(oldDirectory, event_file)!=0) && (packet.flags & AV_PKT_FLAG_KEY) ) {
// Don't open a new videostore until we're on a key frame. Would this require an offset adjustment for the event as a result? If we store our key frame location with the event, will that be enough?
Info("Re-starting video storage module");
if ( videoStore ) {
delete videoStore;
videoStore = NULL;
}
videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime);
strcpy( oldDirectory, event_file );
}
if ( videoStore && recording ) {
//Write the packet to our video store
int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt);
if ( ret < 0 ) {//Less than zero and we skipped a frame
av_free_packet( &packet );
return 0;
}
}
#if HAVE_LIBSWSCALE
if(mConvertContext == NULL) {
mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
if(mConvertContext == NULL)
Fatal( "Unable to create conversion context");
}
if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
frameCount++;
} /* frame complete */
} else if ( packet.stream_index == mAudioStreamId ) {
Debug( 4, "Got audio packet" );
if ( videoStore && recording ) {
if ( record_audio ) {
Debug( 4, "Storing Audio packet" );
//Write the packet to our video store
int ret = videoStore->writeAudioFramePacket(&packet, mFormatContext->streams[packet.stream_index]); //FIXME no relevance of last key frame
if ( ret < 0 ) { //Less than zero and we skipped a frame
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
av_packet_unref( &packet );
#else
av_free_packet( &packet );
#endif
return 0;
}
} else {
Debug( 4, "Not storing audio" );
}
}
} // end if video or audio packet
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
av_packet_unref( &packet );
#else
av_free_packet( &packet );
#endif
} /* getFrame() */
if(frameComplete)
return (0);
}
return (0) ;
}
if(frameComplete)
return (0);
} // end while true
return (0) ;
} // int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* event_file )
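// Illustrative sketch (not part of this patch): the three VideoStore transitions that
// CaptureAndRecord() drives, reduced to a self-contained toy so the ordering is easier
// to follow. RecState and update_store_state are hypothetical; the real code constructs
// and deletes VideoStore objects and copies event_file into oldDirectory as shown above.
#include <string>
struct RecState {
  bool storeOpen = false;      // stands in for videoStore != NULL
  bool wasRecording = false;
  std::string oldDirectory;
};
static void update_store_state( RecState &s, bool recording, const std::string &event_file, bool key_frame ) {
  if ( recording && !s.wasRecording ) {
    s.storeOpen = true;        // recording just started: open a store for this event
    s.wasRecording = true;
    s.oldDirectory = event_file;
  } else if ( !recording && s.wasRecording && s.storeOpen ) {
    s.storeOpen = false;       // recording stopped: close the store
  } else if ( recording && s.wasRecording && s.oldDirectory != event_file && key_frame ) {
    s.oldDirectory = event_file;  // event rolled over: the real code reopens the store here, but only on a key frame
  }
}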
int RemoteCameraRtsp::PostCapture()
{
return( 0 );
}
#endif // HAVE_LIBAVFORMAT

View File

@ -26,6 +26,7 @@
#include "zm_utils.h"
#include "zm_rtsp.h"
#include "zm_ffmpeg.h"
#include "zm_videostore.h"
//
// Class representing 'rtsp' cameras, i.e. those which are
@ -40,11 +41,11 @@ protected:
int rtsp_sd;
int rtp_sd;
int rtcp_sd;
bool rtsp_describe;
Buffer buffer;
Buffer lastSps;
Buffer lastPps;
RtspThread::RtspMethod method;
@ -55,31 +56,36 @@ protected:
#if HAVE_LIBAVFORMAT
AVFormatContext *mFormatContext;
int mVideoStreamId;
int mAudioStreamId;
AVCodecContext *mCodecContext;
AVCodec *mCodec;
AVFrame *mRawFrame;
AVFrame *mFrame;
_AVPIXELFORMAT imagePixFormat;
#endif // HAVE_LIBAVFORMAT
bool wasRecording;
VideoStore *videoStore;
char oldDirectory[4096];
int64_t startTime;
#if HAVE_LIBSWSCALE
struct SwsContext *mConvertContext;
#endif
public:
RemoteCameraRtsp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture );
RemoteCameraRtsp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~RemoteCameraRtsp();
void Initialise();
void Terminate();
int Connect();
int Disconnect();
int PrimeCapture();
int PreCapture();
int Capture( Image &image );
int PostCapture();
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
int CaptureAndRecord( Image &image, bool recording, char* event_directory );
};
#endif // ZM_REMOTE_CAMERA_RTSP_H
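// Illustrative usage sketch (not part of this patch): constructing the camera with the
// constructor widened above. Every literal value here is made up; the trailing argument
// is the new p_record_audio flag, and in ZoneMinder the real caller is the Monitor
// loading code.
#include "zm_remote_camera_rtsp.h"
static RemoteCameraRtsp *hypothetical_make_camera() {
  return new RemoteCameraRtsp(
      1,                   // monitor id
      "rtpRtsp",           // transport method
      "192.168.1.10", "554", "/stream1",
      1280, 720,           // width, height
      false,               // rtsp_describe
      4,                   // colours
      -1, -1, -1, -1,      // brightness, contrast, hue, colour
      true,                // capture
      true );              // p_record_audio, added by this patch
}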

View File

@ -262,6 +262,11 @@ bool RtpSource::handlePacket( const unsigned char *packet, size_t packetLen )
int rtpHeaderSize = 12 + rtpHeader->cc * 4;
// No need to check for nal type as non fragmented packets already have 001 start sequence appended
bool h264FragmentEnd = (mCodecId == AV_CODEC_ID_H264) && (packet[rtpHeaderSize+1] & 0x40);
// M stands for Marker; it is the 8th bit
// The interpretation of the marker is defined by a profile. It is intended
// to allow significant events such as frame boundaries to be marked in the
// packet stream. A profile may define additional marker bits or specify
// that there is no marker bit by changing the number of bits in the payload type field.
bool thisM = rtpHeader->m || h264FragmentEnd;
if ( updateSeq( ntohs(rtpHeader->seqN) ) )
@ -275,15 +280,18 @@ bool RtpSource::handlePacket( const unsigned char *packet, size_t packetLen )
if( mCodecId == AV_CODEC_ID_H264 )
{
int nalType = (packet[rtpHeaderSize] & 0x1f);
Debug( 3, "Have H264 frame: nal type is %d", nalType );
switch (nalType)
{
case 24:
case 24: // STAP-A
{
extraHeader = 2;
break;
}
case 25: case 26: case 27:
case 25: // STAP-B
case 26: // MTAP-16
case 27: // MTAP-24
{
extraHeader = 3;
break;
@ -304,6 +312,9 @@ bool RtpSource::handlePacket( const unsigned char *packet, size_t packetLen )
extraHeader = 2;
break;
}
default: {
Debug(3, "Unhandled nalType %d", nalType );
}
}
// Append NAL frame start code
@ -311,6 +322,8 @@ bool RtpSource::handlePacket( const unsigned char *packet, size_t packetLen )
mFrame.append( "\x0\x0\x1", 3 );
}
mFrame.append( packet+rtpHeaderSize+extraHeader, packetLen-rtpHeaderSize-extraHeader );
} else {
Debug( 3, "NOT H264 frame: type is %d", mCodecId );
}
Hexdump( 4, mFrame.head(), 16 );
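// Illustrative sketch (not part of this patch): the RTP fixed-header fields the code
// above relies on (RFC 3550). The marker (M) bit is the top bit of the second byte, and
// the payload starts after 12 bytes plus 4 bytes per CSRC entry, matching
// rtpHeaderSize = 12 + rtpHeader->cc * 4 above. The helper names are hypothetical, not
// the struct the code dereferences as rtpHeader.
#include <stdint.h>
#include <stddef.h>
static inline bool example_marker_bit( const uint8_t *packet ) {
  return ( packet[1] & 0x80 ) != 0;   // the "M" bit described above
}
static inline size_t example_payload_offset( const uint8_t *packet ) {
  int cc = packet[0] & 0x0f;          // CSRC count: low 4 bits of the first byte
  return 12 + (size_t)cc * 4;         // fixed header plus CSRC list
}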

View File

@ -64,7 +64,7 @@ X264MP4Writer::X264MP4Writer(const char* p_path, const unsigned int p_width, con
if(zm_pf == 0) {
Error("Unable to match ffmpeg pixelformat");
}
codec_pf = AV_PIX_FMT_YUV420P;
codec_pf = PIX_FMT_YUV420P;
swscaleobj.SetDefaults(zm_pf, codec_pf, width, height);

View File

@ -120,8 +120,8 @@ protected:
/* SWScale */
SWScale swscaleobj;
enum _AVPIXELFORMAT zm_pf;
enum _AVPIXELFORMAT codec_pf;
enum PixelFormat zm_pf;
enum PixelFormat codec_pf;
size_t codec_imgsize;
size_t zm_imgsize;

0
src/zm_videostore.cpp Executable file → Normal file
View File

0
src/zm_videostore.h Executable file → Normal file
View File

View File

@ -1 +1 @@
1.29.0
1.30.0

View File

@ -1,33 +0,0 @@
# Define the line ending behavior of the different file extensions
# Set default behaviour, in case users don't have core.autocrlf set.
* text=auto
# Explicitly declare text files we want to always be normalized and converted
# to native line endings on checkout.
*.php text
*.default text
*.ctp text
*.sql text
*.md text
*.po text
*.js text
*.css text
*.ini text
*.properties text
*.txt text
*.xml text
*.yml text
.htaccess text
# Declare files that will always have CRLF line endings on checkout.
*.bat eol=crlf
# Declare files that will always have LF line endings on checkout.
*.pem eol=lf
# Denote all files that are truly binary and should not be modified.
*.png binary
*.jpg binary
*.gif binary
*.ico binary
*.mo binary

21
web/api/.gitignore vendored
View File

@ -1,21 +0,0 @@
# User specific & automatically generated files #
#################################################
/app/Config/database.php
/app/tmp
/lib/Cake/Console/Templates/skel/tmp/
/plugins
/vendors
/build
/dist
/tags
# OS generated files #
######################
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
Icon?
ehthumbs.db
Thumbs.db

View File

@ -1153,30 +1153,38 @@ function zmaCheck( $monitor )
function getImageSrc( $event, $frame, $scale=SCALE_BASE, $captureOnly=false, $overwrite=false )
{
$eventPath = getEventPath( $event );
$eventPath = ZM_DIR_EVENTS.'/'.getEventPath( $event );
if ( !is_array($frame) )
$frame = array( 'FrameId'=>$frame, 'Type'=>'' );
//echo "S:$scale, CO:$captureOnly<br>";
$currEvent = dbFetchOne( 'SELECT M.SaveJPEGs FROM Events AS E INNER JOIN Monitors AS M ON E.MonitorId = M.Id WHERE E.Id = '.$event['Id'] );
if ( $currEvent['SaveJPEGs'] == "4" )
if ( file_exists( $eventPath.'/snapshot.jpg' ) ) {
$captImage = "snapshot.jpg";
else
} else {
$captImage = sprintf( "%0".ZM_EVENT_IMAGE_DIGITS."d-capture.jpg", $frame['FrameId'] );
if ( ! file_exists( $eventPath.'/'.$captImage ) ) {
# Generate the frame JPG
if ( $event['DefaultVideo'] ) {
$command = 'ffmpeg -v 0 -i '.$eventPath.'/'.$event['DefaultVideo'].' -vf "select=gte(n\\,'.$frame['FrameId'].'),setpts=PTS-STARTPTS" '.$eventPath.'/'.$captImage;
system( $command, $output, $retval );
} else {
Error("Can't create frame images from video becuase there is no video file for this event " );
}
}
}
$captPath = $eventPath.'/'.$captImage;
$thumbCaptPath = ZM_DIR_IMAGES.'/'.$event['Id'].'-'.$captImage;
//echo "CI:$captImage, CP:$captPath, TCP:$thumbCaptPath<br>";
$analImage = sprintf( "%0".ZM_EVENT_IMAGE_DIGITS."d-analyse.jpg", $frame['FrameId'] );
$analPath = $eventPath.'/'.$analImage;
$analFile = ZM_DIR_EVENTS."/".$analPath;
$thumbAnalPath = ZM_DIR_IMAGES.'/'.$event['Id'].'-'.$analImage;
//echo "AI:$analImage, AP:$analPath, TAP:$thumbAnalPath<br>";
$alarmFrame = $frame['Type']=='Alarm';
$hasAnalImage = $alarmFrame && file_exists( $analFile ) && filesize( $analFile );
$hasAnalImage = $alarmFrame && file_exists( $analPath ) && filesize( $analPath );
$isAnalImage = $hasAnalImage && !$captureOnly;
if ( !ZM_WEB_SCALE_THUMBS || $scale >= SCALE_BASE || !function_exists( 'imagecreatefromjpeg' ) )
@ -1207,22 +1215,20 @@ function getImageSrc( $event, $frame, $scale=SCALE_BASE, $captureOnly=false, $ov
$thumbPath = $thumbCaptPath;
}
$imageFile = ZM_DIR_EVENTS."/".$imagePath;
//$thumbFile = ZM_DIR_EVENTS."/".$thumbPath;
$thumbFile = $thumbPath;
if ( $overwrite || !file_exists( $thumbFile ) || !filesize( $thumbFile ) )
{
// Get new dimensions
list( $imageWidth, $imageHeight ) = getimagesize( $imageFile );
list( $imageWidth, $imageHeight ) = getimagesize( $imagePath );
$thumbWidth = $imageWidth * $fraction;
$thumbHeight = $imageHeight * $fraction;
// Resample
$thumbImage = imagecreatetruecolor( $thumbWidth, $thumbHeight );
$image = imagecreatefromjpeg( $imageFile );
$image = imagecreatefromjpeg( $imagePath );
imagecopyresampled( $thumbImage, $image, 0, 0, 0, 0, $thumbWidth, $thumbHeight, $imageWidth, $imageHeight );
if ( !imagejpeg( $thumbImage, $thumbFile ) )
if ( !imagejpeg( $thumbImage, $thumbPath ) )
Error( "Can't create thumbnail '$thumbPath'" );
}
}
@ -1231,15 +1237,13 @@ function getImageSrc( $event, $frame, $scale=SCALE_BASE, $captureOnly=false, $ov
'eventPath' => $eventPath,
'imagePath' => $imagePath,
'thumbPath' => $thumbPath,
'imageFile' => $imageFile,
'imageFile' => $imagePath,
'thumbFile' => $thumbFile,
'imageClass' => $alarmFrame?"alarm":"normal",
'isAnalImage' => $isAnalImage,
'hasAnalImage' => $hasAnalImage,
);
//echo "IP:$imagePath<br>";
//echo "TP:$thumbPath<br>";
return( $imageData );
}

View File

@ -584,6 +584,7 @@ $SLANG = array(
'Protocol' => 'Protocol',
'Rate' => 'Rate',
'RecaptchaWarning' => 'Your reCaptcha secret key is invalid. Please correct it, or reCaptcha will not work', // added Sep 24 2015 - PP
'RecordAudio' => 'Whether to store the audio stream when saving an event.',
'Real' => 'Real',
'Record' => 'Record',
'RefImageBlendPct' => 'Reference Image Blend %ge',

View File

@ -55,7 +55,7 @@ $replayModes = array(
if ( isset( $_REQUEST['streamMode'] ) )
$streamMode = validHtmlStr($_REQUEST['streamMode']);
else
$streamMode = video;
$streamMode = 'video';
if ( isset( $_REQUEST['replayMode'] ) )
$replayMode = validHtmlStr($_REQUEST['replayMode']);

View File

@ -86,6 +86,7 @@ if ( ! empty($_REQUEST['mid']) ) {
'SaveJPEGs' => "3",
'VideoWriter' => "0",
'EncoderParameters' => "# Lines beginning with # are a comment \n# For changing quality, use the crf option\n# 1 is best, 51 is worst quality\n#crf=23\n",
'RecordAudio' => "0",
'LabelFormat' => '%N - %d/%m/%y %H:%M:%S',
'LabelX' => 0,
'LabelY' => 0,
@ -593,6 +594,7 @@ if ( $tab != 'storage' )
<input type="hidden" name="newMonitor[SaveJPEGs]" value="<?php echo validHtmlStr($newMonitor['SaveJPEGs']) ?>"/>
<input type="hidden" name="newMonitor[VideoWriter]" value="<?php echo validHtmlStr($newMonitor['VideoWriter']) ?>"/>
<input type="hidden" name="newMonitor[EncoderParameters]" value="<?php echo validHtmlStr($newMonitor['EncoderParameters']) ?>"/>
<input type="hidden" name="newMonitor[RecordAudio]" value="<?php echo validHtmlStr($newMonitor['RecordAudio']) ?>"/>
<?php
}
if ( $tab != 'source' || ($newMonitor['Type'] != 'Remote' && $newMonitor['Protocol'] != 'RTSP'))
@ -891,6 +893,7 @@ switch ( $tab )
<tr><td><?php echo translate('SaveJPEGs') ?></td><td><select name="newMonitor[SaveJPEGs]"><?php foreach ( $savejpegopts as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $newMonitor['SaveJPEGs'] ) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
<tr><td><?php echo translate('VideoWriter') ?></td><td><select name="newMonitor[VideoWriter]"><?php foreach ( $videowriteropts as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $newMonitor['VideoWriter'] ) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
<tr><td><?php echo translate('OptionalEncoderParam') ?></td><td><textarea name="newMonitor[EncoderParameters]" rows="4" cols="36"><?php echo validHtmlStr($newMonitor['EncoderParameters']) ?></textarea></td></tr>
<tr><td><?php echo translate('RecordAudio') ?></td><td><input type="checkbox" name="newMonitor[RecordAudio]" value="1"<?php if ( !empty($newMonitor['RecordAudio']) ) { ?> checked="checked"<?php } ?>/></td></tr>
<?php
break;
case 'timestamp' :