Merge branch 'zma_to_thread' into put_ffmpeg_into_event_wip

This commit is contained in:
commit ecc68da0fd
@@ -195,6 +195,7 @@ CREATE TABLE `Events` (
   `Frames` int(10) unsigned default NULL,
   `AlarmFrames` int(10) unsigned default NULL,
   `DefaultVideo` VARCHAR( 64 ) DEFAULT '' NOT NULL,
+  `SaveJPEGs` TINYINT,
   `TotScore` int(10) unsigned NOT NULL default '0',
   `AvgScore` smallint(5) unsigned default '0',
   `MaxScore` smallint(5) unsigned default '0',
@@ -384,8 +385,8 @@ CREATE TABLE `Monitors` (
   `Deinterlacing` int(10) unsigned NOT NULL default '0',
   `SaveJPEGs` TINYINT NOT NULL DEFAULT '3' ,
   `VideoWriter` TINYINT NOT NULL DEFAULT '0',
-  `OutputCodec` enum('h264','mjpeg'),
-  `OutputContainer` enum('mp4','mkv'),
+  `OutputCodec` enum('h264','mjpeg','mpeg1','mpeg2'),
+  `OutputContainer` enum('auto','mp4','mkv'),
   `EncoderParameters` TEXT,
   `RecordAudio` TINYINT NOT NULL DEFAULT '0',
   `RTSPDescribe` tinyint(1) unsigned,
@@ -67,3 +67,4 @@ SET @s = (SELECT IF(

 PREPARE stmt FROM @s;
 EXECUTE stmt;
@@ -0,0 +1,14 @@
+ALTER TABLE `Monitors` MODIFY `OutputCodec` enum('h264','mjpeg','mpeg1','mpeg2') default 'h264';
+ALTER TABLE `Monitors` MODIFY `OutputContainer` enum('auto','mp4','mkv') default 'auto';
+
+SET @s = (SELECT IF(
+  (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = DATABASE()
+  AND table_name = 'Events'
+  AND column_name = 'SaveJPEGs'
+  ) > 0,
+"SELECT 'Column SaveJPEGs already exists in Events'",
+"ALTER TABLE `Events` ADD `SaveJPEGs` TINYINT AFTER `DefaultVideo`"
+));
+
+PREPARE stmt FROM @s;
+EXECUTE stmt;
@@ -115,7 +115,7 @@ BEGIN {
     , $Config{ZM_DB_USER}
     , $Config{ZM_DB_PASS}
   ) or croak( "Can't connect to db" );
-  my $sql = 'select * from Config';
+  my $sql = 'SELECT Name,Value FROM Config';
   my $sth = $dbh->prepare_cached( $sql ) or croak( "Can't prepare '$sql': ".$dbh->errstr() );
   my $res = $sth->execute() or croak( "Can't execute: ".$sth->errstr() );
   while( my $config = $sth->fetchrow_hashref() ) {
@@ -310,25 +310,27 @@ sub set {
   my %defaults = eval('%'.$type.'::defaults');
   if ( ref $params ne 'HASH' ) {
     my ( $caller, undef, $line ) = caller;
-    $openprint::log->error("$type -> set called with non-hash params from $caller $line");
+    $log->error("$type -> set called with non-hash params from $caller $line");
   }

   foreach my $field ( keys %fields ) {
-    $log->debug("field: $field, param: ".$$params{$field}) if $debug;
-    if ( exists $$params{$field} ) {
-      $openprint::log->debug("field: $field, $$self{$field} =? param: ".$$params{$field}) if $debug;
-      if ( ( ! defined $$self{$field} ) or ($$self{$field} ne $params->{$field}) ) {
+    if ( $params ) {
+      $log->debug("field: $field, param: ".$$params{$field}) if $debug;
+      if ( exists $$params{$field} ) {
+        $log->debug("field: $field, $$self{$field} =? param: ".$$params{$field}) if $debug;
+        if ( ( ! defined $$self{$field} ) or ($$self{$field} ne $params->{$field}) ) {
-        # Only make changes to fields that have changed
-        if ( defined $fields{$field} ) {
-          $$self{$field} = $$params{$field} if defined $fields{$field};
-          push @set_fields, $fields{$field}, $$params{$field}; #mark for sql updating
-        } # end if
-        $openprint::log->debug("Running $field with $$params{$field}") if $debug;
-        if ( my $func = $self->can( $field ) ) {
-          $func->( $self, $$params{$field} );
-        } # end if
-      } # end if
-    } # end if
+          # Only make changes to fields that have changed
+          if ( defined $fields{$field} ) {
+            $$self{$field} = $$params{$field} if defined $fields{$field};
+            push @set_fields, $fields{$field}, $$params{$field}; #mark for sql updating
+          } # end if
+          $log->debug("Running $field with $$params{$field}") if $debug;
+          if ( my $func = $self->can( $field ) ) {
+            $func->( $self, $$params{$field} );
+          } # end if
+        } # end if
+      } # end if
+    } # end if $params

   if ( defined $fields{$field} ) {
     if ( $$self{$field} ) {
@@ -94,7 +94,7 @@ delete @ENV{qw(IFS CDPATH ENV BASH_ENV)};

 my @daemons = (
   'zmc',
-  'zma',
+  #'zma',
   'zmfilter.pl',
   'zmaudit.pl',
   'zmtrigger.pl',
@@ -251,7 +251,7 @@ if ( $command =~ /^(?:start|restart)$/ )
   }
   if ( $monitor->{Function} ne 'Monitor' )
   {
-    runCommand( "zmdc.pl start zma -m $monitor->{Id}" );
+    #runCommand( "zmdc.pl start zma -m $monitor->{Id}" );
   }
   if ( $Config{ZM_OPT_CONTROL} )
   {
@@ -4,7 +4,7 @@
 configure_file(zm_config.h.in "${CMAKE_CURRENT_BINARY_DIR}/zm_config.h" @ONLY)

 # Group together all the source files that are used by all the binaries (zmc, zma, zmu, zms etc)
-set(ZM_BIN_SRC_FILES zm_box.cpp zm_buffer.cpp zm_camera.cpp zm_comms.cpp zm_config.cpp zm_coord.cpp zm_curl_camera.cpp zm.cpp zm_db.cpp zm_logger.cpp zm_event.cpp zm_eventstream.cpp zm_exception.cpp zm_file_camera.cpp zm_ffmpeg_input.cpp zm_ffmpeg_camera.cpp zm_image.cpp zm_jpeg.cpp zm_libvlc_camera.cpp zm_local_camera.cpp zm_monitor.cpp zm_monitorstream.cpp zm_ffmpeg.cpp zm_mpeg.cpp zm_packet.cpp zm_packetqueue.cpp zm_poly.cpp zm_regexp.cpp zm_remote_camera.cpp zm_remote_camera_http.cpp zm_remote_camera_nvsocket.cpp zm_remote_camera_rtsp.cpp zm_rtp.cpp zm_rtp_ctrl.cpp zm_rtp_data.cpp zm_rtp_source.cpp zm_rtsp.cpp zm_rtsp_auth.cpp zm_sdp.cpp zm_signal.cpp zm_stream.cpp zm_swscale.cpp zm_thread.cpp zm_time.cpp zm_timer.cpp zm_user.cpp zm_utils.cpp zm_video.cpp zm_videostore.cpp zm_zone.cpp zm_storage.cpp)
+set(ZM_BIN_SRC_FILES zm_analysis_thread.cpp zm_box.cpp zm_buffer.cpp zm_camera.cpp zm_comms.cpp zm_config.cpp zm_coord.cpp zm_curl_camera.cpp zm.cpp zm_db.cpp zm_logger.cpp zm_event.cpp zm_eventstream.cpp zm_exception.cpp zm_file_camera.cpp zm_ffmpeg_camera.cpp zm_image.cpp zm_jpeg.cpp zm_libvlc_camera.cpp zm_local_camera.cpp zm_monitor.cpp zm_monitorstream.cpp zm_ffmpeg.cpp zm_ffmpeg_input.cpp zm_mpeg.cpp zm_packet.cpp zm_packetqueue.cpp zm_poly.cpp zm_regexp.cpp zm_remote_camera.cpp zm_remote_camera_http.cpp zm_remote_camera_nvsocket.cpp zm_remote_camera_rtsp.cpp zm_rtp.cpp zm_rtp_ctrl.cpp zm_rtp_data.cpp zm_rtp_source.cpp zm_rtsp.cpp zm_rtsp_auth.cpp zm_sdp.cpp zm_signal.cpp zm_stream.cpp zm_swscale.cpp zm_thread.cpp zm_time.cpp zm_timer.cpp zm_user.cpp zm_utils.cpp zm_video.cpp zm_videostore.cpp zm_zone.cpp zm_storage.cpp)

 # A fix for cmake recompiling the source files for every target.
 add_library(zm STATIC ${ZM_BIN_SRC_FILES})
@@ -1,25 +0,0 @@
-    snprintf( swap_path, sizeof(swap_path), "%s/zmswap-m%d/zmswap-q%06d", staticConfig.PATH_SWAP.c_str(), monitor->Id(), connkey );
-
-    int len = snprintf(NULL, 0, "/zmswap-m%d", monitor->Id());
-
-
-    int swap_path_length = strlen(staticConfig.PATH_SWAP.c_str()) + snprintf(NULL, 0, "/zmswap-m%d", monitor->Id() ) + snprintf(NULL, 0, "/zmswap-q%06d", connkey ) + 1; // +1 for NULL terminator
-
-  if ( connkey && playback_buffer > 0 ) {
-
-    if ( swap_path_length + max_swap_len_suffix > PATH_MAX ) {
-      Error( "Swap Path is too long. %d > %d ", swap_path_length+max_swap_len_suffix, PATH_MAX );
-    } else {
-      swap_path = (char *)malloc( swap_path_length+max_swap_len_suffix );
-      Debug( 3, "Checking swap image path %s", staticConfig.PATH_SWAP.c_str() );
-      strncpy( swap_path, staticConfig.PATH_SWAP.c_str(), swap_path_length );
-      if ( checkSwapPath( swap_path, false ) ) {
-        snprintf( &(swap_path[swap_path_length]), max_swap_len_suffix, "/zmswap-m%d", monitor->Id() );
-        if ( checkSwapPath( swap_path, true ) ) {
-          snprintf( &(swap_path[swap_path_length]), max_swap_len_suffix, "/zmswap-q%06d", connkey );
-          if ( checkSwapPath( swap_path, true ) ) {
-            buffered_playback = true;
-          }
-        }
-      }
@@ -0,0 +1,49 @@
+#include "zm_analysis_thread.h"
+
+AnalysisThread::AnalysisThread(Monitor *p_monitor) {
+  monitor = p_monitor;
+  terminate = false;
+  //sigemptyset(&block_set);
+}
+
+AnalysisThread::~AnalysisThread() {
+  Debug(2, "THREAD: deleting");
+}
+
+int AnalysisThread::run() {
+
+  useconds_t analysis_rate = monitor->GetAnalysisRate();
+  unsigned int analysis_update_delay = monitor->GetAnalysisUpdateDelay();
+  time_t last_analysis_update_time, cur_time;
+  monitor->UpdateAdaptiveSkip();
+  last_analysis_update_time = time(0);
+
+  Debug(2, "THREAD: Getting ref image");
+  monitor->get_ref_image();
+
+  while( !terminate ) {
+    // Process the next image
+    //sigprocmask(SIG_BLOCK, &block_set, 0);
+
+    // Some periodic updates are required for variable capturing framerate
+    if ( analysis_update_delay ) {
+      cur_time = time( 0 );
+      if ( (unsigned int)( cur_time - last_analysis_update_time ) > analysis_update_delay ) {
+        analysis_rate = monitor->GetAnalysisRate();
+        monitor->UpdateAdaptiveSkip();
+        last_analysis_update_time = cur_time;
+      }
+    }
+
+    if ( !monitor->Analyse() ) {
+      Debug(4, "Sleeping for %d", monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE);
+      usleep(monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE);
+    } else if ( analysis_rate ) {
+      Debug(4, "Sleeping for %d", analysis_rate);
+      usleep(analysis_rate);
+    }
+
+    //sigprocmask(SIG_UNBLOCK, &block_set, 0);
+  } // end while ! terminate
+  return 0;
+} // end AnalysisThread::run()
@@ -0,0 +1,29 @@
+#ifndef ZM_ANALYSIS_THREAD_H
+#define ZM_ANALYSIS_THREAD_H
+
+#include "zm_thread.h"
+#include <signal.h>
+
+#include "zm_monitor.h"
+
+class AnalysisThread : public Thread {
+  private:
+    bool terminate;
+    sigset_t block_set;
+    Monitor *monitor;
+
+  public:
+    AnalysisThread( Monitor * );
+    ~AnalysisThread();
+    int run();
+
+    void stop() {
+      terminate = true;
+    }
+    bool stopped() const {
+      return( terminate );
+    }
+
+};
+
+#endif
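One thing worth flagging in this new pair of files: `terminate` is a plain bool that stop() writes from one thread while run() reads it in a loop, with no synchronisation. A minimal standalone sketch of the same stop protocol using std::atomic<bool> — the Worker class and the std::thread harness are ours, not ZM's Thread API:

```cpp
#include <atomic>
#include <chrono>
#include <thread>

// Hypothetical stand-in for AnalysisThread: same stop/terminate protocol,
// but with std::atomic<bool> so the cross-thread write in stop() is
// guaranteed to become visible to the loop in run().
class Worker {
 private:
  std::atomic<bool> terminate{false};

 public:
  void run() {
    while ( !terminate.load(std::memory_order_relaxed) ) {
      // ... one analysis pass would go here ...
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
  }
  void stop() { terminate.store(true, std::memory_order_relaxed); }
};

int main() {
  Worker w;
  std::thread t(&Worker::run, &w);  // analysis loop on its own thread
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  w.stop();                         // request shutdown from the main thread
  t.join();
  return 0;
}
```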
@@ -107,8 +107,8 @@ public:
     virtual int PreCapture()=0;
     virtual int Capture(ZMPacket &p)=0;
     virtual int PostCapture()=0;
-    AVStream *get_VideoStream() { return NULL; };
-    AVStream *get_AudioStream() { return NULL; };
+    virtual AVStream *get_VideoStream() { return NULL; };
+    virtual AVStream *get_AudioStream() { return NULL; };
     int get_VideoStreamId() { return mVideoStreamId; };
     int get_AudioStreamId() { return mAudioStreamId; };
 };
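The `virtual` being added here is the whole fix: without it, a call through a base `Camera *` is statically dispatched and always runs the base body, returning NULL even when the object is a subclass with a real stream. A toy illustration with our own minimal classes, not ZM's real hierarchy:

```cpp
#include <cstdio>

// Minimal illustration of why `virtual` matters here: with it, the call
// through a base pointer dispatches to the derived override; without it,
// the base-class body would run and return nullptr.
struct Stream { const char *name; };

struct Camera {
  virtual ~Camera() {}
  virtual Stream *get_VideoStream() { return nullptr; }  // base default
};

struct FfmpegCamera : Camera {
  Stream s{"video0"};
  Stream *get_VideoStream() override { return &s; }      // real stream
};

int main() {
  FfmpegCamera ff;
  Camera *cam = &ff;
  Stream *v = cam->get_VideoStream();  // dynamic dispatch -> &ff.s
  std::printf("%s\n", v ? v->name : "(null)");
  return 0;
}
```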
src/zm_event.cpp (157 lines changed)
@@ -71,7 +71,7 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string

   static char sql[ZM_SQL_MED_BUFSIZ];
   struct tm *stime = localtime( &start_time.tv_sec );
-  snprintf( sql, sizeof(sql), "insert into Events ( MonitorId, StorageId, Name, StartTime, Width, Height, Cause, Notes, StateId, Orientation, Videoed ) values ( %d, %d, 'New Event', from_unixtime( %ld ), %d, %d, '%s', '%s', %d, %d, %d )",
+  snprintf( sql, sizeof(sql), "insert into Events ( MonitorId, StorageId, Name, StartTime, Width, Height, Cause, Notes, StateId, Orientation, Videoed, DefaultVideo, SaveJPEGs ) values ( %d, %d, 'New Event', from_unixtime( %ld ), %d, %d, '%s', '%s', %d, %d, %d, '', %d )",
     monitor->Id(),
     storage->Id(),
     start_time.tv_sec,
@@ -81,7 +81,8 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
     notes.c_str(),
     state_id,
     monitor->getOrientation(),
-    videoEvent
+    videoEvent,
+    monitor->GetOptSaveJPEGs()
   );
   if ( mysql_query( &dbconn, sql ) ) {
     Error( "Can't insert event: %s. sql was (%s)", mysql_error( &dbconn ), sql );
@@ -140,6 +141,7 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
     if ( symlink( time_path, id_file ) < 0 )
       Error( "Can't symlink %s -> %s: %s", id_file, path, strerror(errno));
   } else {
+    // Shallow Storage
     snprintf( path, sizeof(path), "%s/%d/%d", storage->Path(), monitor->Id(), id );

     errno = 0;
@ -165,13 +167,23 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
|
|||
/* Save as video */
|
||||
|
||||
if ( monitor->GetOptVideoWriter() != 0 ) {
|
||||
snprintf( video_name, sizeof(video_name), "%d-%s", id, "video.mp4" );
|
||||
std::string container = monitor->OutputContainer();
|
||||
if ( container == "auto" || container == "" ) {
|
||||
if ( monitor->OutputCodec() == "h264" ) {
|
||||
container = "mp4";
|
||||
} else {
|
||||
container = "mkv";
|
||||
}
|
||||
}
|
||||
|
||||
snprintf( video_name, sizeof(video_name), "%d-%s.%s", id, "video", container.c_str() );
|
||||
snprintf( video_file, sizeof(video_file), staticConfig.video_file_format, path, video_name );
|
||||
Debug(1,"Writing video file to %s", video_file );
|
||||
videowriter = NULL;
|
||||
Camera * camera = monitor->getCamera();
|
||||
videoStore = new VideoStore(
|
||||
video_file,
|
||||
"mp4",
|
||||
container.c_str(),
|
||||
camera->get_VideoStream(),
|
||||
( monitor->RecordAudio() ? camera->get_AudioStream() : NULL ),
|
||||
monitor );
|
||||
|
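The container fallback above is worth restating on its own: an explicit container choice wins, and "auto" (or empty) resolves to mp4 for h264 and to mkv for every other codec, since mkv can hold almost anything. A standalone sketch of the same rule — the function name is ours:

```cpp
#include <iostream>
#include <string>

// Standalone restatement of the fallback rule in the hunk above: explicit
// container wins; "auto" or empty picks mp4 for h264, mkv otherwise.
static std::string resolve_container(const std::string &container,
                                     const std::string &codec) {
  if ( container != "auto" && !container.empty() )
    return container;
  return codec == "h264" ? "mp4" : "mkv";
}

int main() {
  std::cout << resolve_container("auto", "h264") << "\n";   // mp4
  std::cout << resolve_container("auto", "mpeg1") << "\n";  // mkv
  std::cout << resolve_container("mkv", "h264") << "\n";    // mkv
  return 0;
}
```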
@@ -180,7 +192,6 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
       delete videoStore;
       videoStore = NULL;
     }
-
   } else {
     /* No video object */
     videowriter = NULL;
@@ -208,7 +219,7 @@ Event::~Event() {
     videoStore = NULL;
   }

-  snprintf( sql, sizeof(sql), "update Events set Name='%s%d', EndTime = from_unixtime( %ld ), Length = %s%ld.%02ld, Frames = %d, AlarmFrames = %d, TotScore = %d, AvgScore = %d, MaxScore = %d, DefaultVideo = '%s' where Id = %d", monitor->EventPrefix(), id, end_time.tv_sec, delta_time.positive?"":"-", delta_time.sec, delta_time.fsec, frames, alarm_frames, tot_score, (int)(alarm_frames?(tot_score/alarm_frames):0), max_score, video_name, id );
+  snprintf( sql, sizeof(sql), "UPDATE Events SET Name='%s%d', EndTime = from_unixtime( %ld ), Length = %s%ld.%02ld, Frames = %d, AlarmFrames = %d, TotScore = %d, AvgScore = %d, MaxScore = %d, DefaultVideo = '%s' where Id = %d", monitor->EventPrefix(), id, end_time.tv_sec, delta_time.positive?"":"-", delta_time.sec, delta_time.fsec, frames, alarm_frames, tot_score, (int)(alarm_frames?(tot_score/alarm_frames):0), max_score, video_name, id );
   if ( mysql_query( &dbconn, sql ) ) {
     Error( "Can't update event: %s", mysql_error( &dbconn ) );
     exit( mysql_errno( &dbconn ) );
@@ -285,7 +296,9 @@ bool Event::WriteFrameVideo( const Image *image, const struct timeval timestamp,

 bool Event::WritePacket( ZMPacket &packet ) {

-  videoStore->writePacket( &packet );
+  if ( videoStore->writePacket( &packet ) < 0 )
+    return false;
+  return true;
 }

 void Event::updateNotes( const StringSetMap &newNoteSetMap ) {
@@ -403,6 +416,11 @@ void Event::AddFramesInternal( int n_frames, int start_frame, Image **images, st
     if ( timestamps[i]->tv_sec <= 0 ) {
       Debug( 1, "Not adding pre-capture frame %d, zero or less than 0 timestamp", i );
       continue;
+    } else if ( timestamps[i]->tv_sec < 0 ) {
+      Warning( "Not adding pre-capture frame %d, negative timestamp", i );
+      continue;
+    } else {
+      Debug( 3, "Adding pre-capture frame %d, timestamp = (%d), start_time=(%d)", i, timestamps[i]->tv_sec, start_time.tv_sec );
     }

     frames++;
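Note that in this ordering the Warning arm is unreachable: `tv_sec <= 0` already swallows every negative timestamp before the `else if ( tv_sec < 0 )` test runs, so the branch is dead code. A sketch of the ordering that would make it live — this is our reading of the intent, not what the commit does:

```cpp
#include <cstdio>

// The hunk above tests tv_sec <= 0 first, so the negative case below it
// can never run. This is the ordering that would make the Warning
// reachable (our guess at the intent, not the commit's behaviour).
static const char *classify(long tv_sec) {
  if ( tv_sec == 0 )
    return "skip: zero timestamp";
  else if ( tv_sec < 0 )
    return "skip with warning: negative timestamp";
  else
    return "add frame";
}

int main() {
  long samples[] = { 0, -5, 1500000000 };
  for ( long s : samples )
    std::printf("%ld -> %s\n", s, classify(s));
  return 0;
}
```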
@@ -450,6 +468,96 @@ void Event::AddFramesInternal( int n_frames, int start_frame, Image **images, st
   }
 }

+void Event::AddPacket( ZMPacket *packet, int score, Image *alarm_image ) {
+  frames++;
+
+  static char event_file[PATH_MAX];
+  snprintf( event_file, sizeof(event_file), staticConfig.capture_file_format, path, frames );
+
+  if ( monitor->GetOptSaveJPEGs() & 4 ) {
+    // Only snapshots
+    //If this is the first frame, we should add a thumbnail to the event directory
+    if ( frames == 10 ) {
+      static char snapshot_file[PATH_MAX];
+      snprintf( snapshot_file, sizeof(snapshot_file), "%s/snapshot.jpg", path );
+      WriteFrameImage( packet->image, packet->timestamp, snapshot_file );
+    }
+  }
+  if ( monitor->GetOptSaveJPEGs() & 1 ) {
+    Debug( 1, "Writing capture frame %d to %s", frames, event_file );
+    if ( ! WriteFrameImage( packet->image, packet->timestamp, event_file ) ) {
+      Error("Failed to write frame image");
+    }
+  }
+  if ( videoStore ) {
+    videoStore->writePacket( packet );
+    //FIXME if it fails, we should write a jpeg
+  }
+
+  struct DeltaTimeval delta_time;
+  DELTA_TIMEVAL( delta_time, packet->timestamp, start_time, DT_PREC_2 );
+
+  FrameType frame_type = score>0?ALARM:(score<0?BULK:NORMAL);
+  // < 0 means no motion detection is being done.
+  if ( score < 0 )
+    score = 0;
+
+  bool db_frame = ( frame_type != BULK ) || ((frames%config.bulk_frame_interval)==0) || !frames;
+  if ( db_frame ) {
+
+    Debug( 1, "Adding frame %d of type \"%s\" to DB", frames, Event::frame_type_names[frame_type] );
+    static char sql[ZM_SQL_MED_BUFSIZ];
+    snprintf( sql, sizeof(sql),
+      "insert into Frames ( EventId, FrameId, Type, TimeStamp, Delta, Score )"
+      " values ( %d, %d, '%s', from_unixtime( %ld ), %s%ld.%02ld, %d )",
+      id, frames, frame_type_names[frame_type], packet->timestamp.tv_sec,
+      delta_time.positive?"":"-", delta_time.sec, delta_time.fsec, score );
+    if ( mysql_query( &dbconn, sql ) ) {
+      Error( "Can't insert frame: %s", mysql_error( &dbconn ) );
+      exit( mysql_errno( &dbconn ) );
+    }
+    last_db_frame = frames;
+
+    // We are writing a Bulk frame
+    if ( frame_type == BULK ) {
+      snprintf( sql, sizeof(sql), "update Events set Length = %s%ld.%02ld, Frames = %d, AlarmFrames = %d, TotScore = %d, AvgScore = %d, MaxScore = %d where Id = %d",
+        ( delta_time.positive?"":"-" ),
+        delta_time.sec, delta_time.fsec,
+        frames,
+        alarm_frames,
+        tot_score,
+        (int)(alarm_frames?(tot_score/alarm_frames):0),
+        max_score,
+        id
+      );
+      if ( mysql_query( &dbconn, sql ) ) {
+        Error( "Can't update event: %s", mysql_error( &dbconn ) );
+        exit( mysql_errno( &dbconn ) );
+      }
+    }
+  } // end if db_frame
+
+  end_time = packet->timestamp;
+
+  // We are writing an Alarm frame
+  if ( frame_type == ALARM ) {
+    alarm_frames++;
+
+    tot_score += score;
+    if ( score > (int)max_score )
+      max_score = score;
+
+    if ( alarm_image ) {
+      snprintf( event_file, sizeof(event_file), staticConfig.analyse_file_format, path, frames );
+
+      Debug( 1, "Writing analysis frame %d", frames );
+      if ( monitor->GetOptSaveJPEGs() & 2 ) {
+        WriteFrameImage(alarm_image, packet->timestamp, event_file, true);
+      }
+    }
+  }
+}
+
 void Event::AddFrame( Image *image, struct timeval timestamp, int score, Image *alarm_image ) {
   if ( !timestamp.tv_sec ) {
     Debug( 1, "Not adding new frame, zero timestamp" );
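AddPacket reads GetOptSaveJPEGs() as a bit-field: bit 1 writes every capture frame, bit 2 writes analysis frames, and bit 4 keeps only a snapshot, which lines up with the schema default of 3 ("frames + analysis"). A standalone decoder sketch — the enum names are ours, ZM stores a plain TINYINT:

```cpp
#include <cstdio>

// Decoding of the SaveJPEGs tests in AddPacket above. Bit 1 = write every
// capture frame, bit 2 = write analysis frames, bit 4 = snapshot only.
// The Monitors schema default of 3 means "frames + analysis".
enum SaveJPEGsFlags {
  SAVE_JPEG_FRAMES   = 1,
  SAVE_JPEG_ANALYSIS = 2,
  SAVE_JPEG_SNAPSHOT = 4,
};

int main() {
  for ( int savejpegs = 0; savejpegs <= 7; savejpegs++ ) {
    std::printf("%d: frames=%d analysis=%d snapshot-only=%d\n",
                savejpegs,
                (savejpegs & SAVE_JPEG_FRAMES) != 0,
                (savejpegs & SAVE_JPEG_ANALYSIS) != 0,
                (savejpegs & SAVE_JPEG_SNAPSHOT) != 0);
  }
  return 0;
}
```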
@@ -465,7 +573,7 @@ void Event::AddFrame( Image *image, struct timeval timestamp, int score, Image *
     // Only snapshots
     //If this is the first frame, we should add a thumbnail to the event directory
     if ( frames == 10 ) {
-      char snapshot_file[PATH_MAX];
+      static char snapshot_file[PATH_MAX];
       snprintf( snapshot_file, sizeof(snapshot_file), "%s/snapshot.jpg", path );
       WriteFrameImage( image, timestamp, snapshot_file );
     }
@@ -539,37 +647,4 @@ Debug(3, "Writing video");
       }
     }
   }
-
-  /* This makes viewing the diagnostic images impossible because it keeps deleting them
-  if ( config.record_diag_images ) {
-    char diag_glob[PATH_MAX] = "";
-
-    snprintf( diag_glob, sizeof(diag_glob), "%s/%d/diag-*.jpg", staticConfig.DIR_EVENTS.c_str(), monitor->Id() );
-    glob_t pglob;
-    int glob_status = glob( diag_glob, 0, 0, &pglob );
-    if ( glob_status != 0 ) {
-      if ( glob_status < 0 ) {
-        Error( "Can't glob '%s': %s", diag_glob, strerror(errno) );
-      } else {
-        Debug( 1, "Can't glob '%s': %d", diag_glob, glob_status );
-      }
-    } else {
-      char new_diag_path[PATH_MAX] = "";
-      for ( int i = 0; i < pglob.gl_pathc; i++ ) {
-        char *diag_path = pglob.gl_pathv[i];
-
-        char *diag_file = strstr( diag_path, "diag-" );
-
-        if ( diag_file ) {
-          snprintf( new_diag_path, sizeof(new_diag_path), general_file_format, path, frames, diag_file );
-
-          if ( rename( diag_path, new_diag_path ) < 0 ) {
-            Error( "Can't rename '%s' to '%s': %s", diag_path, new_diag_path, strerror(errno) );
-          }
-        }
-      }
-    }
-    globfree( &pglob );
-  }
-  */
 }
@@ -122,6 +122,7 @@ class Event {

     void AddFrames( int n_frames, Image **images, struct timeval **timestamps );
     void AddFrame( Image *image, struct timeval timestamp, int score=0, Image *alarm_frame=NULL );
+    void AddPacket( ZMPacket *p, int score=0, Image *alarm_frame=NULL );
     bool WritePacket( ZMPacket &p );

   private:
@@ -133,6 +133,7 @@ int av_dict_parse_string(AVDictionary **pm, const char *str,
 #endif // HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE

+#if HAVE_LIBAVUTIL
 #if LIBAVUTIL_VERSION_CHECK(56, 0, 0, 17, 100)
 int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb){
   int64_t a, b, this_thing;
@@ -156,6 +157,7 @@ simple_round:
   return av_rescale_q(this_thing, fs_tb, out_tb);
 }
 #endif
+#endif

 int hacked_up_context2_for_older_ffmpeg(AVFormatContext **avctx, AVOutputFormat *oformat, const char *format, const char *filename) {
   AVFormatContext *s = avformat_alloc_context();
@@ -116,9 +116,9 @@ AVFrame *FFmpeg_Input::get_frame( int stream_id ) {
     if ( (stream_id < 0 ) || ( packet.stream_index == stream_id ) ) {
       Debug(1,"Packet is for our stream (%d)", packet.stream_index );

-      AVCodecContext *context = streams[packet.stream_index].context;
-
 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
+      AVCodecContext *context = streams[packet.stream_index].context;
+
       ret = avcodec_send_packet( context, &packet );
       if ( ret < 0 ) {
         av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
@@ -130,7 +130,6 @@ Image::Image( int p_width, int p_height, int p_colours, int p_subpixelorder, uin
 }

 Image::Image( const AVFrame *frame ) {
-  AVFrame *dest_frame = zm_av_frame_alloc();

   width = frame->width;
   height = frame->height;
@@ -141,7 +140,13 @@ Image::Image( const AVFrame *frame ) {
   buffer = 0;
   holdbuffer = 0;
   AllocImgBuffer(size);
+  this->Assign( frame );
+}
+
+void Image::Assign( const AVFrame *frame ) {
+  /* Assume the dimensions etc are correct. FIXME */
+
+  AVFrame *dest_frame = zm_av_frame_alloc();
 #if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
   av_image_fill_arrays(dest_frame->data, dest_frame->linesize,
     buffer, AV_PIX_FMT_RGBA, width, height, 1);
@@ -167,7 +172,7 @@ Image::Image( const AVFrame *frame ) {
     Fatal("You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras");
 #endif // HAVE_LIBSWSCALE
   av_frame_free( &dest_frame );
-}
+} // end Image::Image( const AVFrame *frame )

 Image::Image( const Image &p_image ) {
   if ( !initialised )
@@ -605,7 +610,7 @@ void Image::Assign(const unsigned int p_width, const unsigned int p_height, cons
     size = new_size;
   }

-  if(new_buffer != buffer)
+  if ( new_buffer != buffer )
     (*fptr_imgbufcpy)(buffer, new_buffer, size);

 }
@@ -613,25 +618,25 @@ void Image::Assign(const unsigned int p_width, const unsigned int p_height, cons
 void Image::Assign( const Image &image ) {
   unsigned int new_size = (image.width * image.height) * image.colours;

-  if(image.buffer == NULL) {
+  if ( image.buffer == NULL ) {
     Error("Attempt to assign image with an empty buffer");
     return;
   }

-  if(image.colours != ZM_COLOUR_GRAY8 && image.colours != ZM_COLOUR_RGB24 && image.colours != ZM_COLOUR_RGB32) {
+  if ( image.colours != ZM_COLOUR_GRAY8 && image.colours != ZM_COLOUR_RGB24 && image.colours != ZM_COLOUR_RGB32 ) {
     Error("Attempt to assign image with unexpected colours per pixel: %d",image.colours);
     return;
   }

-  if ( !buffer || image.width != width || image.height != height || image.colours != colours || image.subpixelorder != subpixelorder) {
+  if ( !buffer || image.width != width || image.height != height || image.colours != colours || image.subpixelorder != subpixelorder ) {

-    if (holdbuffer && buffer) {
-      if (new_size > allocation) {
+    if ( holdbuffer && buffer ) {
+      if ( new_size > allocation ) {
         Error("Held buffer is undersized for assigned buffer");
         return;
       }
     } else {
-      if(new_size > allocation || !buffer) {
+      if ( new_size > allocation || !buffer ) {
         // DumpImgBuffer(); This is also done in AllocImgBuffer
         AllocImgBuffer(new_size);
       }
@@ -645,7 +650,7 @@ void Image::Assign( const Image &image ) {
     size = new_size;
   }

-  if(image.buffer != buffer)
+  if ( image.buffer != buffer )
     (*fptr_imgbufcpy)(buffer, image.buffer, size);
 }
@@ -95,14 +95,15 @@ protected:
     double _1_m;

     static int CompareYX( const void *p1, const void *p2 ) {
-      const Edge *e1 = (const Edge *)p1, *e2 = (const Edge *)p2;
+      // This is because these functions are passed to qsort
+      const Edge *e1 = reinterpret_cast<const Edge *>(p1), *e2 = reinterpret_cast<const Edge *>(p2);
       if ( e1->min_y == e2->min_y )
         return( int(e1->min_x - e2->min_x) );
       else
         return( int(e1->min_y - e2->min_y) );
     }
     static int CompareX( const void *p1, const void *p2 ) {
-      const Edge *e1 = (const Edge *)p1, *e2 = (const Edge *)p2;
+      const Edge *e1 = reinterpret_cast<const Edge *>(p1), *e2 = reinterpret_cast<const Edge *>(p2);
       return( int(e1->min_x - e2->min_x) );
     }
 };
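These comparators are handed to qsort(), which is why they take void pointers. One pitfall the cast-based rewrite keeps: `int(e1->min_x - e2->min_x)` truncates fractional differences below 1 to zero, so such edges compare equal. A sketch with a three-way compare instead — a toy Edge carrying only the two fields touched here:

```cpp
#include <cstdio>
#include <cstdlib>

// Toy Edge with the two fields the comparators touch (the real class has
// more). int(e1->min_x - e2->min_x) truncates sub-1 differences to 0 and
// qsort then treats the edges as equal; an explicit three-way compare
// avoids that.
struct Edge { double min_x, min_y; };

static int CompareYX(const void *p1, const void *p2) {
  const Edge *e1 = reinterpret_cast<const Edge *>(p1);
  const Edge *e2 = reinterpret_cast<const Edge *>(p2);
  if ( e1->min_y != e2->min_y )
    return (e1->min_y < e2->min_y) ? -1 : 1;
  if ( e1->min_x != e2->min_x )
    return (e1->min_x < e2->min_x) ? -1 : 1;
  return 0;
}

int main() {
  Edge edges[] = { {2.5, 1.0}, {0.5, 1.0}, {1.0, 0.25} };
  qsort(edges, 3, sizeof(Edge), CompareYX);  // sorts by y, then x
  for ( const Edge &e : edges )
    std::printf("(%g, %g)\n", e.min_x, e.min_y);
  return 0;
}
```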
@@ -145,6 +146,7 @@ protected:
   unsigned int size;
   unsigned int subpixelorder;
   unsigned long allocation;
+  _AVPIXELFORMAT imagePixFormat;
   uint8_t *buffer;
   int buffertype; /* 0=not ours, no need to call free(), 1=malloc() buffer, 2=new buffer */
   int holdbuffer; /* Hold the buffer instead of replacing it with new one */
@@ -171,7 +173,11 @@ public:
     if ( colours == ZM_COLOUR_RGB32 ) {
       return AV_PIX_FMT_RGBA;
     } else if ( colours == ZM_COLOUR_RGB24 ) {
-      return AV_PIX_FMT_RGB24;
+      if ( subpixelorder == ZM_SUBPIX_ORDER_BGR){
+        return AV_PIX_FMT_BGR24;
+      } else {
+        return AV_PIX_FMT_RGB24;
+      }
     } else if ( colours == ZM_COLOUR_GRAY8 ) {
       return AV_PIX_FMT_GRAY8;
     } else {
@@ -179,13 +185,14 @@ public:
       return AV_PIX_FMT_RGBA;
     }
   }

   /* Internal buffer should not be modified from functions outside of this class */
   inline const uint8_t* Buffer() const { return( buffer ); }
   inline const uint8_t* Buffer( unsigned int x, unsigned int y= 0 ) const { return( &buffer[colours*((y*width)+x)] ); }
   /* Request writeable buffer */
   uint8_t* WriteBuffer(const unsigned int p_width, const unsigned int p_height, const unsigned int p_colours, const unsigned int p_subpixelorder);
+  // Is only acceptable on a pre-allocated buffer
   uint8_t* WriteBuffer() { if ( holdbuffer ) return buffer; return NULL; };

   inline int IsBufferHeld() const { return holdbuffer; }
   inline void HoldBuffer(int tohold) { holdbuffer = tohold; }
@@ -199,6 +206,7 @@ public:

   void Assign( unsigned int p_width, unsigned int p_height, unsigned int p_colours, unsigned int p_subpixelorder, const uint8_t* new_buffer, const size_t buffer_size);
   void Assign( const Image &image );
+  void Assign( const AVFrame *frame );
   void AssignDirect( const unsigned int p_width, const unsigned int p_height, const unsigned int p_colours, const unsigned int p_subpixelorder, uint8_t *new_buffer, const size_t buffer_size, const int p_buffertype);

   inline void CopyBuffer( const Image &image ) {
@@ -1898,7 +1898,7 @@ int LocalCamera::PrimeCapture() {
 }

 int LocalCamera::PreCapture() {
-  Debug( 5, "Pre-capturing" );
+  Debug( 4, "Pre-capturing" );
   return( 0 );
 }
@@ -2000,7 +2000,7 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
 #if HAVE_LIBSWSCALE
   if ( conversion_type == 1 ) {

-    Debug( 9, "Calling sws_scale to perform the conversion" );
+    Debug( 9, "Setting up a frame" );
     /* Use swscale to convert the image directly into the shared memory */
 #if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
     av_image_fill_arrays(tmpPicture->data,
@@ -2010,6 +2010,7 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
     avpicture_fill( (AVPicture *)tmpPicture, directbuffer,
       imagePixFormat, width, height );
 #endif
+    Debug( 9, "Calling sws_scale to perform the conversion" );
     sws_scale(
       imgConversionContext,
       capturePictures[capture_frame]->data,
@@ -2019,6 +2020,7 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
       tmpPicture->data,
       tmpPicture->linesize
     );
+    Debug( 9, "Done sws_scale to perform the conversion" );
   } else
 #endif
   if ( conversion_type == 2 ) {
@@ -2071,7 +2073,7 @@ int LocalCamera::PostCapture() {
     } else {
       Error( "Unable to requeue buffer due to not v4l2_data" )
     }
   }
 } else
 #endif // ZM_HAS_V4L2
 #if ZM_HAS_V4L1
 if ( v4l_version == 1 ) {
@@ -36,6 +36,7 @@
 #ifdef __FreeBSD__
 #include <sys/thr.h>
 #endif
+#include <cstdarg>

 bool Logger::smInitialised = false;
 Logger *Logger::smInstance = 0;
@@ -63,16 +63,6 @@
 #define MAP_LOCKED 0
 #endif

-std::vector<std::string> split(const std::string &s, char delim) {
-  std::vector<std::string> elems;
-  std::stringstream ss(s);
-  std::string item;
-  while(std::getline(ss, item, delim)) {
-    elems.push_back(trimSpaces(item));
-  }
-  return elems;
-}
-
 Monitor::MonitorLink::MonitorLink( int p_id, const char *p_name ) : id( p_id ) {
   strncpy( name, p_name, sizeof(name) );
@@ -231,9 +221,11 @@ bool Monitor::MonitorLink::inAlarm() {
 bool Monitor::MonitorLink::hasAlarmed() {
   if ( shared_data->state == ALARM ) {
     return( true );
-  } else if ( shared_data->last_event_id != (unsigned int)last_event_id ) {
-    last_event_id = shared_data->last_event_id;
   }
+  //} else if ( shared_data->last_event_id != (unsigned int)last_event ) {
+  // Why test for it, just set it...
+  last_event_id = shared_data->last_event_id;
+  //}
   return( false );
 }
@@ -251,6 +243,8 @@ Monitor::Monitor(
   int p_savejpegs,
   VideoWriter p_videowriter,
   std::string p_encoderparams,
+  std::string p_output_codec,
+  std::string p_output_container,
   bool p_record_audio,
   const char *p_event_prefix,
   const char *p_label_format,
@@ -290,6 +284,8 @@ Monitor::Monitor(
   savejpegspref( p_savejpegs ),
   videowriter( p_videowriter ),
   encoderparams( p_encoderparams ),
+  output_codec( p_output_codec ),
+  output_container( p_output_container ),
   record_audio( p_record_audio ),
   label_coord( p_label_coord ),
   label_size( p_label_size ),
@@ -372,7 +368,6 @@ Monitor::Monitor(
   mem_size = sizeof(SharedData)
        + sizeof(TriggerData)
        + sizeof(VideoStoreData) //Information to pass back to the capture process
-       + (image_buffer_count*sizeof(struct timeval))
        + (image_buffer_count*camera->ImageSize())
        + 64; /* Padding used to permit aligning the images buffer to 64 byte boundary */
@@ -386,6 +381,7 @@ Monitor::Monitor(
   snprintf( monitor_dir, sizeof(monitor_dir), "%s/%d", storage->Path(), id );
   struct stat statbuf;

+  // If we are going to actually do capture, then yes, we should stat this dir, otherwise not
   if ( stat( monitor_dir, &statbuf ) ) {
     if ( errno == ENOENT || errno == ENOTDIR ) {
       if ( mkdir( monitor_dir, 0755 ) ) {
@@ -428,61 +424,39 @@ Monitor::Monitor(
    trigger_data->trigger_showtext[0] = 0;
    shared_data->valid = true;
    video_store_data->recording = (struct timeval){0};
    // Uh, why nothing? Why not NULL?
    snprintf(video_store_data->event_file, sizeof(video_store_data->event_file), "nothing");
    video_store_data->size = sizeof(VideoStoreData);
    //video_store_data->frameNumber = 0;
  } else if ( purpose == ANALYSIS ) {
    this->connect();
    if ( ! mem_ptr ) exit(-1);
    shared_data->state = IDLE;
    shared_data->last_read_time = 0;
    shared_data->alarm_x = -1;
    shared_data->alarm_y = -1;
  }

-  if ( ( ! mem_ptr ) || ! shared_data->valid ) {
-    if ( purpose != QUERY ) {
-      Error( "Shared data not initialised by capture daemon for monitor %s", name );
-      exit( -1 );
-    }
-  }
+  //if ( ( ! mem_ptr ) || ! shared_data->valid ) {
+    //if ( purpose != QUERY ) {
+      //Error( "Shared data not initialised by capture daemon for monitor %s", name );
+      //exit( -1 );
+    //}
+  //}

  // Will this not happen every time a monitor is instantiated? Seems like all the calls to the Monitor constructor pass a zero for n_zones, then load zones after..
  // In my storage areas branch, I took this out.. and didn't notice any problems.
  if ( false && !n_zones ) {
    Debug( 1, "Monitor %s has no zones, adding one.", name );
    n_zones = 1;
    zones = new Zone *[1];
    Coord coords[4] = { Coord( 0, 0 ), Coord( width-1, 0 ), Coord( width-1, height-1 ), Coord( 0, height-1 ) };
    zones[0] = new Zone( this, 0, "All", Zone::ACTIVE, Polygon( sizeof(coords)/sizeof(*coords), coords ), RGB_RED, Zone::BLOBS );
  }
  start_time = last_fps_time = time( 0 );

  event = 0;

-  Debug( 1, "Monitor %s has function %d", name, function );
-  Debug( 1, "Monitor %s LBF = '%s', LBX = %d, LBY = %d, LBS = %d", name, label_format, label_coord.X(), label_coord.Y(), label_size );
-  Debug( 1, "Monitor %s IBC = %d, WUC = %d, pEC = %d, PEC = %d, EAF = %d, FRI = %d, RBP = %d, ARBP = %d, FM = %d", name, image_buffer_count, warmup_count, pre_event_count, post_event_count, alarm_frame_count, fps_report_interval, ref_blend_perc, alarm_ref_blend_perc, track_motion );
+  Debug( 1, "Monitor %s\
+      function: %d\
+      label_format: '%s', label_x = %d, label_y = %d, label size = %d \
+      IBC = %d, WUC = %d, pEC = %d, PEC = %d, EAF = %d, FRI = %d, RBP = %d, ARBP = %d, FM = %d",
+      name,
+      function, label_format, label_coord.X(), label_coord.Y(), label_size,
+      image_buffer_count, warmup_count, pre_event_count, post_event_count, alarm_frame_count, fps_report_interval, ref_blend_perc, alarm_ref_blend_perc, track_motion
+      );

  //Set video recording flag for event start constructor and easy reference in code
  videoRecording = ((GetOptVideoWriter() == H264PASSTHROUGH) && camera->SupportsNativeVideo());
  n_linked_monitors = 0;
  linked_monitors = 0;

  if ( purpose == ANALYSIS ) {
    adaptive_skip = true;

    while( shared_data->last_write_index == (unsigned int)image_buffer_count
           && shared_data->last_write_time == 0) {
      Warning( "Waiting for capture daemon" );
      sleep( 1 );
    }
    ref_image.Assign( width, height, camera->Colours(), camera->SubpixelOrder(), image_buffer[shared_data->last_write_index].image->Buffer(), camera->ImageSize());

-    n_linked_monitors = 0;
-    linked_monitors = 0;
-
-    adaptive_skip = true;
-
-    ReloadLinkedMonitors( p_linked_monitors );
  }
+  ReloadLinkedMonitors( p_linked_monitors );
+  videoStore = NULL;
} // Monitor::Monitor

bool Monitor::connect() {
@@ -553,44 +527,46 @@ bool Monitor::connect() {
  shared_data = (SharedData *)mem_ptr;
  trigger_data = (TriggerData *)((char *)shared_data + sizeof(SharedData));
  video_store_data = (VideoStoreData *)((char *)trigger_data + sizeof(TriggerData));
-  struct timeval *shared_timestamps = (struct timeval *)((char *)video_store_data + sizeof(VideoStoreData));
-  unsigned char *shared_images = (unsigned char *)((char *)shared_timestamps + (image_buffer_count*sizeof(struct timeval)));
+  unsigned char *shared_images = (unsigned char *)((char *)video_store_data + sizeof(VideoStoreData));

  if ( ((unsigned long)shared_images % 64) != 0 ) {
    /* Align images buffer to nearest 64 byte boundary */
    Debug(3,"Aligning shared memory images to the next 64 byte boundary");
    shared_images = (uint8_t*)((unsigned long)shared_images + (64 - ((unsigned long)shared_images % 64)));
  }

  Debug(3, "Allocating %d image buffers", image_buffer_count );
-  image_buffer = new Snapshot[image_buffer_count];
-  for ( int i = 0; i < image_buffer_count; i++ ) {
-    image_buffer[i].timestamp = &(shared_timestamps[i]);
-    image_buffer[i].image = new Image( width, height, camera->Colours(), camera->SubpixelOrder(), &(shared_images[i*camera->ImageSize()]) );
-    image_buffer[i].image->HoldBuffer(true); /* Don't release the internal buffer or replace it with another */
-  }
+  image_buffer = new ZMPacket[image_buffer_count];
+  for ( int i = 0; i < image_buffer_count; i++ ) {
+    image_buffer[i].image = new Image( width, height, camera->Colours(), camera->SubpixelOrder(), &(shared_images[i*camera->ImageSize()]) );
+    image_buffer[i].image->HoldBuffer(true); /* Don't release the internal buffer or replace it with another */
+  }
  if ( (deinterlacing & 0xff) == 4) {
    /* Four field motion adaptive deinterlacing in use */
    /* Allocate a buffer for the next image */
    next_buffer.image = new Image( width, height, camera->Colours(), camera->SubpixelOrder());
-    next_buffer.timestamp = new struct timeval;
  }
-  if ( ( purpose == ANALYSIS ) && analysis_fps ) {
+  pre_event_buffer_count = pre_event_count + alarm_frame_count - 1;
+  //if ( ( purpose == ANALYSIS ) && analysis_fps ) {
    // Size of pre event buffer must be greater than pre_event_count
    // if alarm_frame_count > 1, because in this case the buffer contains
    // alarmed images that must be discarded when event is created

+    // Couldn't we just make sure there is always enough frames in the ring buffer?
-    pre_event_buffer_count = pre_event_count + alarm_frame_count - 1;
-    pre_event_buffer = new Snapshot[pre_event_buffer_count];
+    pre_event_buffer = new ZMPacket[pre_event_buffer_count];
    for ( int i = 0; i < pre_event_buffer_count; i++ ) {
-      pre_event_buffer[i].timestamp = new struct timeval;
      pre_event_buffer[i].image = new Image( width, height, camera->Colours(), camera->SubpixelOrder());
    }
  }
  Debug(3, "Success connecting");
  return true;
} // Monitor::connect

Monitor::~Monitor() {
  if ( videoStore ) {
    delete videoStore;
    videoStore = NULL;
  }
  if ( timestamps ) {
    delete[] timestamps;
    timestamps = 0;
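With the per-image timestamps gone from the shared region (they now travel inside ZMPacket), the layout is just the three header structs followed by the image frames, rounded up to a 64-byte boundary — which is what the 64-byte pad in mem_size is for. A sketch of that arithmetic, with placeholder struct sizes rather than ZM's real ones:

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of the shared-memory layout implied above. The struct sizes are
// made up; the real SharedData/TriggerData/VideoStoreData live in ZM's
// headers. The point is the arithmetic: images start after the three
// headers, rounded up to a 64-byte boundary, hence the 64-byte pad.
struct SharedData     { uint8_t bytes[328]; };   // placeholder size
struct TriggerData    { uint8_t bytes[560]; };   // placeholder size
struct VideoStoreData { uint8_t bytes[4120]; };  // placeholder size

int main() {
  const int image_buffer_count = 40;
  const size_t image_size = 1920 * 1080 * 4;  // one RGBA frame

  size_t offset = sizeof(SharedData) + sizeof(TriggerData)
                + sizeof(VideoStoreData);
  size_t image_offset = (offset + 63) & ~size_t(63);  // align up to 64

  size_t mem_size = image_offset + image_buffer_count * image_size;
  std::printf("headers end at %zu, images start at %zu, total %zu\n",
              offset, image_offset, mem_size);
  return 0;
}
```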
@@ -611,23 +587,12 @@ Monitor::~Monitor() {

  if ( (deinterlacing & 0xff) == 4) {
    delete next_buffer.image;
-    delete next_buffer.timestamp;
  }
  for ( int i = 0; i < image_buffer_count; i++ ) {
    delete image_buffer[i].image;
  }
  delete[] image_buffer;
  } // end if mem_ptr

-  for ( int i = 0; i < n_zones; i++ ) {
-    delete zones[i];
-  }
-  delete[] zones;
-
-  delete camera;
-  delete storage;
-
  if ( mem_ptr ) {
    if ( purpose == ANALYSIS ) {
      shared_data->state = state = IDLE;
      shared_data->last_read_index = image_buffer_count;
@@ -636,7 +601,6 @@ Monitor::~Monitor() {
     if ( analysis_fps ) {
       for ( int i = 0; i < pre_event_buffer_count; i++ ) {
         delete pre_event_buffer[i].image;
-        delete pre_event_buffer[i].timestamp;
       }
       delete[] pre_event_buffer;
     }
@@ -675,6 +639,14 @@ Monitor::~Monitor() {
   }
 #endif // ZM_MEM_MAPPED
   } // end if mem_ptr
+
+  for ( int i = 0; i < n_zones; i++ ) {
+    delete zones[i];
+  }
+  delete[] zones;
+
+  delete camera;
+  delete storage;
 }

 void Monitor::AddZones( int p_n_zones, Zone *p_zones[] ) {
@@ -713,17 +685,16 @@ int Monitor::GetImage( int index, int scale ) {
   if ( index < 0 || index > image_buffer_count ) {
     index = shared_data->last_write_index;
   }

+  Debug(3, "GetImage");
   if ( index != image_buffer_count ) {
     Image *image;
     // If we are going to be modifying the snapshot before writing, then we need to copy it
     if ( ( scale != ZM_SCALE_BASE ) || ( !config.timestamp_on_capture ) ) {
-      Snapshot *snap = &image_buffer[index];
+      ZMPacket *snap = &image_buffer[index];
       Image *snap_image = snap->image;

       alarm_image.Assign( *snap_image );

+      //write_image.Assign( *snap_image );

       if ( scale != ZM_SCALE_BASE ) {
@@ -731,7 +702,7 @@ int Monitor::GetImage( int index, int scale ) {
     }

     if ( !config.timestamp_on_capture ) {
-      TimestampImage( &alarm_image, snap->timestamp );
+      TimestampImage( &alarm_image, &snap->timestamp );
     }
     image = &alarm_image;
   } else {
@@ -753,13 +724,13 @@ struct timeval Monitor::GetTimestamp( int index ) const {
   }

   if ( index != image_buffer_count ) {
-    Snapshot *snap = &image_buffer[index];
+    ZMPacket *snap = &image_buffer[index];

-    return( *(snap->timestamp) );
+    return snap->timestamp;
   } else {
     static struct timeval null_tv = { 0, 0 };

-    return( null_tv );
+    return null_tv;
   }
 }
@@ -778,29 +749,29 @@ unsigned int Monitor::GetLastEvent() const {
 double Monitor::GetFPS() const {
   int index1 = shared_data->last_write_index;
   if ( index1 == image_buffer_count ) {
-    return( 0.0 );
+    return 0.0;
   }
-  Snapshot *snap1 = &image_buffer[index1];
-  if ( !snap1->timestamp || !snap1->timestamp->tv_sec ) {
-    return( 0.0 );
+  ZMPacket *snap1 = &image_buffer[index1];
+  if ( !snap1->timestamp.tv_sec ) {
+    return 0.0;
   }
-  struct timeval time1 = *snap1->timestamp;
+  struct timeval time1 = snap1->timestamp;

   int image_count = image_buffer_count;
   int index2 = (index1+1)%image_buffer_count;
   if ( index2 == image_buffer_count ) {
-    return( 0.0 );
+    return 0.0;
   }
-  Snapshot *snap2 = &image_buffer[index2];
-  while ( !snap2->timestamp || !snap2->timestamp->tv_sec ) {
+  ZMPacket *snap2 = &image_buffer[index2];
+  while ( !snap2->timestamp.tv_sec ) {
     if ( index1 == index2 ) {
-      return( 0.0 );
+      return 0.0;
     }
     index2 = (index2+1)%image_buffer_count;
     snap2 = &image_buffer[index2];
     image_count--;
   }
-  struct timeval time2 = *snap2->timestamp;
+  struct timeval time2 = snap2->timestamp;

   double time_diff = tvDiffSec( time2, time1 );
@@ -808,9 +779,9 @@ double Monitor::GetFPS() const {

   if ( curr_fps < 0.0 ) {
     //Error( "Negative FPS %f, time_diff = %lf (%d:%ld.%ld - %d:%ld.%ld), ibc: %d", curr_fps, time_diff, index2, time2.tv_sec, time2.tv_usec, index1, time1.tv_sec, time1.tv_usec, image_buffer_count );
-    return( 0.0 );
+    return 0.0;
   }
-  return( curr_fps );
+  return curr_fps;
 }

 useconds_t Monitor::GetAnalysisRate() {
@@ -1028,7 +999,7 @@ void Monitor::DumpZoneImage( const char *zone_string ) {
   if ( ( (!staticConfig.SERVER_ID) || ( staticConfig.SERVER_ID == server_id ) ) && mem_ptr ) {
     Debug(3, "Trying to load from local zmc");
     int index = shared_data->last_write_index;
-    Snapshot *snap = &image_buffer[index];
+    ZMPacket *snap = &image_buffer[index];
     zone_image = new Image( *snap->image );
   } else {
     Debug(3, "Trying to load from event");
@@ -1169,23 +1140,27 @@ bool Monitor::CheckSignal( const Image *image ) {
 }

 bool Monitor::Analyse() {
+  mutex.lock();
   if ( shared_data->last_read_index == shared_data->last_write_index ) {
-    // I wonder how often this happens. Maybe if this happens we should sleep or something?
-    return( false );
+    //Debug(3, " shared_data->last_read_index == shared_data->last_write_index " );
+    // If analysis is keeping up, then it happens lots
+    mutex.unlock();
+    return false;
   }

   struct timeval now;
   gettimeofday( &now, NULL );

-  if ( image_count && fps_report_interval && !(image_count%fps_report_interval) ) {
-    fps = double(fps_report_interval)/(now.tv_sec - last_fps_time);
+  if ( analysis_image_count && fps_report_interval && !(analysis_image_count%fps_report_interval) ) {
+    fps = double(fps_report_interval)/(now.tv_sec - last_analysis_fps_time);
     Info( "%s: %d - Analysing at %.2f fps", name, image_count, fps );
     static char sql[ZM_SQL_SML_BUFSIZ];
     snprintf( sql, sizeof(sql), "UPDATE Monitors SET AnalysisFPS = '%.2lf' WHERE Id = '%d'", fps, id );
     if ( mysql_query( &dbconn, sql ) ) {
       Error( "Can't run query: %s", mysql_error( &dbconn ) );
     }
-    last_fps_time = now.tv_sec;
+    last_analysis_fps_time = now.tv_sec;
   }

   int index;
@@ -1203,7 +1178,8 @@ bool Monitor::Analyse() {
     int pending_frames = shared_data->last_write_index - shared_data->last_read_index;
     if ( pending_frames < 0 ) pending_frames += image_buffer_count;

-    Debug( 4, "ReadIndex:%d, WriteIndex: %d, PendingFrames = %d, ReadMargin = %d, Step = %d", shared_data->last_read_index, shared_data->last_write_index, pending_frames, read_margin, step );
+    Debug( 4, "ReadIndex:%d, WriteIndex: %d, PendingFrames = %d, ReadMargin = %d, Step = %d",
+      shared_data->last_read_index, shared_data->last_write_index, pending_frames, read_margin, step );
     if ( step <= pending_frames ) {
       index = (shared_data->last_read_index+step)%image_buffer_count;
     } else {
@@ -1216,10 +1192,12 @@ bool Monitor::Analyse() {
     index = shared_data->last_write_index%image_buffer_count;
   }

-  Snapshot *snap = &image_buffer[index];
-  struct timeval *timestamp = snap->timestamp;
+  ZMPacket *snap = &image_buffer[index];
+  struct timeval *timestamp = &snap->timestamp;
+  //Debug(2, "timestamp for index (%d) %s", index, timeval_to_string( *timestamp ) );
   Image *snap_image = snap->image;

+  // This chunk of code is not analysis, so shouldn't be in here. Move it up to wherever analyse is called
   if ( shared_data->action ) {
+    // Can there be more than 1 bit set in the action? Shouldn't these be elseifs?
     if ( shared_data->action & RELOAD ) {
@@ -1267,7 +1245,6 @@ bool Monitor::Analyse() {
   if ( static_undef ) {
     // Sure would be nice to be able to assume that these were already initialized. It's just 1 compare/branch, but really not necessary.
     static_undef = false;
     timestamps = new struct timeval *[pre_event_count];
     images = new Image *[pre_event_count];
     last_signal = shared_data->signal;
   }
@@ -1279,13 +1256,16 @@ bool Monitor::Analyse() {
   Debug(3, "Motion detection is enabled signal(%d) signal_change(%d)", signal, signal_change);

   if ( trigger_data->trigger_state != TRIGGER_OFF ) {
-    Debug(3, "trigger != off");
+    Debug(9, "Trigger not off, state is (%d)", trigger_data->trigger_state );
     unsigned int score = 0;
+    // Ready means that we have captured the warmup # of frames
     if ( Ready() ) {
+      Debug(9, "Ready");
       std::string cause;
       Event::StringSetMap noteSetMap;

       if ( trigger_data->trigger_state == TRIGGER_ON ) {

         score += trigger_data->trigger_score;
         if ( !event ) {
           if ( cause.length() )
@@ -1323,10 +1303,12 @@ bool Monitor::Analyse() {
             ref_image = *snap_image;

           } else if ( signal && Active() && (function == MODECT || function == MOCORD) ) {
+            Debug(3, "signal and active and modect");
             Event::StringSet zoneSet;
             int motion_score = last_motion_score;
             if ( !(image_count % (motion_frame_skip+1) ) ) {
+              // Get new score.
+              Debug(3,"before DetectMotion");
               motion_score = DetectMotion( *snap_image, zoneSet );

               Debug( 3, "After motion detection, last_motion_score(%d), new motion score(%d)", last_motion_score, motion_score );
@@ -1377,14 +1359,10 @@ bool Monitor::Analyse() {
       //TODO: What happens is the event closes and sets recording to false then recording to true again so quickly that our capture daemon never picks it up. Maybe need a refresh flag?
       if ( (!signal_change && signal) && (function == RECORD || function == MOCORD) ) {
         if ( event ) {
-          //TODO: We shouldn't have to do this every time. Not sure why it clears itself if this isn't here??
-          //snprintf(video_store_data->event_file, sizeof(video_store_data->event_file), "%s", event->getEventFile());
-          //Debug( 3, "Detected new event at (%d.%d)", timestamp->tv_sec,timestamp->tv_usec );

           if ( section_length ) {
             // TODO: Wouldn't this be clearer if we just did something like if now - event->start > section_length ?
             int section_mod = timestamp->tv_sec % section_length;
-            Debug( 4, "Section length (%d) Last Section Mod(%d), new section mod(%d)", section_length, last_section_mod, section_mod );
+            Debug( 3, "Section length (%d) Last Section Mod(%d), new section mod(%d)", section_length, last_section_mod, section_mod );
             if ( section_mod < last_section_mod ) {
               //if ( state == IDLE || state == TAPE || event_close_mode == CLOSE_TIME ) {
               //if ( state == TAPE ) {
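The section test above leans on a modulo trick: `timestamp->tv_sec % section_length` decreases exactly when the clock crosses a multiple of section_length, so comparing it with the previous value detects "close this event and start the next section" without storing the event start time. A small demonstration:

```cpp
#include <cstdio>
#include <ctime>

// Demonstration of the section-rollover test above: section_mod drops
// exactly when time crosses a multiple of section_length, so comparing it
// with the previous value detects a section boundary.
int main() {
  const time_t section_length = 600;  // 10-minute sections
  time_t last_section_mod = 0;

  for ( time_t t = 1000; t < 2400; t += 100 ) {
    time_t section_mod = t % section_length;
    if ( section_mod < last_section_mod )
      std::printf("t=%ld: section boundary crossed, would start new event\n", (long)t);
    last_section_mod = section_mod;
  }
  return 0;
}
```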
@@ -1419,59 +1397,10 @@ bool Monitor::Analyse() {
              if ( state == IDLE ) {
                shared_data->state = state = TAPE;
              }

              //if ( config.overlap_timed_events )
              if ( false ) {
                int pre_index;
                int pre_event_images = pre_event_count;

                if ( analysis_fps ) {
                  // If analysis fps is set,
                  // compute the index for pre event images in the dedicated buffer
                  pre_index = image_count%pre_event_buffer_count;

                  // Seek forward the next filled slot in to the buffer (oldest data)
                  // from the current position
                  while ( pre_event_images && !pre_event_buffer[pre_index].timestamp->tv_sec ) {
                    pre_index = (pre_index + 1)%pre_event_buffer_count;
                    // Slot is empty, removing image from counter
                    pre_event_images--;
                  }
                } else {
                  // If analysis fps is not set (analysis performed at capturing framerate),
                  // compute the index for pre event images in the capturing buffer
                  pre_index = ((index + image_buffer_count) - pre_event_count)%image_buffer_count;

                  // Seek forward the next filled slot in to the buffer (oldest data)
                  // from the current position
                  while ( pre_event_images && !image_buffer[pre_index].timestamp->tv_sec ) {
                    pre_index = (pre_index + 1)%image_buffer_count;
                    // Slot is empty, removing image from counter
                    pre_event_images--;
                  }
                }

                if ( pre_event_images ) {
                  if ( analysis_fps ) {
                    for ( int i = 0; i < pre_event_images; i++ ) {
                      timestamps[i] = pre_event_buffer[pre_index].timestamp;
                      images[i] = pre_event_buffer[pre_index].image;
                      pre_index = (pre_index + 1)%pre_event_buffer_count;
                    }
                  } else {
                    for ( int i = 0; i < pre_event_images; i++ ) {
                      timestamps[i] = image_buffer[pre_index].timestamp;
                      images[i] = image_buffer[pre_index].image;
                      pre_index = (pre_index + 1)%image_buffer_count;
                    }
                  }

                  event->AddFrames( pre_event_images, images, timestamps );
                }
              } // end if false or config.overlap_timed_events
            } // end if ! event
          }
          if ( score ) {
            Debug(9, "Score: (%d)", score );
            if ( (state == IDLE || state == TAPE || state == PREALARM ) ) {
              if ( Event::PreAlarmCount() >= (alarm_frame_count-1) ) {
                Info( "%s: %03d - Gone into alarm state", name, image_count );
@@ -1484,16 +1413,18 @@ bool Monitor::Analyse() {
                 // If analysis fps is set,
                 // compute the index for pre event images in the dedicated buffer
                 pre_index = image_count%pre_event_buffer_count;
+                Debug(3, "Pre Index = (%d) = image_count(%d) %% pre_event_buffer_count (%d)", pre_index, image_count, pre_event_buffer_count );

                 // Seek forward the next filled slot in to the buffer (oldest data)
                 // from the current position
-                while ( pre_event_images && !pre_event_buffer[pre_index].timestamp->tv_sec ) {
+                // ICON: I think this is supposed to handle when we haven't recorded enough images.
+                while ( pre_event_images && !pre_event_buffer[pre_index].timestamp.tv_sec ) {
                   pre_index = (pre_index + 1)%pre_event_buffer_count;
                   // Slot is empty, removing image from counter
                   pre_event_images--;
                 }

-                event = new Event( this, *(pre_event_buffer[pre_index].timestamp), cause, noteSetMap );
+                event = new Event( this, pre_event_buffer[pre_index].timestamp, cause, noteSetMap );
               } else {
                 // If analysis fps is not set (analysis performed at capturing framerate),
                 // compute the index for pre event images in the capturing buffer
@@ -1504,13 +1435,13 @@ bool Monitor::Analyse() {

                 // Seek forward the next filled slot in to the buffer (oldest data)
                 // from the current position
-                while ( pre_event_images && !image_buffer[pre_index].timestamp->tv_sec ) {
+                while ( pre_event_images && !image_buffer[pre_index].timestamp.tv_sec ) {
                   pre_index = (pre_index + 1)%image_buffer_count;
                   // Slot is empty, removing image from counter
                   pre_event_images--;
                 }

-                event = new Event( this, *(image_buffer[pre_index].timestamp), cause, noteSetMap );
+                event = new Event( this, image_buffer[pre_index].timestamp, cause, noteSetMap );
               }
               shared_data->last_event_id = event->Id();
               //set up video store data
@ -1522,13 +1453,13 @@ bool Monitor::Analyse() {
|
|||
if ( pre_event_images ) {
|
||||
if ( analysis_fps ) {
|
||||
for ( int i = 0; i < pre_event_images; i++ ) {
|
||||
timestamps[i] = pre_event_buffer[pre_index].timestamp;
|
||||
timestamps[i] = &pre_event_buffer[pre_index].timestamp;
|
||||
images[i] = pre_event_buffer[pre_index].image;
|
||||
pre_index = (pre_index + 1)%pre_event_buffer_count;
|
||||
}
|
||||
} else {
|
||||
for ( int i = 0; i < pre_event_images; i++ ) {
|
||||
timestamps[i] = image_buffer[pre_index].timestamp;
|
||||
timestamps[i] = &image_buffer[pre_index].timestamp;
|
||||
images[i] = image_buffer[pre_index].image;
|
||||
pre_index = (pre_index + 1)%image_buffer_count;
|
||||
}
|
||||
|
@ -1596,12 +1527,14 @@ bool Monitor::Analyse() {
|
|||
if ( state == PREALARM )
|
||||
Event::AddPreAlarmFrame( snap_image, *timestamp, score, &alarm_image );
|
||||
else
|
||||
event->AddFrame( snap_image, *timestamp, score, &alarm_image );
|
||||
//event->AddFrame( snap_image, *timestamp, score, &alarm_image );
|
||||
event->AddPacket( snap, score, &alarm_image );
|
||||
} else {
|
||||
if ( state == PREALARM )
|
||||
Event::AddPreAlarmFrame( snap_image, *timestamp, score );
|
||||
else
|
||||
event->AddFrame( snap_image, *timestamp, score );
|
||||
//event->AddFrame( snap_image, *timestamp, score );
|
||||
event->AddPacket( snap, score );
|
||||
}
|
||||
} else {
|
||||
for( int i = 0; i < n_zones; i++ ) {
|
||||
|
@ -1614,7 +1547,8 @@ bool Monitor::Analyse() {
|
|||
if ( state == PREALARM )
|
||||
Event::AddPreAlarmFrame( snap_image, *timestamp, score );
|
||||
else
|
||||
event->AddFrame( snap_image, *timestamp, score );
|
||||
event->AddPacket( snap, score );
|
||||
//event->AddFrame( snap_image, *timestamp, score );
|
||||
}
|
||||
if ( event && noteSetMap.size() > 0 )
|
||||
event->updateNotes( noteSetMap );
|
||||
|
@ -1631,13 +1565,17 @@ bool Monitor::Analyse() {
|
|||
//}
|
||||
if ( !(image_count%(frame_skip+1)) ) {
|
||||
if ( config.bulk_frame_interval > 1 ) {
|
||||
event->AddFrame( snap_image, *timestamp, (event->Frames()<pre_event_count?0:-1) );
|
||||
event->AddPacket( snap, (event->Frames()<pre_event_count?0:-1) );
|
||||
//event->AddFrame( snap_image, *timestamp, (event->Frames()<pre_event_count?0:-1) );
|
||||
} else {
|
||||
event->AddFrame( snap_image, *timestamp );
|
||||
//event->AddFrame( snap_image, *timestamp );
|
||||
event->AddPacket( snap );
|
||||
}
|
||||
}
|
||||
}
|
||||
} // end if ! IDLE
|
||||
} else {
|
||||
Debug(3,"Not ready?");
|
||||
}
|
||||
} else {
|
||||
Debug(3, "trigger == off");
|
||||
|
@ -1660,17 +1598,19 @@ bool Monitor::Analyse() {
|
|||
} // end if Enabled()
|
||||
|
||||
shared_data->last_read_index = index % image_buffer_count;
|
||||
//shared_data->last_read_time = image_buffer[index].timestamp->tv_sec;
|
||||
shared_data->last_read_time = now.tv_sec;
|
||||
mutex.unlock();
|
||||
|
||||
if ( analysis_fps ) {
|
||||
// If analysis fps is set, add analysed image to dedicated pre event buffer
|
||||
int pre_index = image_count%pre_event_buffer_count;
|
||||
Debug(3,"analysis fps image_count(%d) pre_event_buffer_count(%d)", image_count, pre_event_buffer_count );
|
||||
int pre_index = pre_event_buffer_count ? image_count%pre_event_buffer_count : 0;
|
||||
Debug(3,"analysis fps pre_index(%d) = image_count(%d) %% pre_event_buffer_count(%d)", pre_index, image_count, pre_event_buffer_count );
|
||||
pre_event_buffer[pre_index].image->Assign(*snap->image);
|
||||
memcpy( pre_event_buffer[pre_index].timestamp, snap->timestamp, sizeof(struct timeval) );
|
||||
pre_event_buffer[pre_index].timestamp = snap->timestamp;
|
||||
}
|
||||
|
||||
image_count++;
|
||||
analysis_image_count++;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -1850,7 +1790,7 @@ void Monitor::ReloadLinkedMonitors( const char *p_linked_monitors ) {
|
|||
|
||||
#if ZM_HAS_V4L
|
||||
int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose purpose ) {
|
||||
std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Device, Channel, Format, V4LMultiBuffer, V4LCapturesPerFrame, Method, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, SignalCheckColour, Exif from Monitors where Function != 'None' and Type = 'Local'";
|
||||
std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Device, Channel, Format, V4LMultiBuffer, V4LCapturesPerFrame, Method, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, SignalCheckColour, Exif from Monitors where Function != 'None' and Type = 'Local'";
|
||||
;
|
||||
if ( device[0] ) {
|
||||
sql += " AND Device='";
|
||||
|
@ -1915,6 +1855,8 @@ int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose
|
|||
int savejpegs = atoi(dbrow[col]); col++;
|
||||
VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
|
||||
std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string output_codec = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string output_container = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
bool record_audio = (*dbrow[col] != '0'); col++;
|
||||
|
||||
int brightness = atoi(dbrow[col]); col++;
|
||||
|
@ -1992,6 +1934,8 @@ int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose
|
|||
savejpegs,
|
||||
videowriter,
|
||||
encoderparams,
|
||||
output_codec,
|
||||
output_container,
|
||||
record_audio,
|
||||
event_prefix,
|
||||
label_format,
|
||||
|
@ -2039,7 +1983,7 @@ int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose
|
|||
#endif // ZM_HAS_V4L
|
||||
|
||||
int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const char *port, const char *path, Monitor **&monitors, Purpose purpose ) {
|
||||
std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Protocol, Method, Host, Port, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, SaveJPEGs, VideoWriter, EncoderParameters, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'Remote'";
|
||||
std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Protocol, Method, Host, Port, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'Remote'";
|
||||
if ( staticConfig.SERVER_ID ) {
|
||||
sql += stringtf( " AND ServerId=%d", staticConfig.SERVER_ID );
|
||||
}
|
||||
|
@ -2085,6 +2029,8 @@ int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const c
|
|||
int savejpegs = atoi(dbrow[col]); col++;
|
||||
VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
|
||||
std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string output_codec = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string output_container = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
bool record_audio = (*dbrow[col] != '0'); col++;
|
||||
|
||||
int brightness = atoi(dbrow[col]); col++;
|
||||
|
@ -2176,6 +2122,8 @@ int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const c
|
|||
savejpegs,
|
||||
videowriter,
|
||||
encoderparams,
|
||||
output_codec,
|
||||
output_container,
|
||||
record_audio,
|
||||
event_prefix,
|
||||
label_format,
|
||||
|
@ -2222,7 +2170,7 @@ int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const c
|
|||
}
|
||||
|
||||
int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose purpose ) {
|
||||
std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'File'";
|
||||
std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'File'";
|
||||
if ( file[0] ) {
|
||||
sql += " AND Path='";
|
||||
sql += file;
|
||||
|
@ -2264,6 +2212,8 @@ int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose pu
|
|||
int savejpegs = atoi(dbrow[col]); col++;
|
||||
VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
|
||||
std::string encoderparams = dbrow[col]; col++;
|
||||
std::string output_codec = dbrow[col]; col++;
|
||||
std::string output_container = dbrow[col]; col++;
|
||||
bool record_audio = (*dbrow[col] != '0'); col++;
|
||||
|
||||
int brightness = atoi(dbrow[col]); col++;
|
||||
|
@ -2325,6 +2275,8 @@ int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose pu
|
|||
savejpegs,
|
||||
videowriter,
|
||||
encoderparams,
|
||||
output_codec,
|
||||
output_container,
|
||||
record_audio,
|
||||
event_prefix,
|
||||
label_format,
|
||||
|
@ -2372,7 +2324,7 @@ int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose pu
|
|||
|
||||
#if HAVE_LIBAVFORMAT
|
||||
int Monitor::LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose purpose ) {
|
||||
std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Path, Method, Options, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'Ffmpeg'";
|
||||
std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Path, Method, Options, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'Ffmpeg'";
|
||||
if ( file[0] ) {
|
||||
sql += " AND Path = '";
|
||||
sql += file;
|
||||
|
@ -2417,6 +2369,8 @@ int Monitor::LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose
|
|||
int savejpegs = atoi(dbrow[col]); col++;
|
||||
VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
|
||||
std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string output_codec = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string output_container = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
bool record_audio = (*dbrow[col] != '0'); col++;
|
||||
|
||||
int brightness = atoi(dbrow[col]); col++;
|
||||
|
@ -2484,6 +2438,8 @@ int Monitor::LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose
|
|||
savejpegs,
|
||||
videowriter,
|
||||
encoderparams,
|
||||
output_codec,
|
||||
output_container,
|
||||
record_audio,
|
||||
event_prefix,
|
||||
label_format,
|
||||
|
@ -2532,7 +2488,7 @@ int Monitor::LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose
|
|||
#endif // HAVE_LIBAVFORMAT
|
||||
|
||||
Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
|
||||
std::string sql = stringtf( "select Id, Name, ServerId, StorageId, Type, Function+0, Enabled, LinkedMonitors, Device, Channel, Format, V4LMultiBuffer, V4LCapturesPerFrame, Protocol, Method, Host, Port, Path, Options, User, Pass, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, SaveJPEGs, VideoWriter, EncoderParameters, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, SignalCheckColour, Exif from Monitors where Id = %d", p_id );
|
||||
std::string sql = stringtf( "select Id, Name, ServerId, StorageId, Type, Function+0, Enabled, LinkedMonitors, Device, Channel, Format, V4LMultiBuffer, V4LCapturesPerFrame, Protocol, Method, Host, Port, Path, Options, User, Pass, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, SignalCheckColour, Exif from Monitors where Id = %d", p_id );
|
||||
|
||||
zmDbRow dbrow;
|
||||
if ( ! dbrow.fetch( sql.c_str() ) ) {
|
||||
|
@ -2592,7 +2548,9 @@ Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
|
|||
bool rtsp_describe = (dbrow[col] && *dbrow[col] != '0'); col++;
|
||||
int savejpegs = atoi(dbrow[col]); col++;
|
||||
VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
|
||||
std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string output_codec = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
std::string output_container = dbrow[col] ? dbrow[col] : ""; col++;
|
||||
bool record_audio = (*dbrow[col] != '0'); col++;
|
||||
|
||||
int brightness = atoi(dbrow[col]); col++;
|
||||
|
@ -2811,6 +2769,8 @@ Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
|
|||
savejpegs,
|
||||
videowriter,
|
||||
encoderparams,
|
||||
output_codec,
|
||||
output_container,
|
||||
record_audio,
|
||||
event_prefix,
|
||||
label_format,
|
||||
|
@ -2858,12 +2818,14 @@ Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
|
|||
* Returns -1 on failure.
|
||||
*/
|
||||
int Monitor::Capture() {
|
||||
mutex.lock();
|
||||
static int FirstCapture = 1; // Used in de-interlacing to indicate whether this is the even or odd image
|
||||
|
||||
unsigned int index = image_count % image_buffer_count;
|
||||
Image* capture_image = image_buffer[index].image;
|
||||
ZMPacket packet;
|
||||
packet.set_image(capture_image);
|
||||
ZMPacket *packet = &image_buffer[index];
|
||||
// clears frame
|
||||
packet->reset();
|
||||
int captureResult = 0;
|
||||
|
||||
unsigned int deinterlacing_value = deinterlacing & 0xff;
|
||||
|
@ -2872,16 +2834,17 @@ int Monitor::Capture() {
|
|||
/* Copy the next image into the shared memory */
|
||||
capture_image->CopyBuffer(*(next_buffer.image));
|
||||
}
|
||||
|
||||
/* Capture a new next image */
|
||||
captureResult = camera->Capture(packet);
|
||||
captureResult = camera->Capture(*packet);
|
||||
gettimeofday( &packet->timestamp, NULL );
|
||||
|
||||
if ( FirstCapture ) {
|
||||
FirstCapture = 0;
|
||||
return 0;
|
||||
}
|
||||
} else {
|
||||
captureResult = camera->Capture(packet);
|
||||
captureResult = camera->Capture(*packet);
|
||||
gettimeofday( &packet->timestamp, NULL );
|
||||
if ( captureResult < 0 ) {
|
||||
// Unable to capture image for temporary reason
|
||||
// Fake a signal loss image
|
||||
|
@ -2928,7 +2891,7 @@ int Monitor::Capture() {
|
|||
if ( (index == shared_data->last_read_index) && (function > MONITOR) ) {
|
||||
Warning( "Buffer overrun at index %d, image %d, slow down capture, speed up analysis or increase ring buffer size", index, image_count );
|
||||
time_t now = time(0);
|
||||
double approxFps = double(image_buffer_count)/double(now-image_buffer[index].timestamp->tv_sec);
|
||||
double approxFps = double(image_buffer_count)/double(now-image_buffer[index].timestamp.tv_sec);
|
||||
time_t last_read_delta = now - shared_data->last_read_time;
|
||||
if ( last_read_delta > (image_buffer_count/approxFps) ) {
|
||||
Warning( "Last image read from shared memory %ld seconds ago, zma may have gone away", last_read_delta )
|
||||
|
@ -2939,44 +2902,96 @@ int Monitor::Capture() {
|
|||
if ( privacy_bitmask )
|
||||
capture_image->MaskPrivacy( privacy_bitmask );
|
||||
|
||||
//gettimeofday( image_buffer[index].timestamp, NULL );
|
||||
if ( config.timestamp_on_capture ) {
|
||||
TimestampImage( capture_image, &packet.timestamp );
|
||||
TimestampImage( capture_image, &packet->timestamp );
|
||||
}
|
||||
|
||||
int video_stream_id = camera->get_VideoStreamId();
|
||||
|
||||
//packetqueue.clear_unwanted_packets(&video_store_data->recording, video_stream_id);
|
||||
//videoStore->write_packets(packetqueue);
|
||||
if ( ! event ) {
|
||||
#if 0
|
||||
//Video recording
|
||||
if ( video_store_data->recording.tv_sec ) {
|
||||
if ( shared_data->last_event_id != this->GetVideoWriterEventId() ) {
|
||||
Debug(2, "Have change of event. last_event(%d), our current (%d)",
|
||||
shared_data->last_event_id,
|
||||
this->GetVideoWriterEventId()
|
||||
);
|
||||
if ( videoStore ) {
|
||||
Debug(2, "Have videostore already?");
|
||||
// I don't know if this is important or not... but I figure we might as well write this last packet out to the store before closing it.
|
||||
// Also don't know how much it matters for audio.
|
||||
int ret = videoStore->writePacket( packet );
|
||||
if ( ret < 0 ) { //Less than zero and we skipped a frame
|
||||
Warning("Error writing last packet to videostore.");
|
||||
}
|
||||
|
||||
delete videoStore;
|
||||
videoStore = NULL;
|
||||
this->SetVideoWriterEventId( 0 );
|
||||
} // end if videoStore
|
||||
} // end if end of recording
|
||||
|
||||
if ( shared_data->last_event_id and ! videoStore ) {
|
||||
Debug(2,"New videostore");
|
||||
videoStore = new VideoStore(
|
||||
(const char *) video_store_data->event_file,
|
||||
"mp4",
|
||||
camera->get_VideoStream(),
|
||||
( record_audio ? camera->get_AudioStream() : NULL ),
|
||||
video_store_data->recording.tv_sec,
|
||||
this );
|
||||
|
||||
if ( ! videoStore->open() ) {
|
||||
delete videoStore;
|
||||
videoStore = NULL;
|
||||
} else {
|
||||
this->SetVideoWriterEventId(shared_data->last_event_id);
|
||||
|
||||
Debug(2, "Clearing packets");
|
||||
// Clear all packets that predate the moment when the recording began
|
||||
packetqueue.clear_unwanted_packets(&video_store_data->recording, video_stream_id);
|
||||
videoStore->write_packets(packetqueue);
|
||||
} // success opening
|
||||
} // end if ! was recording
|
||||
} else { // Not recording
|
||||
if ( videoStore ) {
|
||||
Info("Deleting videoStore instance");
|
||||
delete videoStore;
|
||||
videoStore = NULL;
|
||||
this->SetVideoWriterEventId( 0 );
|
||||
}
|
||||
|
||||
// Buffer video packets, since we are not recording.
|
||||
// All audio packets are keyframes, so only if it's a video keyframe
|
||||
if ( ( packet.packet.stream_index == video_stream_id ) && ( packet.keyframe ) ) {
|
||||
if ( ( packet->packet.stream_index == video_stream_id ) && ( packet->keyframe ) ) {
|
||||
packetqueue.clearQueue( this->GetPreEventCount(), video_stream_id );
|
||||
}
|
||||
// The following lines should ensure that the queue always begins with a video keyframe
|
||||
if ( packet.packet.stream_index == camera->get_AudioStreamId() ) {
|
||||
if ( packet->packet.stream_index == camera->get_AudioStreamId() ) {
|
||||
//Debug(2, "Have audio packet, reocrd_audio is (%d) and packetqueue.size is (%d)", record_audio, packetqueue.size() );
|
||||
if ( record_audio && packetqueue.size() ) {
|
||||
// if it's audio, and we are doing audio, and there is already something in the queue
|
||||
packetqueue.queuePacket( &packet );
|
||||
packetqueue.queuePacket( packet );
|
||||
}
|
||||
} else if ( packet.packet.stream_index == video_stream_id ) {
|
||||
if ( packet.keyframe || packetqueue.size() ) // it's a keyframe or we already have something in the queue
|
||||
packetqueue.queuePacket( &packet );
|
||||
} else if ( packet->packet.stream_index == video_stream_id ) {
|
||||
if ( packet->keyframe || packetqueue.size() ) // it's a keyframe or we already have something in the queue
|
||||
packetqueue.queuePacket( packet );
|
||||
} // end if audio or video
|
||||
} else {
|
||||
} // end if recording or not
|
||||
|
||||
if ( videoStore ) {
|
||||
//Write the packet to our video store, it will be smart enough to know what to do
|
||||
if ( ! event->WritePacket( packet ) ) {
|
||||
int ret = videoStore->writePacket( packet );
|
||||
if ( ret < 0 ) { //Less than zero and we skipped a frame
|
||||
Warning("problem writing packet");
|
||||
}
|
||||
} // end if recording or not
|
||||
}
|
||||
#endif
|
||||
} // end if deinterlacing
|
||||
|
||||
shared_data->signal = CheckSignal(capture_image);
|
||||
shared_data->last_write_index = index;
|
||||
shared_data->last_write_time = image_buffer[index].timestamp->tv_sec;
|
||||
|
||||
shared_data->last_write_time = image_buffer[index].timestamp.tv_sec;
|
||||
mutex.unlock();
|
||||
image_count++;
|
||||
|
||||
if ( image_count && fps_report_interval && !(image_count%fps_report_interval) ) {
|
||||
|
@ -2984,7 +2999,7 @@ int Monitor::Capture() {
|
|||
if ( !captureResult ) {
|
||||
gettimeofday( &now, NULL );
|
||||
} else {
|
||||
now.tv_sec = image_buffer[index].timestamp->tv_sec;
|
||||
now.tv_sec = image_buffer[index].timestamp.tv_sec;
|
||||
}
|
||||
|
||||
// If we are too fast, we get div by zero. This seems to happen in the case of audio packets.
|
||||
|
@ -3018,7 +3033,7 @@ int Monitor::Capture() {
|
|||
shared_data->action &= ~SET_SETTINGS;
|
||||
}
|
||||
return captureResult;
|
||||
}
|
||||
} // end Monitor::Capture
|
||||
|
||||
void Monitor::TimestampImage( Image *ts_image, const struct timeval *ts_time ) const {
|
||||
if ( label_format[0] ) {
|
||||
|
@ -3305,6 +3320,19 @@ int Monitor::PostCapture() {
|
|||
}
|
||||
Monitor::Orientation Monitor::getOrientation() const { return orientation; }
|
||||
|
||||
Monitor::Snapshot *Monitor::getSnapshot() {
|
||||
ZMPacket *Monitor::getSnapshot() {
|
||||
return &image_buffer[ shared_data->last_write_index%image_buffer_count ];
|
||||
}
|
||||
|
||||
// Wait for camera to get an image, and then assign it as the base reference image. So this should be done as the first task in the analysis thread startup.
|
||||
void Monitor::get_ref_image() {
|
||||
while (
|
||||
( shared_data->last_write_index == (unsigned int)image_buffer_count )
|
||||
&&
|
||||
( shared_data->last_write_time == 0 )
|
||||
) {
|
||||
Warning( "Waiting for capture daemon" );
|
||||
usleep( 100000 );
|
||||
}
|
||||
ref_image.Assign( width, height, camera->Colours(), camera->SubpixelOrder(), image_buffer[shared_data->last_write_index].image->Buffer(), camera->ImageSize());
|
||||
}
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include "zm_event.h"
|
||||
#include "zm_videostore.h"
|
||||
#include "zm_packetqueue.h"
|
||||
#include "zm_thread.h"
|
||||
|
||||
class Monitor;
|
||||
#include "zm_camera.h"
|
||||
|
@ -153,13 +154,6 @@ protected:
|
|||
char trigger_showtext[256];
|
||||
} TriggerData;
|
||||
|
||||
/* sizeof(Snapshot) expected to be 16 bytes on 32bit and 32 bytes on 64bit */
|
||||
struct Snapshot {
|
||||
struct timeval *timestamp;
|
||||
Image *image;
|
||||
void* padding;
|
||||
};
|
||||
|
||||
//TODO: Technically we can't exclude this struct when people don't have avformat as the Memory.pm module doesn't know about avformat
|
||||
//sizeOf(VideoStoreData) expected to be 4104 bytes on 32bit and 64bit
|
||||
typedef struct {
|
||||
|
@ -167,11 +161,13 @@ protected:
|
|||
uint32_t current_event;
|
||||
char event_file[4096];
|
||||
timeval recording; // used as both bool and a pointer to the timestamp when recording should begin
|
||||
//uint32_t frameNumber;
|
||||
} VideoStoreData;
|
||||
|
||||
VideoStore *videoStore;
|
||||
zm_packetqueue packetqueue;
|
||||
Mutex mutex;
|
||||
std::string output_codec;
|
||||
std::string output_container;
|
||||
|
||||
class MonitorLink {
|
||||
protected:
|
||||
|
@ -197,7 +193,6 @@ protected:
|
|||
int last_state;
|
||||
int last_event_id;
|
||||
|
||||
|
||||
public:
|
||||
MonitorLink( int p_id, const char *p_name );
|
||||
~MonitorLink();
|
||||
|
@ -286,6 +281,7 @@ protected:
|
|||
Purpose purpose; // What this monitor has been created to do
|
||||
int event_count;
|
||||
int image_count;
|
||||
int analysis_image_count;
|
||||
int ready_count;
|
||||
int first_alarm_count;
|
||||
int last_alarm_count;
|
||||
|
@ -294,6 +290,7 @@ protected:
|
|||
State state;
|
||||
time_t start_time;
|
||||
time_t last_fps_time;
|
||||
time_t last_analysis_fps_time;
|
||||
time_t auto_resume_time;
|
||||
unsigned int last_motion_score;
|
||||
|
||||
|
@ -313,9 +310,9 @@ protected:
|
|||
TriggerData *trigger_data;
|
||||
VideoStoreData *video_store_data;
|
||||
|
||||
Snapshot *image_buffer;
|
||||
Snapshot next_buffer; /* Used by four field deinterlacing */
|
||||
Snapshot *pre_event_buffer;
|
||||
ZMPacket *image_buffer;
|
||||
ZMPacket next_buffer; /* Used by four field deinterlacing */
|
||||
ZMPacket *pre_event_buffer;
|
||||
|
||||
Camera *camera;
|
||||
|
||||
|
@ -351,6 +348,8 @@ public:
|
|||
int p_savejpegs,
|
||||
VideoWriter p_videowriter,
|
||||
std::string p_encoderparams,
|
||||
std::string p_output_codec,
|
||||
std::string p_output_container,
|
||||
bool p_record_audio,
|
||||
const char *p_event_prefix,
|
||||
const char *p_label_format,
|
||||
|
@ -442,6 +441,8 @@ public:
|
|||
VideoWriter GetOptVideoWriter() const { return( videowriter ); }
|
||||
const std::vector<EncoderParameter_t>* GetOptEncoderParams() const { return( &encoderparamsvec ); }
|
||||
const std::string &GetEncoderOptions() const { return( encoderparams ); }
|
||||
const std::string &OutputCodec() const { return output_codec; }
|
||||
const std::string &OutputContainer() const { return output_container; }
|
||||
|
||||
uint32_t GetLastEventId() const { return shared_data->last_event_id; }
|
||||
uint32_t GetVideoWriterEventId() const { return video_store_data->current_event; }
|
||||
|
@ -450,7 +451,7 @@ public:
|
|||
unsigned int GetPreEventCount() const { return pre_event_count; };
|
||||
State GetState() const;
|
||||
int GetImage( int index=-1, int scale=100 );
|
||||
Snapshot *getSnapshot();
|
||||
ZMPacket *getSnapshot();
|
||||
struct timeval GetTimestamp( int index=-1 ) const;
|
||||
void UpdateAdaptiveSkip();
|
||||
useconds_t GetAnalysisRate();
|
||||
|
@ -471,6 +472,7 @@ public:
|
|||
inline void setStartupTime( time_t p_time ) {
|
||||
shared_data->startup_time = p_time;
|
||||
}
|
||||
void get_ref_image();
|
||||
|
||||
void actionReload();
|
||||
void actionEnable();
|
||||
|
|
|
@ -665,13 +665,13 @@ Debug(2, "Have checking command Queue for connkey: %d", connkey );
|
|||
if ( (frame_mod == 1) || ((frame_count%frame_mod) == 0) ) {
|
||||
if ( !paused && !delayed ) {
|
||||
// Send the next frame
|
||||
Monitor::Snapshot *snap = &monitor->image_buffer[index];
|
||||
ZMPacket *snap = &monitor->image_buffer[index];
|
||||
|
||||
if ( !sendFrame( snap->image, snap->timestamp ) ) {
|
||||
if ( !sendFrame( snap->image, &snap->timestamp ) ) {
|
||||
Debug(2, "sendFrame failed, quiting.");
|
||||
zm_terminate = true;
|
||||
}
|
||||
memcpy( &last_frame_timestamp, snap->timestamp, sizeof(last_frame_timestamp) );
|
||||
last_frame_timestamp = snap->timestamp;
|
||||
//frame_sent = true;
|
||||
|
||||
temp_read_index = temp_write_index;
|
||||
|
@ -679,14 +679,14 @@ Debug(2, "Have checking command Queue for connkey: %d", connkey );
|
|||
}
|
||||
if ( buffered_playback ) {
|
||||
if ( monitor->shared_data->valid ) {
|
||||
if ( monitor->image_buffer[index].timestamp->tv_sec ) {
|
||||
if ( monitor->image_buffer[index].timestamp.tv_sec ) {
|
||||
int temp_index = temp_write_index%temp_image_buffer_count;
|
||||
Debug( 2, "Storing frame %d", temp_index );
|
||||
if ( !temp_image_buffer[temp_index].valid ) {
|
||||
snprintf( temp_image_buffer[temp_index].file_name, sizeof(temp_image_buffer[0].file_name), "%s/zmswap-i%05d.jpg", swap_path, temp_index );
|
||||
temp_image_buffer[temp_index].valid = true;
|
||||
}
|
||||
memcpy( &(temp_image_buffer[temp_index].timestamp), monitor->image_buffer[index].timestamp, sizeof(temp_image_buffer[0].timestamp) );
|
||||
temp_image_buffer[temp_index].timestamp = monitor->image_buffer[index].timestamp;
|
||||
monitor->image_buffer[index].image->WriteJpeg( temp_image_buffer[temp_index].file_name, config.jpeg_file_quality );
|
||||
temp_write_index = MOD_ADD( temp_write_index, 1, temp_image_buffer_count );
|
||||
if ( temp_write_index == temp_read_index ) {
|
||||
|
@ -764,7 +764,7 @@ void MonitorStream::SingleImage( int scale ) {
|
|||
int img_buffer_size = 0;
|
||||
static JOCTET img_buffer[ZM_MAX_IMAGE_SIZE];
|
||||
Image scaled_image;
|
||||
Monitor::Snapshot *snap = monitor->getSnapshot();
|
||||
ZMPacket *snap = monitor->getSnapshot();
|
||||
Image *snap_image = snap->image;
|
||||
|
||||
if ( scale != ZM_SCALE_BASE ) {
|
||||
|
@ -773,7 +773,7 @@ void MonitorStream::SingleImage( int scale ) {
|
|||
snap_image = &scaled_image;
|
||||
}
|
||||
if ( !config.timestamp_on_capture ) {
|
||||
monitor->TimestampImage( snap_image, snap->timestamp );
|
||||
monitor->TimestampImage( snap_image, &snap->timestamp );
|
||||
}
|
||||
snap_image->EncodeJpeg( img_buffer, &img_buffer_size );
|
||||
|
||||
|
@ -784,7 +784,7 @@ void MonitorStream::SingleImage( int scale ) {
|
|||
|
||||
void MonitorStream::SingleImageRaw( int scale ) {
|
||||
Image scaled_image;
|
||||
Monitor::Snapshot *snap = monitor->getSnapshot();
|
||||
ZMPacket *snap = monitor->getSnapshot();
|
||||
Image *snap_image = snap->image;
|
||||
|
||||
if ( scale != ZM_SCALE_BASE ) {
|
||||
|
@ -793,7 +793,7 @@ void MonitorStream::SingleImageRaw( int scale ) {
|
|||
snap_image = &scaled_image;
|
||||
}
|
||||
if ( !config.timestamp_on_capture ) {
|
||||
monitor->TimestampImage( snap_image, snap->timestamp );
|
||||
monitor->TimestampImage( snap_image, &snap->timestamp );
|
||||
}
|
||||
|
||||
fprintf( stdout, "Content-Length: %d\r\n", snap_image->Size() );
|
||||
|
@ -806,7 +806,7 @@ void MonitorStream::SingleImageZip( int scale ) {
|
|||
static Bytef img_buffer[ZM_MAX_IMAGE_SIZE];
|
||||
Image scaled_image;
|
||||
|
||||
Monitor::Snapshot *snap = monitor->getSnapshot();
|
||||
ZMPacket *snap = monitor->getSnapshot();
|
||||
Image *snap_image = snap->image;
|
||||
|
||||
if ( scale != ZM_SCALE_BASE ) {
|
||||
|
@ -815,7 +815,7 @@ void MonitorStream::SingleImageZip( int scale ) {
|
|||
snap_image = &scaled_image;
|
||||
}
|
||||
if ( !config.timestamp_on_capture ) {
|
||||
monitor->TimestampImage( snap_image, snap->timestamp );
|
||||
monitor->TimestampImage( snap_image, &snap->timestamp );
|
||||
}
|
||||
snap_image->Zip( img_buffer, &img_buffer_size );
|
||||
|
||||
|
|
137
src/zm_mpeg.cpp
137
src/zm_mpeg.cpp
|
@ -117,10 +117,10 @@ void VideoStream::SetupFormat( ) {
|
|||
|
||||
void VideoStream::SetupCodec( int colours, int subpixelorder, int width, int height, int bitrate, double frame_rate ) {
|
||||
/* ffmpeg format matching */
|
||||
switch(colours) {
|
||||
switch ( colours ) {
|
||||
case ZM_COLOUR_RGB24:
|
||||
{
|
||||
if(subpixelorder == ZM_SUBPIX_ORDER_BGR) {
|
||||
if ( subpixelorder == ZM_SUBPIX_ORDER_BGR ) {
|
||||
/* BGR subpixel order */
|
||||
pf = AV_PIX_FMT_BGR24;
|
||||
} else {
|
||||
|
@ -211,39 +211,52 @@ void VideoStream::SetupCodec( int colours, int subpixelorder, int width, int hei
|
|||
|
||||
Debug( 1, "Allocated stream" );
|
||||
|
||||
AVCodecContext *c = ost->codec;
|
||||
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
|
||||
codec_context = avcodec_alloc_context3(NULL);
|
||||
avcodec_parameters_to_context(codec_context, ost->codecpar);
|
||||
#else
|
||||
codec_context = ost->codec;
|
||||
#endif
|
||||
|
||||
c->codec_id = codec->id;
|
||||
c->codec_type = codec->type;
|
||||
codec_context->codec_id = codec->id;
|
||||
codec_context->codec_type = codec->type;
|
||||
|
||||
c->pix_fmt = strcmp( "mjpeg", ofc->oformat->name ) == 0 ? AV_PIX_FMT_YUVJ422P : AV_PIX_FMT_YUV420P;
|
||||
codec_context->pix_fmt = strcmp( "mjpeg", ofc->oformat->name ) == 0 ? AV_PIX_FMT_YUVJ422P : AV_PIX_FMT_YUV420P;
|
||||
if ( bitrate <= 100 ) {
|
||||
// Quality based bitrate control (VBR). Scale is 1..31 where 1 is best.
|
||||
// This gets rid of artifacts in the beginning of the movie; and well, even quality.
|
||||
c->flags |= CODEC_FLAG_QSCALE;
|
||||
c->global_quality = FF_QP2LAMBDA * (31 - (31 * (bitrate / 100.0)));
|
||||
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
|
||||
codec_context->flags |= AV_CODEC_FLAG_QSCALE;
|
||||
#else
|
||||
codec_context->flags |= CODEC_FLAG_QSCALE;
|
||||
#endif
|
||||
codec_context->global_quality = FF_QP2LAMBDA * (31 - (31 * (bitrate / 100.0)));
|
||||
} else {
|
||||
c->bit_rate = bitrate;
|
||||
codec_context->bit_rate = bitrate;
|
||||
}
|
||||
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = width;
|
||||
c->height = height;
|
||||
codec_context->width = width;
|
||||
codec_context->height = height;
|
||||
/* time base: this is the fundamental unit of time (in seconds) in terms
|
||||
of which frame timestamps are represented. for fixed-fps content,
|
||||
timebase should be 1/framerate and timestamp increments should be
|
||||
identically 1. */
|
||||
c->time_base.den = frame_rate;
|
||||
c->time_base.num = 1;
|
||||
codec_context->time_base.den = frame_rate;
|
||||
codec_context->time_base.num = 1;
|
||||
|
||||
Debug( 1, "Will encode in %d fps.", c->time_base.den );
|
||||
Debug( 1, "Will encode in %d fps.", codec_context->time_base.den );
|
||||
|
||||
/* emit one intra frame every second */
|
||||
c->gop_size = frame_rate;
|
||||
codec_context->gop_size = frame_rate;
|
||||
|
||||
// some formats want stream headers to be separate
|
||||
if ( of->flags & AVFMT_GLOBALHEADER )
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
|
||||
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
#else
|
||||
codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
#endif
|
||||
} else {
|
||||
Fatal( "of->video_codec == AV_CODEC_ID_NONE" );
|
||||
}
|
||||
|
@ -278,13 +291,11 @@ void VideoStream::OpenStream( ) {
|
|||
/* now that all the parameters are set, we can open the
|
||||
video codecs and allocate the necessary encode buffers */
|
||||
if ( ost ) {
|
||||
AVCodecContext *c = ost->codec;
|
||||
|
||||
/* open the codec */
|
||||
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
|
||||
if ( (avRet = avcodec_open( c, codec )) < 0 )
|
||||
if ( (avRet = avcodec_open( codec_context, codec )) < 0 )
|
||||
#else
|
||||
if ( (avRet = avcodec_open2( c, codec, 0 )) < 0 )
|
||||
if ( (avRet = avcodec_open2( codec_context, codec, 0 )) < 0 )
|
||||
#endif
|
||||
{
|
||||
Fatal( "Could not open codec. Error code %d \"%s\"", avRet, av_err2str( avRet ) );
|
||||
|
@ -293,19 +304,15 @@ void VideoStream::OpenStream( ) {
|
|||
Debug( 1, "Opened codec" );
|
||||
|
||||
/* allocate the encoded raw picture */
|
||||
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
|
||||
opicture = av_frame_alloc( );
|
||||
#else
|
||||
opicture = avcodec_alloc_frame( );
|
||||
#endif
|
||||
opicture = zm_av_frame_alloc( );
|
||||
if ( !opicture ) {
|
||||
Panic( "Could not allocate opicture" );
|
||||
}
|
||||
|
||||
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
|
||||
int size = av_image_get_buffer_size( c->pix_fmt, c->width, c->height, 1 );
|
||||
int size = av_image_get_buffer_size( codec_context->pix_fmt, codec_context->width, codec_context->height, 1 );
|
||||
#else
|
||||
int size = avpicture_get_size( c->pix_fmt, c->width, c->height );
|
||||
int size = avpicture_get_size( codec_context->pix_fmt, codec_context->width, codec_context->height );
|
||||
#endif
|
||||
|
||||
uint8_t *opicture_buf = (uint8_t *)av_malloc( size );
|
||||
|
@ -315,17 +322,17 @@ void VideoStream::OpenStream( ) {
|
|||
}
|
||||
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
|
||||
av_image_fill_arrays(opicture->data, opicture->linesize,
|
||||
opicture_buf, c->pix_fmt, c->width, c->height, 1);
|
||||
opicture_buf, codec_context->pix_fmt, codec_context->width, codec_context->height, 1);
|
||||
#else
|
||||
avpicture_fill( (AVPicture *)opicture, opicture_buf, c->pix_fmt,
|
||||
c->width, c->height );
|
||||
avpicture_fill( (AVPicture *)opicture, opicture_buf, codec_context->pix_fmt,
|
||||
codec_context->width, codec_context->height );
|
||||
#endif
|
||||
|
||||
/* if the output format is not identical to the input format, then a temporary
|
||||
picture is needed too. It is then converted to the required
|
||||
output format */
|
||||
tmp_opicture = NULL;
|
||||
if ( c->pix_fmt != pf ) {
|
||||
if ( codec_context->pix_fmt != pf ) {
|
||||
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
|
||||
tmp_opicture = av_frame_alloc( );
|
||||
#else
|
||||
|
@ -335,9 +342,9 @@ void VideoStream::OpenStream( ) {
|
|||
Panic( "Could not allocate tmp_opicture" );
|
||||
}
|
||||
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
|
||||
int size = av_image_get_buffer_size( pf, c->width, c->height,1 );
|
||||
int size = av_image_get_buffer_size( pf, codec_context->width, codec_context->height,1 );
|
||||
#else
|
||||
int size = avpicture_get_size( pf, c->width, c->height );
|
||||
int size = avpicture_get_size( pf, codec_context->width, codec_context->height );
|
||||
#endif
|
||||
uint8_t *tmp_opicture_buf = (uint8_t *)av_malloc( size );
|
||||
if ( !tmp_opicture_buf ) {
|
||||
|
@ -347,10 +354,10 @@ void VideoStream::OpenStream( ) {
|
|||
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
|
||||
av_image_fill_arrays(tmp_opicture->data,
|
||||
tmp_opicture->linesize, tmp_opicture_buf, pf,
|
||||
c->width, c->height, 1);
|
||||
codec_context->width, codec_context->height, 1);
|
||||
#else
|
||||
avpicture_fill( (AVPicture *)tmp_opicture,
|
||||
tmp_opicture_buf, pf, c->width, c->height );
|
||||
tmp_opicture_buf, pf, codec_context->width, codec_context->height );
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
@ -375,7 +382,12 @@ void VideoStream::OpenStream( ) {
|
|||
}
|
||||
|
||||
video_outbuf = NULL;
|
||||
#if LIBAVFORMAT_VERSION_CHECK(57, 0, 0, 0, 0)
|
||||
if (codec_context->codec_type == AVMEDIA_TYPE_VIDEO &&
|
||||
codec_context->codec_id == AV_CODEC_ID_RAWVIDEO) {
|
||||
#else
|
||||
if ( !(of->flags & AVFMT_RAWPICTURE) ) {
|
||||
#endif
|
||||
/* allocate output buffer */
|
||||
/* XXX: API change will be done */
|
||||
// TODO: Make buffer dynamic.
|
||||
|
@ -446,6 +458,8 @@ VideoStream::VideoStream( const char *in_filename, const char *in_format, int bi
|
|||
if ( pthread_mutex_init( buffer_copy_lock, NULL ) != 0 ) {
|
||||
Fatal("pthread_mutex_init failed");
|
||||
}
|
||||
|
||||
codec_context = NULL;
|
||||
}
|
||||
|
||||
VideoStream::~VideoStream( ) {
|
||||
|
@ -481,7 +495,7 @@ VideoStream::~VideoStream( ) {
|
|||
|
||||
/* close each codec */
|
||||
if ( ost ) {
|
||||
avcodec_close( ost->codec );
|
||||
avcodec_close( codec_context );
|
||||
av_free( opicture->data[0] );
|
||||
av_frame_free( &opicture );
|
||||
if ( tmp_opicture ) {
|
||||
|
@ -564,17 +578,15 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
|
|||
static struct SwsContext *img_convert_ctx = 0;
|
||||
#endif // HAVE_LIBSWSCALE
|
||||
|
||||
AVCodecContext *c = ost->codec;
|
||||
|
||||
if ( c->pix_fmt != pf ) {
|
||||
if ( codec_context->pix_fmt != pf ) {
|
||||
memcpy( tmp_opicture->data[0], buffer, buffer_size );
|
||||
#ifdef HAVE_LIBSWSCALE
|
||||
if ( !img_convert_ctx ) {
|
||||
img_convert_ctx = sws_getCachedContext( NULL, c->width, c->height, pf, c->width, c->height, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
|
||||
img_convert_ctx = sws_getCachedContext( NULL, codec_context->width, codec_context->height, pf, codec_context->width, codec_context->height, codec_context->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
|
||||
if ( !img_convert_ctx )
|
||||
Panic( "Unable to initialise image scaling context" );
|
||||
}
|
||||
sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, c->height, opicture->data, opicture->linesize );
|
||||
sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, codec_context->height, opicture->data, opicture->linesize );
|
||||
#else // HAVE_LIBSWSCALE
|
||||
Fatal( "swscale is required for MPEG mode" );
|
||||
#endif // HAVE_LIBSWSCALE
|
||||
|
@ -586,7 +598,13 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
|
|||
AVPacket *pkt = packet_buffers[packet_index];
|
||||
av_init_packet( pkt );
|
||||
int got_packet = 0;
|
||||
#if LIBAVFORMAT_VERSION_CHECK(57, 0, 0, 0, 0)
|
||||
if (codec_context->codec_type == AVMEDIA_TYPE_VIDEO &&
|
||||
codec_context->codec_id == AV_CODEC_ID_RAWVIDEO) {
|
||||
#else
|
||||
if ( of->flags & AVFMT_RAWPICTURE ) {
|
||||
#endif
|
||||
|
||||
#if LIBAVCODEC_VERSION_CHECK(52, 30, 2, 30, 2)
|
||||
pkt->flags |= AV_PKT_FLAG_KEY;
|
||||
#else
|
||||
|
@ -597,19 +615,34 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
|
|||
pkt->size = sizeof (AVPicture);
|
||||
got_packet = 1;
|
||||
} else {
|
||||
opicture_ptr->pts = c->frame_number;
|
||||
opicture_ptr->quality = c->global_quality;
|
||||
opicture_ptr->pts = codec_context->frame_number;
|
||||
opicture_ptr->quality = codec_context->global_quality;
|
||||
|
||||
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
|
||||
// Put encoder into flushing mode
|
||||
avcodec_send_frame(codec_context, opicture_ptr);
|
||||
int ret = avcodec_receive_packet(codec_context, pkt);
|
||||
if ( ret < 0 ) {
|
||||
if ( AVERROR_EOF != ret ) {
|
||||
Error("ERror encoding video (%d) (%s)", ret,
|
||||
av_err2str(ret));
|
||||
}
|
||||
} else {
|
||||
got_packet = 1;
|
||||
}
|
||||
#else
|
||||
|
||||
#if LIBAVFORMAT_VERSION_CHECK(54, 1, 0, 2, 100)
|
||||
int ret = avcodec_encode_video2( c, pkt, opicture_ptr, &got_packet );
|
||||
int ret = avcodec_encode_video2( codec_context, pkt, opicture_ptr, &got_packet );
|
||||
if ( ret != 0 ) {
|
||||
Fatal( "avcodec_encode_video2 failed with errorcode %d \"%s\"", ret, av_err2str( ret ) );
|
||||
}
|
||||
#else
|
||||
int out_size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, opicture_ptr );
|
||||
int out_size = avcodec_encode_video( codec_context, video_outbuf, video_outbuf_size, opicture_ptr );
|
||||
got_packet = out_size > 0 ? 1 : 0;
|
||||
pkt->data = got_packet ? video_outbuf : NULL;
|
||||
pkt->size = got_packet ? out_size : 0;
|
||||
#endif
|
||||
#endif
|
||||
if ( got_packet ) {
|
||||
// if ( c->coded_frame->key_frame )
|
||||
|
@ -622,12 +655,12 @@ double VideoStream::ActuallyEncodeFrame( const uint8_t *buffer, int buffer_size,
|
|||
// }
|
||||
|
||||
if ( pkt->pts != (int64_t)AV_NOPTS_VALUE ) {
|
||||
pkt->pts = av_rescale_q( pkt->pts, c->time_base, ost->time_base );
|
||||
pkt->pts = av_rescale_q( pkt->pts, codec_context->time_base, ost->time_base );
|
||||
}
|
||||
if ( pkt->dts != (int64_t)AV_NOPTS_VALUE ) {
|
||||
pkt->dts = av_rescale_q( pkt->dts, c->time_base, ost->time_base );
|
||||
pkt->dts = av_rescale_q( pkt->dts, codec_context->time_base, ost->time_base );
|
||||
}
|
||||
pkt->duration = av_rescale_q( pkt->duration, c->time_base, ost->time_base );
|
||||
pkt->duration = av_rescale_q( pkt->duration, codec_context->time_base, ost->time_base );
|
||||
pkt->stream_index = ost->index;
|
||||
}
|
||||
}
|
||||
|
@ -658,8 +691,12 @@ void *VideoStream::StreamingThreadCallback(void *ctx){
|
|||
VideoStream* videoStream = reinterpret_cast<VideoStream*>(ctx);
|
||||
|
||||
const uint64_t nanosecond_multiplier = 1000000000;
|
||||
|
||||
uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->ost->codec->time_base.num) / (videoStream->ost->codec->time_base.den) );
|
||||
|
||||
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
|
||||
uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->codec_context->time_base.num) / (videoStream->codec_context->time_base.den) );
|
||||
#else
|
||||
uint64_t target_interval_ns = nanosecond_multiplier * ( ((double)videoStream->codec_context->time_base.num) / (videoStream->codec_context->time_base.den) );
|
||||
#endif
|
||||
uint64_t frame_count = 0;
|
||||
timespec start_time;
|
||||
clock_gettime(CLOCK_MONOTONIC, &start_time);
|
||||
|
|
|
@ -46,6 +46,7 @@ protected:
|
|||
AVOutputFormat *of;
|
||||
AVFormatContext *ofc;
|
||||
AVStream *ost;
|
||||
AVCodecContext *codec_context;
|
||||
AVCodec *codec;
|
||||
AVFrame *opicture;
|
||||
AVFrame *tmp_opicture;
|
||||
|
|
|
@ -28,23 +28,26 @@ ZMPacket::ZMPacket( ) {
|
|||
keyframe = 0;
|
||||
image = NULL;
|
||||
frame = NULL;
|
||||
buffer = NULL;
|
||||
av_init_packet( &packet );
|
||||
packet.size = 0;
|
||||
gettimeofday( ×tamp, NULL );
|
||||
packet.size = 0; // So we can detect whether it has been filled.
|
||||
timestamp = (struct timeval){0};
|
||||
}
|
||||
|
||||
ZMPacket::ZMPacket( Image *i ) {
|
||||
keyframe = 1;
|
||||
image = i;
|
||||
frame = NULL;
|
||||
buffer = NULL;
|
||||
av_init_packet( &packet );
|
||||
gettimeofday( ×tamp, NULL );
|
||||
timestamp = (struct timeval){0};
|
||||
}
|
||||
|
||||
ZMPacket::ZMPacket( AVPacket *p ) {
|
||||
av_init_packet( &packet );
|
||||
set_packet( p );
|
||||
keyframe = p->flags & AV_PKT_FLAG_KEY;
|
||||
buffer = NULL;
|
||||
}
|
||||
|
||||
ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) {
|
||||
|
@ -52,23 +55,40 @@ ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) {
|
|||
set_packet( p );
|
||||
timestamp = *t;
|
||||
keyframe = p->flags & AV_PKT_FLAG_KEY;
|
||||
buffer = NULL;
|
||||
}
|
||||
ZMPacket::ZMPacket( AVPacket *p, AVFrame *f, Image *i ) {
|
||||
av_init_packet( &packet );
|
||||
set_packet( p );
|
||||
image = i;
|
||||
frame = f;
|
||||
buffer = NULL;
|
||||
}
|
||||
|
||||
ZMPacket::~ZMPacket() {
|
||||
zm_av_packet_unref( &packet );
|
||||
if ( frame ) {
|
||||
//av_free(frame->data);
|
||||
av_frame_free( &frame );
|
||||
}
|
||||
if ( buffer ) {
|
||||
av_freep( &buffer );
|
||||
}
|
||||
// We assume the image was allocated elsewhere, so we just unref it.
|
||||
image = NULL;
|
||||
//if ( image ) {
|
||||
//delete image;
|
||||
//}
|
||||
}
|
||||
|
||||
void ZMPacket::reset() {
|
||||
Debug(2,"reset");
|
||||
zm_av_packet_unref( &packet );
|
||||
packet.size = 0;
|
||||
if ( frame ) {
|
||||
av_frame_free( &frame );
|
||||
}
|
||||
if ( buffer ) {
|
||||
Debug(2,"freeing buffer");
|
||||
av_freep( &buffer );
|
||||
}
|
||||
}
|
||||
|
||||
int ZMPacket::decode( AVCodecContext *ctx ) {
|
||||
|
|
|
@ -36,7 +36,8 @@ class ZMPacket {
|
|||
int keyframe;
|
||||
AVPacket packet; // Input packet, undecoded
|
||||
AVFrame *frame; // Input image, decoded Theoretically only filled if needed.
|
||||
Image *image; // Our internal image oject representing this frame
|
||||
uint8_t *buffer;
|
||||
Image *image; // Our internal image object representing this frame
|
||||
struct timeval timestamp;
|
||||
public:
|
||||
AVPacket *av_packet() { return &packet; }
|
||||
|
@ -47,6 +48,7 @@ class ZMPacket {
|
|||
|
||||
int is_keyframe() { return keyframe; };
|
||||
int decode( AVCodecContext *ctx );
|
||||
void reset();
|
||||
ZMPacket( AVPacket *packet, struct timeval *timestamp );
|
||||
ZMPacket( AVPacket *packet );
|
||||
ZMPacket( AVPacket *packet, AVFrame *frame, Image *image );
|
||||
|
|
|
@ -68,6 +68,7 @@ RemoteCameraNVSocket::RemoteCameraNVSocket(
|
|||
timeout.tv_sec = 0;
|
||||
timeout.tv_usec = 0;
|
||||
subpixelorder = ZM_SUBPIX_ORDER_BGR;
|
||||
video_stream = NULL;
|
||||
|
||||
if ( capture ) {
|
||||
Initialise();
|
||||
|
@ -212,3 +213,26 @@ int RemoteCameraNVSocket::Capture( ZMPacket &zm_packet ) {
|
|||
int RemoteCameraNVSocket::PostCapture() {
|
||||
return( 0 );
|
||||
}
|
||||
AVStream *RemoteCameraNVSocket::get_VideoStream() {
|
||||
if ( ! video_stream ) {
|
||||
AVFormatContext *oc = avformat_alloc_context();
|
||||
video_stream = avformat_new_stream( oc, NULL );
|
||||
if ( video_stream ) {
|
||||
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
|
||||
video_stream->codecpar->width = width;
|
||||
video_stream->codecpar->height = height;
|
||||
video_stream->codecpar->format = GetFFMPEGPixelFormat(colours,subpixelorder);
|
||||
#else
|
||||
video_stream->codec->width = width;
|
||||
video_stream->codec->height = height;
|
||||
video_stream->codec->pix_fmt = GetFFMPEGPixelFormat(colours,subpixelorder);
|
||||
#endif
|
||||
} else {
|
||||
Error("Can't create video stream");
|
||||
}
|
||||
} else {
|
||||
Debug(2,"Have videostream");
|
||||
}
|
||||
Debug(2,"Get videoStream");
|
||||
return video_stream;
|
||||
}
|
||||
|
|
|
@ -69,6 +69,7 @@ bool p_record_audio );
|
|||
int PrimeCapture();
|
||||
int Capture( ZMPacket &p );
|
||||
int PostCapture();
|
||||
AVStream* get_VideoStream();
|
||||
};
|
||||
|
||||
#endif // ZM_REMOTE_CAMERA_NVSOCKET_H
|
||||
|
|
|
@ -287,13 +287,13 @@ int RemoteCameraRtsp::Capture( ZMPacket &zm_packet ) {
|
|||
directbuffer = zm_packet.image->WriteBuffer(width, height, colours, subpixelorder);
|
||||
if ( directbuffer == NULL ) {
|
||||
Error("Failed requesting writeable buffer for the captured image.");
|
||||
return -1;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
while ( !frameComplete ) {
|
||||
buffer.clear();
|
||||
if ( !rtspThread->isRunning() )
|
||||
return -1;
|
||||
return NULL;
|
||||
|
||||
if ( rtspThread->getFrame( buffer ) ) {
|
||||
Debug( 3, "Read frame %d bytes", buffer.size() );
|
||||
|
@ -301,7 +301,7 @@ int RemoteCameraRtsp::Capture( ZMPacket &zm_packet ) {
|
|||
Hexdump( 4, buffer.head(), 16 );
|
||||
|
||||
if ( !buffer.size() )
|
||||
return -1;
|
||||
return NULL;
|
||||
|
||||
if ( mCodecContext->codec_id == AV_CODEC_ID_H264 ) {
|
||||
// SPS and PPS frames should be saved and appended to IDR frames
|
||||
|
|
|
@ -379,21 +379,31 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
|
|||
stream->id = i;
|
||||
#endif
|
||||
|
||||
AVCodecContext *codec_context = NULL;
|
||||
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
|
||||
codec_context = avcodec_alloc_context3(NULL);
|
||||
avcodec_parameters_to_context(codec_context, stream->codecpar);
|
||||
#else
|
||||
codec_context = stream->codec;
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
Debug( 1, "Looking for codec for %s payload type %d / %s", mediaDesc->getType().c_str(), mediaDesc->getPayloadType(), mediaDesc->getPayloadDesc().c_str() );
|
||||
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
|
||||
if ( mediaDesc->getType() == "video" )
|
||||
stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
|
||||
codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
|
||||
else if ( mediaDesc->getType() == "audio" )
|
||||
stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
codec_context->codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
else if ( mediaDesc->getType() == "application" )
|
||||
stream->codec->codec_type = AVMEDIA_TYPE_DATA;
|
||||
codec_context->codec_type = AVMEDIA_TYPE_DATA;
|
||||
#else
|
||||
if ( mediaDesc->getType() == "video" )
|
||||
stream->codec->codec_type = CODEC_TYPE_VIDEO;
|
||||
codec_context->codec_type = CODEC_TYPE_VIDEO;
|
||||
else if ( mediaDesc->getType() == "audio" )
|
||||
stream->codec->codec_type = CODEC_TYPE_AUDIO;
|
||||
codec_context->codec_type = CODEC_TYPE_AUDIO;
|
||||
else if ( mediaDesc->getType() == "application" )
|
||||
stream->codec->codec_type = CODEC_TYPE_DATA;
|
||||
codec_context->codec_type = CODEC_TYPE_DATA;
|
||||
#endif
|
||||
|
||||
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
|
||||
|
@@ -410,31 +420,27 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
        codec_name = std::string( smStaticPayloads[i].payloadName );
#else
-       strncpy( stream->codec->codec_name, smStaticPayloads[i].payloadName, sizeof(stream->codec->codec_name) );
+       strncpy( codec_context->codec_name, smStaticPayloads[i].payloadName, sizeof(codec_context->codec_name) );
#endif
-       stream->codec->codec_type = smStaticPayloads[i].codecType;
-       stream->codec->codec_id = smStaticPayloads[i].codecId;
-       stream->codec->sample_rate = smStaticPayloads[i].clockRate;
+       codec_context->codec_type = smStaticPayloads[i].codecType;
+       codec_context->codec_id = smStaticPayloads[i].codecId;
+       codec_context->sample_rate = smStaticPayloads[i].clockRate;
        break;
      }
    }
- }
- else
- {
+ } else {
    // Look in dynamic table
-   for ( unsigned int i = 0; i < (sizeof(smDynamicPayloads)/sizeof(*smDynamicPayloads)); i++ )
-   {
-     if ( smDynamicPayloads[i].payloadName == mediaDesc->getPayloadDesc() )
-     {
+   for ( unsigned int i = 0; i < (sizeof(smDynamicPayloads)/sizeof(*smDynamicPayloads)); i++ ) {
+     if ( smDynamicPayloads[i].payloadName == mediaDesc->getPayloadDesc() ) {
        Debug( 1, "Got dynamic payload type %d, %s", mediaDesc->getPayloadType(), smDynamicPayloads[i].payloadName );
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
        codec_name = std::string( smDynamicPayloads[i].payloadName );
#else
-       strncpy( stream->codec->codec_name, smDynamicPayloads[i].payloadName, sizeof(stream->codec->codec_name) );
+       strncpy( codec_context->codec_name, smDynamicPayloads[i].payloadName, sizeof(codec_context->codec_name) );
#endif
-       stream->codec->codec_type = smDynamicPayloads[i].codecType;
-       stream->codec->codec_id = smDynamicPayloads[i].codecId;
-       stream->codec->sample_rate = mediaDesc->getClock();
+       codec_context->codec_type = smDynamicPayloads[i].codecType;
+       codec_context->codec_id = smDynamicPayloads[i].codecId;
+       codec_context->sample_rate = mediaDesc->getClock();
        break;
      }
    }
@@ -450,14 +456,13 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
      //return( 0 );
    }
    if ( mediaDesc->getWidth() )
-     stream->codec->width = mediaDesc->getWidth();
+     codec_context->width = mediaDesc->getWidth();
    if ( mediaDesc->getHeight() )
-     stream->codec->height = mediaDesc->getHeight();
-   if ( stream->codec->codec_id == AV_CODEC_ID_H264 && mediaDesc->getSprops().size())
-   {
+     codec_context->height = mediaDesc->getHeight();
+   if ( codec_context->codec_id == AV_CODEC_ID_H264 && mediaDesc->getSprops().size()) {
      uint8_t start_sequence[]= { 0, 0, 1 };
-     stream->codec->extradata_size= 0;
-     stream->codec->extradata= NULL;
+     codec_context->extradata_size= 0;
+     codec_context->extradata= NULL;
      char pvalue[1024], *value = pvalue;

      strcpy(pvalue, mediaDesc->getSprops().c_str());
@@ -482,22 +487,33 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const
        if (packet_size) {
          uint8_t *dest =
            (uint8_t *)av_malloc(packet_size + sizeof(start_sequence) +
-               stream->codec->extradata_size +
-               FF_INPUT_BUFFER_PADDING_SIZE);
+               codec_context->extradata_size +
+#if LIBAVCODEC_VERSION_CHECK(57, 0, 0, 0, 0)
+               AV_INPUT_BUFFER_PADDING_SIZE
+#else
+               FF_INPUT_BUFFER_PADDING_SIZE
+#endif
+               );
          if(dest) {
-           if(stream->codec->extradata_size) {
+           if(codec_context->extradata_size) {
              // av_realloc?
-             memcpy(dest, stream->codec->extradata, stream->codec->extradata_size);
-             av_free(stream->codec->extradata);
+             memcpy(dest, codec_context->extradata, codec_context->extradata_size);
+             av_free(codec_context->extradata);
            }

-           memcpy(dest+stream->codec->extradata_size, start_sequence, sizeof(start_sequence));
-           memcpy(dest+stream->codec->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
-           memset(dest+stream->codec->extradata_size+sizeof(start_sequence)+
-               packet_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+           memcpy(dest+codec_context->extradata_size, start_sequence, sizeof(start_sequence));
+           memcpy(dest+codec_context->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
+           memset(dest+codec_context->extradata_size+sizeof(start_sequence)+
+               packet_size, 0,
+#if LIBAVCODEC_VERSION_CHECK(57, 0, 0, 0, 0)
+               AV_INPUT_BUFFER_PADDING_SIZE
+#else
+               FF_INPUT_BUFFER_PADDING_SIZE
+#endif
+               );

-           stream->codec->extradata= dest;
-           stream->codec->extradata_size+= sizeof(start_sequence)+packet_size;
+           codec_context->extradata= dest;
+           codec_context->extradata_size+= sizeof(start_sequence)+packet_size;
            // } else {
            //   av_log(codec, AV_LOG_ERROR, "Unable to allocate memory for extradata!");
            //   return AVERROR(ENOMEM);
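
The hunk above is the sprop-parameter-sets path: each comma-separated, base64-encoded SPS/PPS from the SDP is appended to the codec's extradata behind an Annex-B 00 00 01 start code, with zeroed padding at the end. A condensed sketch of one such append, assuming av_base64_decode() from libavutil/base64.h and that `value` already points at a single token; the surrounding names are illustrative:

    // Sketch: append one base64-encoded parameter set to Annex-B extradata.
    static const uint8_t start_sequence[] = { 0, 0, 1 };
    uint8_t decoded_packet[1024];
    int packet_size = av_base64_decode(decoded_packet, value, sizeof(decoded_packet));
    if ( packet_size > 0 ) {
      int old_size = codec_context->extradata_size;
      uint8_t *dest = (uint8_t *)av_malloc(
          old_size + sizeof(start_sequence) + packet_size + AV_INPUT_BUFFER_PADDING_SIZE);
      if ( dest ) {
        if ( old_size ) {
          memcpy(dest, codec_context->extradata, old_size);
          av_free(codec_context->extradata);
        }
        memcpy(dest + old_size, start_sequence, sizeof(start_sequence));
        memcpy(dest + old_size + sizeof(start_sequence), decoded_packet, packet_size);
        memset(dest + old_size + sizeof(start_sequence) + packet_size, 0,
               AV_INPUT_BUFFER_PADDING_SIZE);
        codec_context->extradata = dest;
        codec_context->extradata_size = old_size + sizeof(start_sequence) + packet_size;
      }
    }
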
src/zm_sdp.h

@@ -31,13 +31,11 @@
#include <string>
#include <vector>

-class SessionDescriptor
-{
+class SessionDescriptor {
protected:
  enum { PAYLOAD_TYPE_DYNAMIC=96 };

- struct StaticPayloadDesc
- {
+ struct StaticPayloadDesc {
    int payloadType;
    const char payloadName[6];
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))

@@ -50,8 +48,7 @@ protected:
    int autoChannels;
  };

- struct DynamicPayloadDesc
- {
+ struct DynamicPayloadDesc {
    const char payloadName[32];
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
    AVMediaType codecType;

@@ -65,8 +62,7 @@ protected:
  };

public:
- class ConnInfo
- {
+ class ConnInfo {
  protected:
    std::string mNetworkType;
    std::string mAddressType;

@@ -78,8 +74,7 @@ public:
    ConnInfo( const std::string &connInfo );
  };

- class BandInfo
- {
+ class BandInfo {
  protected:
    std::string mType;
    int mValue;

@@ -88,8 +83,7 @@ public:
    BandInfo( const std::string &bandInfo );
  };

- class MediaDescriptor
- {
+ class MediaDescriptor {
  protected:
    std::string mType;
    int mPort;
@@ -78,7 +78,17 @@ int SWScale::SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf,
  return 0;
}

-int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
+int SWScale::Convert(
+    const uint8_t* in_buffer,
+    const size_t in_buffer_size,
+    uint8_t* out_buffer,
+    const size_t out_buffer_size,
+    enum _AVPIXELFORMAT in_pf,
+    enum _AVPIXELFORMAT out_pf,
+    unsigned int width,
+    unsigned int height
+    ) {
  /* Parameter checking */
  if(in_buffer == NULL || out_buffer == NULL) {
    Error("NULL Input or output buffer");

@@ -119,14 +129,14 @@ int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint
  size_t outsize = avpicture_get_size(out_pf, width, height);
#endif

- if(outsize < out_buffer_size) {
+ if ( outsize < out_buffer_size ) {
    Error("The output buffer is undersized for the output format. Required: %d Available: %d", outsize, out_buffer_size);
    return -5;
  }

  /* Get the context */
  swscale_ctx = sws_getCachedContext( swscale_ctx, width, height, in_pf, width, height, out_pf, SWS_FAST_BILINEAR, NULL, NULL, NULL );
- if(swscale_ctx == NULL) {
+ if ( swscale_ctx == NULL ) {
    Error("Failed getting swscale context");
    return -6;
  }

@@ -163,22 +173,22 @@ int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint
}

int SWScale::Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
- if(img->Width() != width) {
+ if ( img->Width() != width ) {
    Error("Source image width differs. Source: %d Output: %d",img->Width(), width);
    return -12;
  }

- if(img->Height() != height) {
+ if ( img->Height() != height ) {
    Error("Source image height differs. Source: %d Output: %d",img->Height(), height);
    return -13;
  }

- return Convert(img->Buffer(),img->Size(),out_buffer,out_buffer_size,in_pf,out_pf,width,height);
+ return Convert(img->Buffer(), img->Size(), out_buffer, out_buffer_size, in_pf, out_pf, width, height);
}

int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size) {

- if(!gotdefaults) {
+ if ( !gotdefaults ) {
    Error("Defaults are not set");
    return -24;
  }

@@ -188,7 +198,7 @@ int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t

int SWScale::ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size) {

- if(!gotdefaults) {
+ if ( !gotdefaults ) {
    Error("Defaults are not set");
    return -24;
  }
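
For callers, the reformatted class boils down to: set the conversion defaults once, then convert each captured frame into a caller-supplied buffer. A hypothetical usage sketch; the buffer sizing via av_image_get_buffer_size() is an assumption for newer FFmpeg (older builds would use avpicture_get_size(), as the hunk above shows), and `image`, `width` and `height` stand in for the caller's capture state:

    // Sketch: one-time setup plus a per-frame conversion.
    SWScale swscale;
    swscale.SetDefaults(AV_PIX_FMT_RGB24, AV_PIX_FMT_YUV420P, width, height);

    size_t out_size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1);
    uint8_t *out_buffer = (uint8_t *)av_malloc(out_size);
    if ( swscale.ConvertDefaults(image, out_buffer, out_size) < 0 )
      Error("Image conversion failed");
    av_free(out_buffer);
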
@@ -100,8 +100,17 @@ bool startsWith( const std::string &haystack, const std::string &needle )
  return( haystack.substr( 0, needle.length() ) == needle );
}

-StringVector split( const std::string &string, const std::string &chars, int limit )
-{
+std::vector<std::string> split(const std::string &s, char delim) {
+  std::vector<std::string> elems;
+  std::stringstream ss(s);
+  std::string item;
+  while(std::getline(ss, item, delim)) {
+    elems.push_back(trimSpaces(item));
+  }
+  return elems;
+}
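
The new overload tokenizes on a single character and trims each piece via the existing trimSpaces() helper, which suits comma-separated option lists. A small usage sketch:

    // Sketch: tokenizing a comma-separated option string with the new overload.
    std::vector<std::string> parts = split("h264, mjpeg, mpeg1", ',');
    for ( size_t i = 0; i < parts.size(); i++ )
      Debug(1, "token %d: %s", (int)i, parts[i].c_str());
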
+StringVector split( const std::string &string, const std::string &chars, int limit ) {
  StringVector stringVector;
  std::string tempString = string;
  std::string::size_type startIndex = 0;
@@ -86,9 +86,12 @@ VideoStore::VideoStore(
  video_in_stream_index = video_in_stream->index;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
  video_in_ctx = avcodec_alloc_context3(NULL);
+ Debug(2, "About to copy params");
  avcodec_parameters_to_context(video_in_ctx,
      video_in_stream->codecpar);
+ zm_dump_codecpar( video_in_stream->codecpar );
- Debug(2, "About to copy params");
  //video_in_ctx.codec_id = video_in_stream->codecpar.codec_id;
#else
  video_in_ctx = video_in_stream->codec;
#endif

@@ -99,6 +102,7 @@ VideoStore::VideoStore(
  }

- video_out_ctx = NULL;
+ video_out_ctx = avcodec_alloc_context3(NULL);

  // Copy params from instream to ctx
  if ( video_in_stream && ( video_in_ctx->codec_id == AV_CODEC_ID_H264 ) ) {

@@ -113,13 +117,10 @@ VideoStore::VideoStore(
    zm_dump_codec(video_out_ctx);
  }
#else
- video_out_ctx = avcodec_alloc_context3(NULL);
  avcodec_copy_context( video_out_ctx, video_in_ctx );
#endif
  // Same codec, just copy the packets, otherwise we have to decode/encode
  video_out_codec = (AVCodec *)video_in_ctx->codec;
  video_out_ctx->time_base = video_in_ctx->time_base;
  video_out_stream->time_base = video_in_stream->time_base;
} else {

  /** Create a new frame to store the */
@@ -127,84 +128,95 @@ VideoStore::VideoStore(
    Error("Could not allocate in frame");
    return;
  }
  video_out_codec = avcodec_find_encoder_by_name("h264_omx");
  if ( ! video_out_codec ) {
    Debug(1, "Didn't find omx");
    video_out_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
  }
  if ( !video_out_codec ) {
    Fatal("Could not find codec for H264");
  }
  Debug(2, "Have video out codec");
  video_out_ctx = avcodec_alloc_context3( video_out_codec );
  // Don't have an input stream, so need to tell it what we are sending it, or are transcoding
  video_out_ctx->width = monitor->Width();
  video_out_ctx->height = monitor->Height();
  video_out_ctx->codec_type = AVMEDIA_TYPE_VIDEO;

  video_out_ctx = avcodec_alloc_context3( video_out_codec );
  // Don't have an input stream, so need to tell it what we are sending it, or are transcoding
  video_out_ctx->width = monitor->Width();
  video_out_ctx->height = monitor->Height();
  video_out_ctx->codec_id = AV_CODEC_ID_H264;
  //video_out_ctx->sample_aspect_ratio = (AVRational){4,3};
  video_out_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
  //video_in_ctx->sample_aspect_ratio;
  /* take first format from list of supported formats */
  //video_out_ctx->pix_fmt = video_out_codec->pix_fmts[0];
  video_out_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
  /* video time_base can be set to whatever is handy and supported by encoder */
  video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
  video_out_ctx->framerate = (AVRational){0,1}; // Unknown framerate
  video_out_ctx->gop_size = 12;
  video_out_ctx->bit_rate = 4000000;
  video_out_ctx->qmin = 10;
  video_out_ctx->qmax = 51;
  video_out_ctx->qcompress = 0.6;

  if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
    video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
    video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
  }

  AVDictionary *opts = 0;
  std::string Options = monitor->GetEncoderOptions();
  ret = av_dict_parse_string(&opts, Options.c_str(), "=", ",#\n", 0);
  if ( ret < 0 ) {
    Warning("Could not parse ffmpeg encoder options list '%s'\n", Options.c_str());
  } else {
    AVDictionaryEntry *e = NULL;
    while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
      Debug( 3, "Encoder Option %s=%s", e->key, e->value );
    }
  }
  if ( ! av_dict_get( opts, "preset", NULL, 0 ) )
    av_dict_set( &opts, "preset", "superfast", 0 );

  if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
    Warning("Can't open video codec (%s)! %s, trying h264",
        video_out_codec->name,
        av_make_error_string(ret).c_str()
        );
    video_out_codec = avcodec_find_encoder_by_name("h264");
  if ( monitor->OutputCodec() == "mjpeg" ) {
    video_out_codec = avcodec_find_encoder_by_name("mjpeg");
    if ( ! video_out_codec ) {
      Error("Can't find h264 encoder");
      video_out_codec = avcodec_find_encoder_by_name("libx264");
      Debug(1, "Didn't find omx");
      video_out_codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
    }
    video_out_ctx->codec_id = video_out_codec->id;
    video_out_ctx->pix_fmt = AV_PIX_FMT_YUVJ422P;

  } else if ( monitor->OutputCodec() == "h264" ) {
    video_out_codec = avcodec_find_encoder_by_name("h264_omx");
    if ( ! video_out_codec ) {
      Debug(1, "Didn't find omx");
      video_out_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    }
    if ( !video_out_codec ) {
      Fatal("Could not find codec for H264");
    }
    Debug(2, "Have video out codec");

    video_out_ctx->codec_id = AV_CODEC_ID_H264;
    //video_in_ctx->sample_aspect_ratio;
    /* take first format from list of supported formats */
    //video_out_ctx->pix_fmt = video_out_codec->pix_fmts[0];
    video_out_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    /* video time_base can be set to whatever is handy and supported by encoder */
    video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
    video_out_ctx->framerate = (AVRational){0,1}; // Unknown framerate
    video_out_ctx->gop_size = 12;
    video_out_ctx->bit_rate = 4000000;
    video_out_ctx->qmin = 10;
    video_out_ctx->qmax = 51;
    video_out_ctx->qcompress = 0.6;

    AVDictionary *opts = 0;
    std::string Options = monitor->GetEncoderOptions();
    ret = av_dict_parse_string(&opts, Options.c_str(), "=", ",#\n", 0);
    if ( ret < 0 ) {
      Warning("Could not parse ffmpeg encoder options list '%s'\n", Options.c_str());
    } else {
      AVDictionaryEntry *e = NULL;
      while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
        Debug( 3, "Encoder Option %s=%s", e->key, e->value );
      }
    }

    if ( ! av_dict_get( opts, "preset", NULL, 0 ) ) {
      Debug(2, "Setting preset to ultrafast");
      av_dict_set( &opts, "preset", "ultrafast", 0 );
    }

    if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
      Warning("Can't open video codec (%s)! %s, trying h264",
          video_out_codec->name,
          av_make_error_string(ret).c_str()
          );
      video_out_codec = avcodec_find_encoder_by_name("h264");
      if ( ! video_out_codec ) {
        Error("Can't find libx264 encoder");
        Error("Can't find h264 encoder");
        video_out_codec = avcodec_find_encoder_by_name("libx264");
        if ( ! video_out_codec ) {
          Error("Can't find libx264 encoder");
          return;
        }
      }
      if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
        Error("Can't open video codec (%s)! %s",
            video_out_codec->name,
            av_make_error_string(ret).c_str() );
        return;
      }
    }
    if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
      Error("Can't open video codec (%s)! %s",
          video_out_codec->name,
          av_make_error_string(ret).c_str() );
      return;
      AVDictionaryEntry *e = NULL;
      while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
        Warning( "Encoder Option %s not recognized by ffmpeg codec", e->key);
      }
    }
    AVDictionaryEntry *e = NULL;
    while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
      Warning( "Encoder Option %s not recognized by ffmpeg codec", e->key);
    }
    av_dict_free(&opts);
    av_dict_free(&opts);
    if ( !video_out_ctx->codec_tag ) {
      video_out_ctx->codec_tag =
        av_codec_get_tag(oc->oformat->codec_tag, AV_CODEC_ID_H264 );
      Debug(2, "No codec_tag, setting to %d", video_out_ctx->codec_tag);
    }
  } // end if codec == h264
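
Both encoder branches above share the same option plumbing: EncoderParameters is parsed into an AVDictionary, a preset default is applied if the user gave none, and after avcodec_open2() whatever is still left in the dictionary is an option the encoder did not consume. A minimal sketch of that round trip, with a made-up options string:

    // Sketch: parse user options, default the preset, open the codec, then
    // report leftovers (options the encoder did not recognize).
    AVDictionary *opts = NULL;
    if ( av_dict_parse_string(&opts, "crf=23\npreset=superfast", "=", ",#\n", 0) < 0 )
      Warning("Could not parse encoder options");
    if ( !av_dict_get(opts, "preset", NULL, 0) )
      av_dict_set(&opts, "preset", "ultrafast", 0);

    if ( avcodec_open2(video_out_ctx, video_out_codec, &opts) == 0 ) {
      AVDictionaryEntry *e = NULL;
      while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) )
        Warning("Encoder option %s not consumed by the codec", e->key);
    }
    av_dict_free(&opts);
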
  swscale.SetDefaults(
      video_in_ctx->pix_fmt,
@@ -214,11 +226,6 @@ VideoStore::VideoStore(
      );
  } // end if copying or transcoding

- if ( !video_out_ctx->codec_tag ) {
-   video_out_ctx->codec_tag =
-     av_codec_get_tag(oc->oformat->codec_tag, AV_CODEC_ID_H264 );
-   Debug(2, "No codec_tag, setting to %d", video_out_ctx->codec_tag);
- }

  video_out_stream = avformat_new_stream(oc, video_out_codec);
  if ( !video_out_stream ) {
@@ -434,6 +441,7 @@ void VideoStore::write_audio_packet( AVPacket &pkt ) {

VideoStore::~VideoStore() {
  if ( video_out_ctx->codec_id != video_in_ctx->codec_id ) {
+   Debug(2, "Different codecs between in and out");

#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
    if ( video_out_ctx->codec && ( video_out_ctx->codec->capabilities & AV_CODEC_CAP_DELAY ) ) {
@@ -475,7 +483,8 @@ VideoStore::~VideoStore() {
          break;
        }
#endif
-       Debug(3, "dts:%d, pts:%d", pkt.dts, pkt.pts );
+       int keyframe = pkt.flags & AV_PKT_FLAG_KEY;
+       Debug(3, "dts:%d, pts:%d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
        //pkt.dts = video_next_dts;
        pkt.pts = pkt.dts;
        //pkt.duration = video_last_duration;
@@ -830,7 +839,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
      return 0;
    }
#else
-  int data_present;
+    int data_present;
    if ((ret = avcodec_decode_video2(video_in_ctx, in_frame,
          &data_present, ipkt )) < 0) {
      Error("Could not decode frame (error '%s')\n",
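
The #else branch above is the pre-3.1 decode API; on newer FFmpeg the equivalent is the avcodec_send_packet()/avcodec_receive_frame() pair. A hedged sketch of the newer form for comparison:

    // Sketch: decode one packet with the post-3.1 API; av_make_error_string()
    // here is ZoneMinder's std::string wrapper used throughout this file.
    int ret = avcodec_send_packet(video_in_ctx, ipkt);
    if ( ret < 0 ) {
      Error("Could not send packet (error '%s')", av_make_error_string(ret).c_str());
      return 0;
    }
    ret = avcodec_receive_frame(video_in_ctx, in_frame);
    if ( ret < 0 ) {
      Error("Could not receive frame (error '%s')", av_make_error_string(ret).c_str());
      return 0;
    }
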
@@ -857,11 +866,11 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
        video_out_ctx->pix_fmt,
        video_out_ctx->width,
        video_out_ctx->height, 1);
-   uint8_t *buffer = (uint8_t *)av_malloc(codec_imgsize);
+   zm_packet->buffer = (uint8_t *)av_malloc(codec_imgsize);
    av_image_fill_arrays(
        frame->data,
        frame->linesize,
-       buffer,
+       zm_packet->buffer,
        video_out_ctx->pix_fmt,
        video_out_ctx->width,
        video_out_ctx->height,
@@ -871,10 +880,10 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
        video_out_ctx->pix_fmt,
        video_out_ctx->width,
        video_out_ctx->height);
-   uint8_t *buffer = (uint8_t *)av_malloc(codec_imgsize);
+   zm_packet->buffer = (uint8_t *)av_malloc(codec_imgsize);
    avpicture_fill(
        (AVPicture *)frame,
-       buffer,
+       zm_packet->buffer,
        video_out_ctx->pix_fmt,
        video_out_ctx->width,
        video_out_ctx->height
@@ -885,7 +894,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
    frame->height = video_out_ctx->height;
    frame->format = video_out_ctx->pix_fmt;
    swscale.Convert(zm_packet->image,
-       buffer,
+       zm_packet->buffer,
        codec_imgsize,
        (AVPixelFormat)zm_packet->image->AVPixFormat(),
        video_out_ctx->pix_fmt,
@@ -907,19 +916,20 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
  if ( (ret = avcodec_send_frame(video_out_ctx, zm_packet->frame)) < 0 ) {
    Error("Could not send frame (error '%s')", av_make_error_string(ret).c_str());
    zm_av_packet_unref(&opkt); // NOT SURE THIS IS NECESSARY
-   return 0;
+   return -1;
  }
  if ( (ret = avcodec_receive_packet(video_out_ctx, &opkt)) < 0 ) {
-   zm_av_packet_unref(&opkt);
    if ( AVERROR(EAGAIN) == ret ) {
      // The codec may need more samples than it has, perfectly valid
      Debug(3, "Could not receive packet (error '%s')",
          av_make_error_string(ret).c_str());
      return 0;
    } else {
      Error("Could not receive packet (error %d = '%s')", ret,
          av_make_error_string(ret).c_str());
    }
+   zm_av_packet_unref(&opkt);
-   return 0;
+   return -1;
  }
#else
  if ( (ret = avcodec_encode_video2(
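
The EAGAIN handling matters because encoders opened with AV_CODEC_CAP_DELAY (x264, for one) buffer input frames; the destructor shown earlier drains them before the trailer is written. A sketch of such a flush loop under the newer API, assuming the usual NULL-frame draining convention:

    // Sketch: flush a delaying encoder. Sending NULL enters draining mode;
    // receive until AVERROR_EOF, writing each drained packet.
    avcodec_send_frame(video_out_ctx, NULL);
    while ( true ) {
      AVPacket pkt;
      av_init_packet(&pkt);
      pkt.data = NULL;
      pkt.size = 0;
      if ( avcodec_receive_packet(video_out_ctx, &pkt) < 0 )
        break;  // AVERROR_EOF (or a real error) ends the drain
      // ... rescale timestamps, then av_interleaved_write_frame(oc, &pkt) ...
      av_packet_unref(&pkt);
    }
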
@@ -945,34 +955,8 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
    opkt.flags = ipkt->flags;
  }

- //opkt.dts = opkt.pts = ( zm_packet->timestamp.tv_sec*1000000 + zm_packet->timestamp.tv_usec ) - video_last_pts;
-#if 0
- opkt.dts = video_next_dts;
- opkt.pts = video_next_pts;
-
- int duration;
- if ( !video_last_pts ) {
-   duration = 0;
- } else {
-   duration = av_rescale_q(
-       ipkt->pts - video_last_pts,
-       video_in_stream->time_base,
-       video_out_stream->time_base
-       );
-   Debug(1, "duration calc: pts(%d) - last_pts(%d) = (%d)", ipkt->pts,
-       video_last_pts, duration);
-   if ( duration < 0 ) {
-     duration = ipkt->duration;
-   }
- }
-
- // our timebase is always /1000000 now, so we can use the timestamp as the pts/dts
- video_last_pts = zm_packet->timestamp.tv_sec*1000000 + zm_packet->timestamp.tv_usec;
- video_last_dts = video_last_pts;
- video_last_duration = duration;
- opkt.duration = duration;
-#endif

+ int keyframe = opkt.flags & AV_PKT_FLAG_KEY;
+ Debug(3, "dts:%d, pts:%d, keyframe:%d", opkt.dts, opkt.pts, keyframe );
  write_video_packet( opkt );
  zm_av_packet_unref(&opkt);
@@ -981,7 +965,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {

void VideoStore::write_video_packet( AVPacket &opkt ) {

- if (opkt.dts > opkt.pts) {
+ if ( opkt.dts > opkt.pts ) {
    Debug(1,
        "opkt.dts(%d) must be <= opkt.pts(%d). Decompression must happen "
        "before presentation.",
@@ -1012,8 +996,7 @@ void VideoStore::write_video_packet( AVPacket &opkt ) {
    //dumpPacket(&opkt);

  } else {
-   ret = av_interleaved_write_frame(oc, &opkt);
-   if (ret < 0) {
+   if ( (ret = av_interleaved_write_frame(oc, &opkt)) < 0 ) {
      // There's nothing we can really do if the frame is rejected, just drop it
      // and get on with the next
      Warning(
src/zmc.cpp

@@ -70,6 +70,7 @@ possible, this should run at more or less constant speed.
#include "zm_time.h"
#include "zm_signal.h"
#include "zm_monitor.h"
+#include "zm_analysis_thread.h"

void Usage() {
  fprintf(stderr, "zmc -d <device_path> or -r <proto> -H <host> -P <port> -p <path> or -f <file_path> or -m <monitor_id>\n");
@@ -244,6 +245,7 @@ int main(int argc, char *argv[]) {
    exit(-1);
  }

+ AnalysisThread **analysis_threads = new AnalysisThread *[n_monitors];
  long *capture_delays = new long[n_monitors];
  long *alarm_capture_delays = new long[n_monitors];
  long *next_delays = new long[n_monitors];
@@ -252,12 +254,22 @@ int main(int argc, char *argv[]) {
    last_capture_times[i].tv_sec = last_capture_times[i].tv_usec = 0;
    capture_delays[i] = monitors[i]->GetCaptureDelay();
    alarm_capture_delays[i] = monitors[i]->GetAlarmCaptureDelay();
- }

+   Monitor::Function function = monitors[i]->GetFunction();
+   if ( function == Monitor::MODECT || function == Monitor::MOCORD || function == Monitor::RECORD ) {
+     Debug(1, "Starting an analysis thread for monitor (%d)", monitors[i]->Id());
+     analysis_threads[i] = new AnalysisThread(monitors[i]);
+     analysis_threads[i]->start();
+   } else {
+     analysis_threads[i] = NULL;
+   }
+ } // end foreach monitor

  int result = 0;
  struct timeval now;
  struct DeltaTimeval delta_time;
  while ( !zm_terminate ) {
    //Debug(2,"blocking");
    sigprocmask(SIG_BLOCK, &block_set, 0);
    for ( int i = 0; i < n_monitors; i++ ) {
      long min_delay = MAXINT;
@@ -305,18 +317,29 @@ int main(int argc, char *argv[]) {
          DELTA_TIMEVAL(delta_time, now, last_capture_times[i], DT_PREC_3);
          long sleep_time = next_delays[i]-delta_time.delta;
          if ( sleep_time > 0 ) {
            Debug(2, "usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
            usleep(sleep_time*(DT_MAXGRAN/DT_PREC_3));
          }
+         last_capture_times[i] = now;
        } else {
          gettimeofday(&(last_capture_times[i]), NULL);
        }
-       gettimeofday(&(last_capture_times[i]), NULL);
      } // end if next_delay <= min_delay || next_delays[i] <= 0 )

    } // end foreach n_monitors
    //Debug(2,"unblocking");
    sigprocmask(SIG_UNBLOCK, &block_set, 0);
  } // end while ! zm_terminate

+ for ( int i = 0; i < n_monitors; i++ ) {
+   if ( analysis_threads[i] ) {
+     analysis_threads[i]->stop();
+     analysis_threads[i]->join();
+     delete analysis_threads[i];
+     analysis_threads[i] = 0;
+   }
+   delete monitors[i];
+ }
+ delete [] analysis_threads;
  delete [] monitors;
  delete [] alarm_capture_delays;
  delete [] capture_delays;
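
The actual AnalysisThread comes from the newly included zm_analysis_thread.h. Purely for illustration, a hypothetical sketch of the shape such a wrapper takes, using std::thread rather than ZoneMinder's own thread classes; Monitor::Analyse() is the real analysis entry point, but the rest of the names here are assumptions:

    // Hypothetical sketch only; not ZoneMinder's actual implementation.
    #include <atomic>
    #include <chrono>
    #include <thread>

    class AnalysisThread {
     public:
      explicit AnalysisThread(Monitor *monitor) :
        monitor_(monitor), terminate_(false) {}
      void start() { thread_ = std::thread(&AnalysisThread::run, this); }
      void stop()  { terminate_ = true; }
      void join()  { if ( thread_.joinable() ) thread_.join(); }
     private:
      void run() {
        while ( !terminate_ ) {
          if ( !monitor_->Analyse() )  // nothing queued yet; back off briefly
            std::this_thread::sleep_for(std::chrono::microseconds(1000));
        }
      }
      Monitor *monitor_;
      std::atomic<bool> terminate_;
      std::thread thread_;
    };
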
@@ -360,7 +360,8 @@ function getNearEvents() {
  $eventId = $_REQUEST['id'];
  $event = dbFetchOne( 'SELECT * FROM Events WHERE Id=?', NULL, array( $eventId ) );

- parseFilter( $_REQUEST['filter'] );
+ if ( isset($_REQUEST['filter']) )
+   parseFilter( $_REQUEST['filter'] );
  parseSort();

  if ( $user['MonitorIds'] )
@@ -9,6 +9,7 @@ class Event {
    'StorageId',
    'Name',
    'DiskSpace',
+   'SaveJPEGs',
  );
  public function __construct( $IdOrRow = null ) {
    $row = NULL;
@@ -15,6 +15,8 @@ private $defaults = array(
    'Height' => null,
    'Orientation' => null,
    'AnalysisFPSLimit' => null,
+   'OutputCodec',
+   'OutputContainer',
  );
  private $control_fields = array(
    'Name' => '',
@@ -326,7 +326,7 @@ function changeGroup( e, depth ) {
}
function changeMonitor( e ) {
  var monitor_id = e.value;
- Cookie.write( 'zmMonitorId', monitor_id, { duration: 10*365 } );
+ Cookie.write( 'MonitorId', monitor_id, { duration: 10*365 } );
  window.location = window.location;
}
function changeFilter( e ) {
@@ -97,12 +97,17 @@ $groupSql = Group::get_group_sql( $group_id );
      $monitors_dropdown[$monitors[$i]['Id']] = $monitors[$i]['Name'];
      if ( $monitors[$i]['Id'] == $monitor_id ) {
        $found_selected_monitor = true;
+     } else {
+       Warning("didn't find monitor $monitor_id " . $monitors[$i]['Id'] );
      }
    }
  } // end foreach monitor
  if ( ! $found_selected_monitor ) {
    $monitor_id = '';
  }
}
} else {
  Warning("Monitor id not specified");
} // end if a monitor was specified

for ( $i = 0; $i < count($monitors); $i++ ) {
  if ( !visibleMonitor( $monitors[$i]['Id'] ) ) {
    continue;

@@ -114,7 +119,7 @@ $groupSql = Group::get_group_sql( $group_id );
  }
  $displayMonitors[] = $monitors[$i];
}
-echo htmlSelect( 'MonitorId', $monitors_dropdown, $monitor_id, array('onchange'=>'changeMonitor(this);') );
+echo htmlSelect( 'MonitorId', $monitors_dropdown, $monitor_id, array('onchange'=>'changeFilter(this);') );
?>
</span>
<?php
@@ -0,0 +1,14 @@
+<input type="hidden" name="newMonitor[Method]" value="<?php echo validHtmlStr($monitor->Method()) ?>"/>
+<tr><td><?php echo translate('HostName') ?></td><td><input type="text" name="newMonitor[Host]" value="<?php echo validHtmlStr($monitor->Host()) ?>" size="36"/></td></tr>
+<tr><td><?php echo translate('Port') ?></td><td><input type="number" name="newMonitor[Port]" value="<?php echo validHtmlStr($monitor->Port()) ?>" size="6"/></td></tr>
+<tr><td><?php echo translate('Path') ?></td><td><input type="text" name="newMonitor[Path]" value="<?php echo validHtmlStr($monitor->Path()) ?>" size="36"/></td></tr>
+<input type="hidden" name="newMonitor[User]" value="<?php echo validHtmlStr($monitor->User()) ?>"/>
+<input type="hidden" name="newMonitor[Pass]" value="<?php echo validHtmlStr($monitor->Pass()) ?>"/>
+<input type="hidden" name="newMonitor[Options]" value="<?php echo validHtmlStr($monitor->Options()) ?>"/>
+<tr><td><?php echo translate('TargetColorspace') ?></td><td><select name="newMonitor[Colours]"><?php foreach ( $Colours as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $monitor->Colours()) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
+<tr><td><?php echo translate('CaptureWidth') ?> (<?php echo translate('Pixels') ?>)</td><td><input type="number" name="newMonitor[Width]" value="<?php echo validHtmlStr($monitor->Width()) ?>" size="4" onkeyup="updateMonitorDimensions(this);"/></td></tr>
+<tr><td><?php echo translate('CaptureHeight') ?> (<?php echo translate('Pixels') ?>)</td><td><input type="number" name="newMonitor[Height]" value="<?php echo validHtmlStr($monitor->Height()) ?>" size="4" onkeyup="updateMonitorDimensions(this);"/></td></tr>
+<tr><td><?php echo translate('PreserveAspect') ?></td><td><input type="checkbox" name="preserveAspectRatio" value="1"/></td></tr>
+<tr><td><?php echo translate('Orientation') ?></td><td><?php echo htmlselect( 'newMonitor[Orientation]', $orientations, $monitor->Orientation() );?></td></tr>
+<input type="hidden" name="newMonitor[Deinterlacing]" value="<?php echo validHtmlStr($monitor->Deinterlacing()) ?>"/>
+<input type="hidden" name="newMonitor[RTSPDescribe]" value="<?php echo validHtmlStr($monitor->RTSPDescribe()) ?>"/>
@@ -74,6 +74,10 @@ if ( ( ! $replayMode ) or ( ! $replayModes[$replayMode] ) ) {
  $replayMode = 'none';
}

+$video_tag = false;
+if ( $Event->DefaultVideo() and ( 'mp4' == pathinfo($Event->DefaultVideo(), PATHINFO_EXTENSION) ) ) {
+  $video_tag = true;
+}
// videojs zoomrotate only when direct recording
$Zoom = 1;
$Rotation = 0;

@@ -150,7 +154,7 @@ if ( $Event->SaveJPEGs() & 3 ) { // Analysis or Jpegs
<div id="exportEvent"><a href="#" onclick="exportEvent();"><?php echo translate('Export') ?></a></div>
</div>
<?php
-if ( $Event->DefaultVideo() ) {
+if ( $video_tag ) {
?>
<div id="eventVideo" class="">
<div id="videoFeed">

@@ -170,7 +174,7 @@ if ( $Event->DefaultVideo() ) {
<?php
} // end if DefaultVideo
?>
-<?php if ( !$Event->DefaultVideo() ) { ?>
+<?php if ( !$video_tag ) { ?>
<div id="imageFeed">
<?php
if ( ZM_WEB_STREAM_METHOD == 'mpeg' && ZM_MPEG_LIVE_FORMAT ) {
@@ -471,6 +471,18 @@ $videowriteropts = array(
  'X264 Encode' => 1,
  'H264 Camera Passthrough' => 2
);
+$videowriter_codecs = array(
+  '' => translate('Disabled'),
+  'h264' => 'h264',
+  'mjpeg' => 'mjpeg',
+  'mpeg1' => 'mpeg1',
+  'mpeg2' => 'mpeg2',
+);
+$videowriter_containers = array(
+  '' => translate('Auto'),
+  'mp4' => 'mp4',
+  'mkv' => 'mkv',
+);

xhtmlHeaders(__FILE__, translate('Monitor')." - ".validHtmlStr($monitor->Name()) );
?>

@@ -597,6 +609,8 @@ if ( $tab != 'storage' ) {
?>
<input type="hidden" name="newMonitor[SaveJPEGs]" value="<?php echo validHtmlStr($monitor->SaveJPEGs()) ?>"/>
<input type="hidden" name="newMonitor[VideoWriter]" value="<?php echo validHtmlStr($monitor->VideoWriter()) ?>"/>
+<input type="hidden" name="newMonitor[OutputCodec]" value="<?php echo validHtmlStr($monitor->OutputCodec()) ?>"/>
+<input type="hidden" name="newMonitor[OutputContainer]" value="<?php echo validHtmlStr($monitor->OutputContainer()) ?>"/>
<input type="hidden" name="newMonitor[EncoderParameters]" value="<?php echo validHtmlStr($monitor->EncoderParameters()) ?>"/>
<input type="hidden" name="newMonitor[RecordAudio]" value="<?php echo validHtmlStr($monitor->RecordAudio()) ?>"/>
<?php

@@ -894,6 +908,8 @@ if ( $monitor->Type() == 'Local' ) {
?>
<tr><td><?php echo translate('SaveJPEGs') ?></td><td><select name="newMonitor[SaveJPEGs]"><?php foreach ( $savejpegopts as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $monitor->SaveJPEGs() ) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
<tr><td><?php echo translate('VideoWriter') ?></td><td><select name="newMonitor[VideoWriter]"><?php foreach ( $videowriteropts as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $monitor->VideoWriter() ) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
+<tr><td><?php echo translate('OutputCodec') ?></td><td><?php echo htmlSelect( 'newMonitor[OutputCodec]', $videowriter_codecs, $monitor->OutputCodec() );?></td></tr>
+<tr><td><?php echo translate('OutputContainer') ?></td><td><?php echo htmlSelect( 'newMonitor[OutputContainer]', $videowriter_containers, $monitor->OutputContainer() );?></td></tr>
<tr><td><?php echo translate('OptionalEncoderParam') ?></td><td><textarea name="newMonitor[EncoderParameters]" rows="4" cols="36"><?php echo validHtmlStr($monitor->EncoderParameters()) ?></textarea></td></tr>
<tr><td><?php echo translate('RecordAudio') ?></td><td><input type="checkbox" name="newMonitor[RecordAudio]" value="1"<?php if ( $monitor->RecordAudio() ) { ?> checked="checked"<?php } ?>/></td></tr>
<?php