Merge branch 'rtsp_server'

Isaac Connor 2021-01-26 14:22:10 -05:00
commit 4dcce4ac95
110 changed files with 7268 additions and 3714 deletions

View File

@ -59,7 +59,7 @@ if(NOT HOST_OS)
endif(NOT HOST_OS)
set (CMAKE_CXX_STANDARD 11)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
# Default CFLAGS and CXXFLAGS:
set(CMAKE_C_FLAGS_RELEASE "-Wall -D__STDC_CONSTANT_MACROS -O2")
set(CMAKE_CXX_FLAGS_RELEASE "-Wall -D__STDC_CONSTANT_MACROS -O2")
@ -69,6 +69,8 @@ set(CMAKE_C_FLAGS_OPTIMISED "-Wall -D__STDC_CONSTANT_MACROS -O3")
set(CMAKE_CXX_FLAGS_OPTIMISED "-Wall -D__STDC_CONSTANT_MACROS -O3")
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
set (CMAKE_CXX_STANDARD 11)
# GCC below 6.0 doesn't support the __target__("fpu=neon") attribute, required for compiling ARM Neon code, otherwise compilation fails.
# Must use the -mfpu=neon compiler flag instead, but only do that for processors that support neon, otherwise strip the neon code altogether,
@ -201,6 +203,8 @@ set(ZM_NO_X10 "OFF" CACHE BOOL
set(ZM_ONVIF "ON" CACHE BOOL
"Set to ON to enable basic ONVIF support. This is EXPERIMENTAL and may not
work with all cameras claiming to be ONVIF compliant. default: ON")
set(ZM_NO_RTSPSERVER "OFF" CACHE BOOL
"Set to ON to skip live555 checks and force building ZM without rtsp server support. default: OFF")
set(ZM_PERL_MM_PARMS INSTALLDIRS=vendor NO_PACKLIST=1 NO_PERLLOCAL=1 CACHE STRING
"By default, ZoneMinder's Perl modules are installed into the Vendor folders,
as defined by your installation of Perl. You can change that here. Consult Perl's
@ -708,6 +712,21 @@ endif(NOT ZM_NO_LIBVNC)
##set(CMAKE_REQUIRED_INCLUDES "${Boost_INCLUDE_DIRS}")
#list(APPEND ZM_BIN_LIBS "${Boost_LIBRARIES}")
#endif()
if(NOT ZM_NO_RTSPSERVER)
find_package(Live555)
if(Live555_FOUND)
include_directories(${Live555_INCLUDE_DIRS})
set(CMAKE_REQUIRED_INCLUDES "${Live555_INCLUDE_DIRS}")
list(APPEND ZM_BIN_LIBS "${Live555_LIBRARIES}")
set(HAVE_RTSP_SERVER 1)
else(Live555_FOUND)
set(HAVE_RTSP_SERVER 0)
endif(Live555_FOUND)
else(NOT ZM_NO_RTSPSERVER)
set(HAVE_RTSP_SERVER 0)
endif(NOT ZM_NO_RTSPSERVER)
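For context, a minimal configure-time sketch of the new switch (the source path is hypothetical):

    # Build without RTSP server support, skipping the live555 checks
    cmake -DZM_NO_RTSPSERVER=ON /path/to/zoneminder
    # The default (OFF) probes for live555 and sets HAVE_RTSP_SERVER accordingly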
#
# *** END OF LIBRARY CHECKS ***

View File

@ -0,0 +1,75 @@
# Try to find Live555 libraries
# Once done this will define
# Live555_FOUND
# Live555_INCLUDE_DIRS
# Live555_LIBRARIES
if (NOT Live555_FOUND)
set(_Live555_FOUND ON)
foreach (library liveMedia BasicUsageEnvironment Groupsock UsageEnvironment)
string(TOLOWER ${library} lowercase_library)
find_path(Live555_${library}_INCLUDE_DIR
NAMES
${library}.hh
${lowercase_library}.hh
PATHS
${Live555_ROOT}/${library}/include
${Live555_ROOT}/live/${library}/include
/usr/include/${library}
/usr/local/include/${library}
/usr/include/${lowercase_library}
/usr/local/include/${lowercase_library}
)
if (Live555_${library}_INCLUDE_DIR)
list(APPEND _Live555_INCLUDE_DIRS ${Live555_${library}_INCLUDE_DIR})
else()
set(_Live555_FOUND OFF)
endif ()
foreach (mode DEBUG RELEASE)
find_library(Live555_${library}_LIBRARY_${mode}
NAMES
${library}
${lowercase_library}
PATHS
${Live555_ROOT}/lib/${mode}
${Live555_ROOT}/${library}
)
if (Live555_${library}_LIBRARY_${mode})
if (${mode} STREQUAL RELEASE)
list(APPEND _Live555_LIBRARIES optimized ${Live555_${library}_LIBRARY_${mode}})
elseif (${mode} STREQUAL DEBUG)
list(APPEND _Live555_LIBRARIES debug ${Live555_${library}_LIBRARY_${mode}})
else ()
MESSAGE(STATUS no)
list(APPEND _Live555_LIBRARIES ${Live555_${library}_LIBRARY_${mode}})
endif()
else()
set(_Live555_FOUND OFF)
endif ()
endforeach ()
endforeach ()
if (_Live555_FOUND)
set(Live555_INCLUDE_DIRS ${_Live555_INCLUDE_DIRS} CACHE INTERNAL "")
set(Live555_LIBRARIES ${_Live555_LIBRARIES} CACHE INTERNAL "")
set(Live555_FOUND ${_Live555_FOUND} CACHE BOOL "" FORCE)
endif()
include(FindPackageHandleStandardArgs)
# handle the QUIETLY and REQUIRED arguments and set Live555_FOUND to TRUE
# if all listed variables are TRUE
find_package_handle_standard_args(Live555 DEFAULT_MSG Live555_INCLUDE_DIRS Live555_LIBRARIES Live555_FOUND)
# Tell cmake GUIs to ignore the "local" variables.
mark_as_advanced(Live555_INCLUDE_DIRS Live555_LIBRARIES Live555_FOUND)
endif (NOT Live555_FOUND)
if (Live555_FOUND)
message(STATUS "Found live555")
endif()
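A minimal sketch of how a build would consume this module, assuming it is picked up from cmake/Modules/ via the CMAKE_MODULE_PATH set above; Live555_ROOT is the optional hint variable the module reads:

    set(Live555_ROOT /opt/live)   # optional hint pointing at a live555 tree
    find_package(Live555)
    if(Live555_FOUND)
      include_directories(${Live555_INCLUDE_DIRS})
      list(APPEND MY_BIN_LIBS ${Live555_LIBRARIES})
    endif()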

View File

@ -481,7 +481,8 @@ CREATE TABLE `Monitors` (
`DecoderHWAccelDevice` varchar(255),
`SaveJPEGs` TINYINT NOT NULL DEFAULT '3' ,
`VideoWriter` TINYINT NOT NULL DEFAULT '0',
`OutputCodec` enum('h264','mjpeg','mpeg1','mpeg2'),
`OutputCodec` int(10) unsigned NOT NULL default 0,
`Encoder` enum('auto','h264','libx264','h264_omx','h264_vaapi','mjpeg','mpeg1','mpeg2'),
`OutputContainer` enum('auto','mp4','mkv'),
`EncoderParameters` TEXT,
`RecordAudio` TINYINT NOT NULL DEFAULT '0',
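The C++ changes below compare OutputCodec directly against FFmpeg codec ids (the Event constructor tests monitor->OutputCodec() == AV_CODEC_ID_H264), so the new integer column presumably stores raw AVCodecID values. A sketch under that assumption, using 27 for AV_CODEC_ID_H264 as in current FFmpeg:

    -- List monitors configured for H.264 output (27 == AV_CODEC_ID_H264)
    SELECT `Id`, `Name` FROM `Monitors` WHERE `OutputCodec` = 27;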

View File

@ -176,16 +176,17 @@ BEGIN
WHERE Id=OLD.MonitorId;
END IF;
END IF;
ELSEIF ( NEW.Archived AND diff ) THEN
ELSE
IF ( NEW.Archived AND diff ) THEN
UPDATE Events_Archived SET DiskSpace=NEW.DiskSpace WHERE EventId=NEW.Id;
END IF;
END IF;
IF ( diff ) THEN
UPDATE Monitors SET TotalEventDiskSpace = COALESCE(TotalEventDiskSpace,0) - COALESCE(OLD.DiskSpace,0) + COALESCE(NEW.DiskSpace,0) WHERE Id=OLD.MonitorId;
END IF;
END;
//
delimiter ;

View File

@ -10,3 +10,18 @@ SET @s = (SELECT IF(
PREPARE stmt FROM @s;
EXECUTE stmt;
ALTER TABLE `Monitors` MODIFY `OutputCodec` INT UNSIGNED default 0;
SET @s = (SELECT IF(
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = DATABASE()
AND table_name = 'Monitors'
AND column_name = 'Encoder'
) > 0,
"SELECT 'Column Encoder already exists in Monitors'",
"ALTER TABLE `Monitors` ADD `Encoder` enum('auto','h264','h264_omx','mjpeg','mpeg1','mpeg2') AFTER `OutputCodec`"
));
PREPARE stmt FROM @s;
EXECUTE stmt;
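The INFORMATION_SCHEMA guard above makes the migration idempotent: the ALTER only runs when the column is actually missing. The same idiom as a generic template (table and column names hypothetical):

    SET @s = (SELECT IF(
      (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_schema = DATABASE()
          AND table_name = 'SomeTable'
          AND column_name = 'SomeColumn') > 0,
      "SELECT 'Column SomeColumn already exists in SomeTable'",
      "ALTER TABLE `SomeTable` ADD `SomeColumn` INT UNSIGNED DEFAULT 0"
    ));
    PREPARE stmt FROM @s;
    EXECUTE stmt;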

db/zm_update-1.35.17.sql Normal file
View File

@ -0,0 +1 @@
ALTER TABLE `Monitors` MODIFY `Encoder` enum('auto','h264','libx264', 'h264_omx', 'h264_vaapi', 'mjpeg','mpeg1','mpeg2');

View File

@ -28,7 +28,7 @@
%global _hardened_build 1
Name: zoneminder
Version: 1.35.16
Version: 1.35.17
Release: 1%{?dist}
Summary: A camera monitoring and analysis tool
Group: System Environment/Daemons

View File

@ -84,6 +84,8 @@ if [ "$1" = "configure" ]; then
else
echo "Not doing database upgrade due to remote db server ($ZM_DB_HOST)."
fi
echo "Done Updating; starting ZoneMinder."
deb-systemd-invoke restart zoneminder.service
fi

View File

@ -947,6 +947,19 @@ our @options = (
type => $types{integer},
category => 'network',
},
{
name => 'ZM_MIN_RTSP_PORT',
default => '',
description => 'Start of port range to contact for RTSP streaming video.',
help => q`
The beginning of a port range that will be used to offer
RTSP streaming of live captured video.
Each monitor will use this value plus the Monitor Id to stream
content. So a value of 2000 here will cause a stream for Monitor 1 to
hit port 2001.`,
type => $types{integer},
category => 'network',
},
{
name => 'ZM_MIN_RTP_PORT',
default => '40200',

View File

@ -147,6 +147,8 @@ our $mem_data = {
last_write_index => { type=>'uint32', seq=>$mem_seq++ },
last_read_index => { type=>'uint32', seq=>$mem_seq++ },
state => { type=>'uint32', seq=>$mem_seq++ },
capture_fps => { type=>'double', seq=>$mem_seq++ },
analysis_fps => { type=>'double', seq=>$mem_seq++ },
last_event => { type=>'uint64', seq=>$mem_seq++ },
action => { type=>'uint32', seq=>$mem_seq++ },
brightness => { type=>'int32', seq=>$mem_seq++ },
@ -214,7 +216,7 @@ sub zmMemInit {
|| $member_data->{type} eq 'bool4'
) {
$member_data->{size} = $member_data->{align} = 4;
} elsif ($member_data->{type} eq 'int16'
} elsif ( $member_data->{type} eq 'int16'
|| $member_data->{type} eq 'uint16'
) {
$member_data->{size} = $member_data->{align} = 2;
@ -223,6 +225,8 @@ sub zmMemInit {
|| $member_data->{type} eq 'bool1'
) {
$member_data->{size} = $member_data->{align} = 1;
} elsif ( $member_data->{type} eq 'double' ) {
$member_data->{size} = $member_data->{align} = 8;
} elsif ( $member_data->{type} =~ /^u?int8\[(\d+)\]$/ ) {
$member_data->{size} = $1;
$member_data->{align} = 1;
@ -236,7 +240,7 @@ sub zmMemInit {
$offset += ($member_data->{align} - ($offset%$member_data->{align}));
}
$member_data->{offset} = $offset;
$offset += $member_data->{size}
$offset += $member_data->{size};
}
$section_data->{size} = $offset - $section_data->{offset};
}
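With the new 8-byte double members, the padding above aligns the running offset to a multiple of 8 before a member's offset is recorded. A standalone Perl sketch of the same arithmetic:

    my $offset = 12;   # e.g. after three uint32 members
    my $align  = 8;    # alignment of a double
    $offset += ($align - ($offset % $align)) if $offset % $align;
    # $offset is now 16: the aligned position of the double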
@ -322,7 +326,7 @@ sub zmMemRead {
my $size = $mem_data->{$section}->{contents}->{$element}->{size};
if (!defined $offset || !defined $type || !defined $size) {
Error('Invalid field:'.$field.' setting to undef and exiting zmMemRead');
Error('Invalid field:'.$field.' setting to undef and exiting zmMemRead offset:'.$offset.' type:'.$type.' size:'.$size);
zmMemInvalidate($monitor);
return undef;
}
@ -355,7 +359,9 @@ sub zmMemRead {
} elsif ( $type eq 'int8' ) {
( $value ) = unpack('c', $data);
} elsif ( $type eq 'uint8' || $type eq 'bool1' ) {
( $value ) = unpack( 'C', $data );
( $value ) = unpack('C', $data);
} elsif ( $type eq 'double' ) {
( $value ) = unpack('d', $data);
} elsif ( $type =~ /^int8\[\d+\]$/ ) {
( $value ) = unpack('Z'.$size, $data);
} elsif ( $type =~ /^uint8\[\d+\]$/ ) {

View File

@ -92,7 +92,7 @@ delete @ENV{qw(IFS CDPATH ENV BASH_ENV)};
my @daemons = (
'zmc',
'zma',
#'zma',
'zmfilter.pl',
'zmaudit.pl',
'zmtrigger.pl',
@ -239,7 +239,7 @@ use Sys::MemInfo qw(totalmem freemem totalswap freeswap);
use ZoneMinder::Server qw(CpuLoad);
#use Data::Dumper;
use constant KILL_DELAY => 60; # seconds to wait between sending TERM and sending KILL
use constant KILL_DELAY => 10; # seconds to wait between sending TERM and sending KILL
our %cmd_hash;
our %pid_hash;

View File

@ -216,9 +216,6 @@ if ( $command =~ /^(?:start|restart)$/ ) {
} else {
runCommand("zmdc.pl start zmc -m $monitor->{Id}");
}
if ( $monitor->{Function} ne 'Monitor' ) {
runCommand("zmdc.pl start zma -m $monitor->{Id}");
}
if ( $Config{ZM_OPT_CONTROL} ) {
if ( $monitor->{Controllable} ) {
runCommand("zmdc.pl start zmcontrol.pl --id $monitor->{Id}");

View File

@ -123,7 +123,7 @@ while( 1 ) {
) ? (3/$monitor->{MaxFPS})
: $Config{ZM_WATCH_MAX_DELAY}
;
my $image_delay = $now-$capture_time;
my $image_delay = $now - $capture_time;
Debug("Monitor $monitor->{Id} last captured $image_delay seconds ago, max is $max_image_delay");
if ( $image_delay > $max_image_delay ) {
Warning("Restarting capture daemon for "
@ -138,9 +138,6 @@ while( 1 ) {
}
if ( $restart ) {
# Because zma depends on zmc, and zma can hold the shm in place, preventing zmc from using the space in /dev/shm,
# we need to stop zma before restarting zmc.
runCommand("zmdc.pl stop zma -m $$monitor{Id}") if $monitor->{Function} ne 'Monitor';
my $command;
if ( $monitor->{Type} eq 'Local' ) {
$command = "zmdc.pl restart zmc -d $monitor->{Device}";
@ -148,7 +145,6 @@ while( 1 ) {
$command = "zmdc.pl restart zmc -m $monitor->{Id}";
}
runCommand($command);
runCommand("zmdc.pl start zma -m $$monitor{Id}") if $monitor->{Function} ne 'Monitor';
} elsif ( $monitor->{Function} ne 'Monitor' ) {
# Now check analysis daemon
$restart = 0;
@ -160,7 +156,7 @@ while( 1 ) {
Error("Error reading shared data for $$monitor{Id} $$monitor{Name}");
} elsif ( !$image_time ) {
# We can't get the last capture time so can't be sure it's died.
$restart = 1;
#$restart = 1;
Error("Last analyse time for $$monitor{Id} $$monitor{Name} was zero.");
} else {
@ -181,8 +177,13 @@ while( 1 ) {
}
if ( $restart ) {
Info("Restarting analysis daemon for $$monitor{Id} $$monitor{Name}");
my $command = 'zmdc.pl restart zma -m '.$monitor->{Id};
Info("Restarting analysis daemon for $$monitor{Id} $$monitor{Name}\n");
my $command;
if ( $monitor->{Type} eq 'Local' ) {
$command = "zmdc.pl restart zmc -d $monitor->{Device}";
} else {
$command = "zmdc.pl restart zmc -m $monitor->{Id}";
}
runCommand($command);
} # end if restart
} # end if check analysis daemon

View File

@ -3,16 +3,78 @@
# Create files from the .in files
configure_file(zm_config_data.h.in "${CMAKE_CURRENT_BINARY_DIR}/zm_config_data.h" @ONLY)
# Group together all the source files that are used by all the binaries (zmc, zma, zmu, zms etc)
set(ZM_BIN_SRC_FILES zm_box.cpp zm_buffer.cpp zm_camera.cpp zm_comms.cpp zm_config.cpp zm_coord.cpp zm_curl_camera.cpp zm.cpp zm_db.cpp zm_logger.cpp zm_event.cpp zm_frame.cpp zm_eventstream.cpp zm_exception.cpp zm_file_camera.cpp zm_ffmpeg_input.cpp zm_ffmpeg_camera.cpp zm_group.cpp zm_image.cpp zm_jpeg.cpp zm_libvlc_camera.cpp zm_libvnc_camera.cpp zm_local_camera.cpp zm_monitor.cpp zm_monitorstream.cpp zm_ffmpeg.cpp zm_font.cpp zm_mpeg.cpp zm_packet.cpp zm_packetqueue.cpp zm_poly.cpp zm_regexp.cpp zm_remote_camera.cpp zm_remote_camera_http.cpp zm_remote_camera_nvsocket.cpp zm_remote_camera_rtsp.cpp zm_rtp.cpp zm_rtp_ctrl.cpp zm_rtp_data.cpp zm_rtp_source.cpp zm_rtsp.cpp zm_rtsp_auth.cpp zm_sdp.cpp zm_signal.cpp zm_stream.cpp zm_swscale.cpp zm_thread.cpp zm_time.cpp zm_timer.cpp zm_user.cpp zm_utils.cpp zm_video.cpp zm_videostore.cpp zm_zone.cpp zm_storage.cpp zm_fifo.cpp zm_crypt.cpp)
# Group together all the source files that are used by all the binaries (zmc, zmu, zms etc)
set(ZM_BIN_SRC_FILES
zm_analysis_thread.cpp
zm_box.cpp
zm_buffer.cpp
zm_camera.cpp
zm_comms.cpp
zm_config.cpp
zm_coord.cpp
zm_curl_camera.cpp
zm_crypt.cpp
zm.cpp
zm_db.cpp
zm_logger.cpp
zm_event.cpp
zm_eventstream.cpp
zm_exception.cpp
zm_fifo.cpp
zm_file_camera.cpp
zm_font.cpp
zm_frame.cpp
zm_group.cpp
zm_image.cpp
zm_jpeg.cpp
zm_libvlc_camera.cpp
zm_libvnc_camera.cpp
zm_local_camera.cpp
zm_monitor.cpp
zm_monitorstream.cpp
zm_ffmpeg.cpp
zm_ffmpeg_camera.cpp
zm_ffmpeg_input.cpp
zm_mpeg.cpp
zm_packet.cpp
zm_packetqueue.cpp
zm_poly.cpp
zm_regexp.cpp
zm_remote_camera.cpp
zm_remote_camera_http.cpp
zm_remote_camera_nvsocket.cpp
zm_remote_camera_rtsp.cpp
zm_rtp.cpp
zm_rtp_ctrl.cpp
zm_rtp_data.cpp
zm_rtp_source.cpp
zm_rtsp.cpp
zm_rtsp_auth.cpp
zm_rtsp_server_thread.cpp
zm_rtsp_server_adts_source.cpp
zm_rtsp_server_h264_device_source.cpp
zm_rtsp_server_device_source.cpp
zm_rtsp_server_server_media_subsession.cpp
zm_rtsp_server_unicast_server_media_subsession.cpp
zm_sdp.cpp
zm_signal.cpp
zm_stream.cpp
zm_swscale.cpp
zm_thread.cpp
zm_time.cpp
zm_timer.cpp
zm_user.cpp
zm_utils.cpp
zm_video.cpp
zm_videostore.cpp
zm_zone.cpp
zm_storage.cpp)
# A fix for cmake recompiling the source files for every target.
add_library(zm STATIC ${ZM_BIN_SRC_FILES})
link_directories(libbcrypt)
add_executable(zmc zmc.cpp)
add_executable(zma zma.cpp)
add_executable(zmu zmu.cpp)
add_executable(zms zms.cpp)
@ -21,16 +83,15 @@ include_directories(libbcrypt/include/bcrypt)
include_directories(jwt-cpp/include/jwt-cpp)
target_link_libraries(zmc zm ${ZM_EXTRA_LIBS} ${ZM_BIN_LIBS} ${CMAKE_DL_LIBS})
target_link_libraries(zma zm ${ZM_EXTRA_LIBS} ${ZM_BIN_LIBS} ${CMAKE_DL_LIBS})
target_link_libraries(zmu zm ${ZM_EXTRA_LIBS} ${ZM_BIN_LIBS} ${CMAKE_DL_LIBS} bcrypt)
target_link_libraries(zms zm ${ZM_EXTRA_LIBS} ${ZM_BIN_LIBS} ${CMAKE_DL_LIBS} bcrypt)
# Generate man files for the binaries destined for the bin folder
FOREACH(CBINARY zma zmc zmu)
FOREACH(CBINARY zmc zmu)
POD2MAN(${CMAKE_CURRENT_SOURCE_DIR}/${CBINARY}.cpp ${CBINARY} 8 ${ZM_MANPAGE_DEST_PREFIX})
ENDFOREACH(CBINARY zma zmc zmu)
ENDFOREACH(CBINARY zmc zmu)
install(TARGETS zmc zma zmu RUNTIME DESTINATION "${CMAKE_INSTALL_FULL_BINDIR}" PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(TARGETS zmc zmu RUNTIME DESTINATION "${CMAKE_INSTALL_FULL_BINDIR}" PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(TARGETS zms RUNTIME DESTINATION "${ZM_CGIDIR}" PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(CODE "execute_process(COMMAND ln -sf zms nph-zms WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})" )
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/nph-zms DESTINATION "${ZM_CGIDIR}")

View File

@ -0,0 +1,47 @@
#include "zm_analysis_thread.h"
AnalysisThread::AnalysisThread(Monitor *p_monitor) {
monitor = p_monitor;
terminate = false;
//sigemptyset(&block_set);
}
AnalysisThread::~AnalysisThread() {
Debug(2, "THREAD: deleteing analysis thread");
}
int AnalysisThread::run() {
Debug(2, "AnalysisThread::run()");
useconds_t analysis_rate = monitor->GetAnalysisRate();
unsigned int analysis_update_delay = monitor->GetAnalysisUpdateDelay();
time_t last_analysis_update_time, cur_time;
monitor->UpdateAdaptiveSkip();
last_analysis_update_time = time(0);
while ( !(terminate or zm_terminate) ) {
// Some periodic updates are required for variable capturing framerate
if ( analysis_update_delay ) {
cur_time = time(0);
if ( (unsigned int)( cur_time - last_analysis_update_time ) > analysis_update_delay ) {
analysis_rate = monitor->GetAnalysisRate();
monitor->UpdateAdaptiveSkip();
last_analysis_update_time = cur_time;
}
}
Debug(2, "Analyzing");
if ( !monitor->Analyse() ) {
Debug(2, "uSleeping for %d", (monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE));
usleep(monitor->Active() ? ZM_SAMPLE_RATE : ZM_SUSPENDED_RATE);
} else if ( analysis_rate ) {
Debug(2, "uSleeping for %d", analysis_rate);
usleep(analysis_rate);
} else {
Debug(2, "Not Sleeping");
}
} // end while ! terminate
return 0;
} // end AnalysisThread::run()
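A sketch of how the capture process would presumably drive this thread, assuming the start()/join() primitives of the Thread base class from zm_thread.h:

    // Hypothetical setup in zmc once the monitor is connected
    AnalysisThread *analysis_thread = new AnalysisThread(monitor);
    analysis_thread->start();    // executes run() on its own thread
    // ... capture loop ...
    analysis_thread->stop();     // sets terminate; run() leaves its loop
    analysis_thread->join();
    delete analysis_thread;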

src/zm_analysis_thread.h Normal file
View File

@ -0,0 +1,29 @@
#ifndef ZM_ANALYSIS_THREAD_H
#define ZM_ANALYSIS_THREAD_H
#include "zm_thread.h"
#include <signal.h>
#include "zm_monitor.h"
class AnalysisThread : public Thread {
private:
bool terminate;
sigset_t block_set;
Monitor *monitor;
public:
explicit AnalysisThread(Monitor *);
~AnalysisThread();
int run();
void stop() {
terminate = true;
}
bool stopped() const {
return terminate;
}
};
#endif

View File

@ -35,6 +35,7 @@ Camera::Camera(
bool p_record_audio
) :
monitor_id(p_monitor_id),
monitor(nullptr),
type(p_type),
width(p_width),
height(p_height),
@ -46,6 +47,12 @@ Camera::Camera(
contrast(p_contrast),
capture(p_capture),
record_audio(p_record_audio),
mVideoStreamId(-1),
mAudioStreamId(-1),
mVideoCodecContext(nullptr),
mAudioCodecContext(nullptr),
video_stream(nullptr),
oc(nullptr),
bytes(0)
{
linesize = width * colours;
@ -54,8 +61,6 @@ Camera::Camera(
Debug(2, "New camera id: %d width: %d line size: %d height: %d colours: %d subpixelorder: %d capture: %d",
monitor_id, width, linesize, height, colours, subpixelorder, capture);
monitor = nullptr;
}
Camera::~Camera() {

View File

@ -24,6 +24,7 @@
#include <sys/ioctl.h>
#include "zm_image.h"
#include "zm_packet.h"
class Camera;
@ -53,10 +54,29 @@ protected:
int contrast;
bool capture;
bool record_audio;
int mVideoStreamId;
int mAudioStreamId;
AVCodecContext *mVideoCodecContext;
AVCodecContext *mAudioCodecContext;
AVStream *video_stream;
AVFormatContext *oc;
unsigned int bytes;
public:
Camera( unsigned int p_monitor_id, SourceType p_type, unsigned int p_width, unsigned int p_height, int p_colours, int p_subpixelorder, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
Camera(
unsigned int p_monitor_id,
SourceType p_type,
unsigned int p_width,
unsigned int p_height,
int p_colours,
int p_subpixelorder,
int p_brightness,
int p_contrast,
int p_hue,
int p_colour,
bool p_capture,
bool p_record_audio
);
virtual ~Camera();
unsigned int getId() const { return monitor_id; }
@ -91,11 +111,17 @@ public:
//return (type == FFMPEG_SRC )||(type == REMOTE_SRC);
}
virtual AVStream *get_VideoStream() { return nullptr; };
virtual AVStream *get_AudioStream() { return nullptr; };
virtual AVCodecContext *get_VideoCodecContext() { return nullptr; };
virtual AVCodecContext *get_AudioCodecContext() { return nullptr; };
int get_VideoStreamId() { return mVideoStreamId; };
int get_AudioStreamId() { return mAudioStreamId; };
virtual int PrimeCapture() { return 0; }
virtual int PreCapture() = 0;
virtual int Capture(Image &image) = 0;
virtual int Capture(ZMPacket &p) = 0;
virtual int PostCapture() = 0;
virtual int CaptureAndRecord(Image &image, timeval recording, char* event_directory) = 0;
virtual int Close() = 0;
};
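With Capture(Image&) replaced by the pure-virtual Capture(ZMPacket&), every camera subclass now fills a ZMPacket and reports 1 for a captured frame, 0 for none, -1 on error (matching the cURL camera below). A minimal sketch of the shape of an override; MyCamera and grab_frame are hypothetical:

    int MyCamera::Capture(ZMPacket &zm_packet) {
      // Fill the packet's image with one decoded frame
      if ( !grab_frame(zm_packet.image) )  // hypothetical helper
        return -1;                         // hard error: caller reopens
      return 1;                            // one frame captured
    }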

View File

@ -28,6 +28,7 @@
#include <glob.h>
#include "zm_utils.h"
#include "zm_config.h"
// Note that Error and Debug calls won't actually go anywhere unless you
// set the relevant ENV vars because the logger gets its settings from the

View File

@ -163,7 +163,7 @@ int cURLCamera::PreCapture() {
return( 0 );
}
int cURLCamera::Capture( Image &image ) {
int cURLCamera::Capture( ZMPacket &zm_packet ) {
bool frameComplete = false;
/* MODE_STREAM specific variables */
@ -192,7 +192,7 @@ int cURLCamera::Capture( Image &image ) {
nRet = pthread_cond_wait(&data_available_cond,&shareddata_mutex);
if ( nRet != 0 ) {
Error("Failed waiting for available data condition variable: %s",strerror(nRet));
return -20;
return -1;
}
}
@ -295,7 +295,7 @@ int cURLCamera::Capture( Image &image ) {
need_more_data = true;
} else {
/* All good. decode the image */
image.DecodeJpeg(databuffer.extract(frame_content_length), frame_content_length, colours, subpixelorder);
zm_packet.image->DecodeJpeg(databuffer.extract(frame_content_length), frame_content_length, colours, subpixelorder);
frameComplete = true;
}
}
@ -305,7 +305,7 @@ int cURLCamera::Capture( Image &image ) {
nRet = pthread_cond_wait(&data_available_cond,&shareddata_mutex);
if(nRet != 0) {
Error("Failed waiting for available data condition variable: %s",strerror(nRet));
return -18;
return -1;
}
need_more_data = false;
}
@ -315,7 +315,7 @@ int cURLCamera::Capture( Image &image ) {
if (!single_offsets.empty()) {
if( (single_offsets.front() > 0) && (databuffer.size() >= single_offsets.front()) ) {
/* Extract frame */
image.DecodeJpeg(databuffer.extract(single_offsets.front()), single_offsets.front(), colours, subpixelorder);
zm_packet.image->DecodeJpeg(databuffer.extract(single_offsets.front()), single_offsets.front(), colours, subpixelorder);
single_offsets.pop_front();
frameComplete = true;
} else {
@ -329,7 +329,7 @@ int cURLCamera::Capture( Image &image ) {
nRet = pthread_cond_wait(&request_complete_cond,&shareddata_mutex);
if(nRet != 0) {
Error("Failed waiting for request complete condition variable: %s",strerror(nRet));
return -19;
return -1;
}
}
} else {
@ -344,7 +344,7 @@ int cURLCamera::Capture( Image &image ) {
unlock();
if(!frameComplete)
return -1;
return 0;
return 1;
}
@ -354,12 +354,6 @@ int cURLCamera::PostCapture() {
return( 0 );
}
int cURLCamera::CaptureAndRecord( Image &image, struct timeval recording, char* event_directory ) {
Error("Capture and Record not implemented for the cURL camera type");
// Nothing to do here
return( 0 );
}
size_t cURLCamera::data_callback(void *buffer, size_t size, size_t nmemb, void *userdata) {
lock();

View File

@ -77,9 +77,8 @@ public:
int PrimeCapture();
int PreCapture();
int Capture( Image &image );
int Capture( ZMPacket &p );
int PostCapture();
int CaptureAndRecord( Image &image, struct timeval recording, char* event_directory );
size_t data_callback(void *buffer, size_t size, size_t nmemb, void *userdata);
size_t header_callback(void *buffer, size_t size, size_t nmemb, void *userdata);

View File

@ -50,31 +50,54 @@ Event::Event(
Monitor *p_monitor,
struct timeval p_start_time,
const std::string &p_cause,
const StringSetMap &p_noteSetMap,
bool p_videoEvent ) :
const StringSetMap &p_noteSetMap
) :
id(0),
monitor(p_monitor),
start_time(p_start_time),
end_time({0,0}),
cause(p_cause),
noteSetMap(p_noteSetMap),
videoEvent(p_videoEvent),
videowriter(nullptr)
frames(0),
alarm_frames(0),
alarm_frame_written(false),
tot_score(0),
max_score(0),
//path(""),
//snapshot_file(),
//alarm_file(""),
videoStore(nullptr),
//video_name(""),
//video_file(""),
last_db_frame(0),
have_video_keyframe(false),
//scheme
save_jpegs(0)
{
std::string notes;
createNotes(notes);
struct timeval now;
gettimeofday(&now, 0);
bool untimedEvent = false;
if ( !start_time.tv_sec ) {
untimedEvent = true;
Warning("Event has zero time, setting to now");
start_time = now;
} else if ( start_time.tv_sec > now.tv_sec ) {
char buffer[26];
char buffer_now[26];
struct tm* tm_info;
tm_info = localtime(&start_time.tv_sec);
strftime(buffer, 26, "%Y:%m:%d %H:%M:%S", tm_info);
tm_info = localtime(&now.tv_sec);
strftime(buffer_now, 26, "%Y:%m:%d %H:%M:%S", tm_info);
Error(
"StartDateTime in the future %u.%u > %u.%u",
start_time.tv_sec, start_time.tv_usec, now.tv_sec, now.tv_usec
"StartDateTime in the future starttime %u.%u >? now %u.%u difference %d\n%s\n%s",
start_time.tv_sec, start_time.tv_usec, now.tv_sec, now.tv_usec,
(now.tv_sec-start_time.tv_sec),
buffer, buffer_now
);
start_time = now;
}
@ -85,12 +108,16 @@ Event::Event(
state_id = atoi(dbrow[0]);
}
// Copy it in case opening the mp4 doesn't work we can set it to another value
save_jpegs = monitor->GetOptSaveJPEGs();
Storage * storage = monitor->getStorage();
char sql[ZM_SQL_MED_BUFSIZ];
snprintf(sql, sizeof(sql), "INSERT INTO Events "
"( MonitorId, StorageId, Name, StartDateTime, Width, Height, Cause, Notes, StateId, Orientation, Videoed, DefaultVideo, SaveJPEGs, Scheme )"
" VALUES ( %u, %u, 'New Event', from_unixtime( %ld ), %u, %u, '%s', '%s', %u, %d, %d, '%s', %d, '%s' )",
snprintf(sql, sizeof(sql),
"INSERT INTO `Events` "
"( `MonitorId`, `StorageId`, `Name`, `StartDateTime`, `Width`, `Height`, `Cause`, `Notes`, `StateId`, `Orientation`, `Videoed`, `DefaultVideo`, `SaveJPEGs`, `Scheme` )"
" VALUES "
"( %d, %d, 'New Event', from_unixtime( %ld ), %d, %d, '%s', '%s', %d, %d, %d, '%s', %d, '%s' )",
monitor->Id(),
storage->Id(),
start_time.tv_sec,
@ -100,11 +127,12 @@ Event::Event(
notes.c_str(),
state_id,
monitor->getOrientation(),
videoEvent,
( monitor->GetOptVideoWriter() != 0 ? 1 : 0 ),
( monitor->GetOptVideoWriter() != 0 ? "video.mp4" : "" ),
monitor->GetOptSaveJPEGs(),
storage->SchemeString().c_str()
);
db_mutex.lock();
while ( mysql_query(&dbconn, sql) ) {
db_mutex.unlock();
@ -171,16 +199,6 @@ Event::Event(
Debug(1, "Using storage area at %s", path.c_str());
db_mutex.unlock();
if ( untimedEvent ) {
Warning("Event %" PRIu64 " has zero time, setting to current", id);
}
end_time.tv_sec = 0;
frames = 0;
alarm_frames = 0;
tot_score = 0;
max_score = 0;
alarm_frame_written = false;
last_db_frame = 0;
video_name = "";
snapshot_file = path + "/snapshot.jpg";
@ -189,7 +207,16 @@ Event::Event(
/* Save as video */
if ( monitor->GetOptVideoWriter() != 0 ) {
video_name = stringtf("%" PRIu64 "-%s", id, "video.mp4");
std::string container = monitor->OutputContainer();
if ( container == "auto" || container == "" ) {
if ( monitor->OutputCodec() == AV_CODEC_ID_H264 ) {
container = "mp4";
} else {
container = "mkv";
}
}
video_name = stringtf("%" PRIu64 "-%s.%s", id, "video", container.c_str());
snprintf(sql, sizeof(sql), "UPDATE Events SET DefaultVideo = '%s' WHERE Id=%" PRIu64, video_name.c_str(), id);
db_mutex.lock();
if ( mysql_query(&dbconn, sql) ) {
@ -200,47 +227,32 @@ Event::Event(
db_mutex.unlock();
video_file = path + "/" + video_name;
Debug(1, "Writing video file to %s", video_file.c_str());
Camera * camera = monitor->getCamera();
videoStore = new VideoStore(
video_file.c_str(),
container.c_str(),
camera->get_VideoStream(),
camera->get_VideoCodecContext(),
( monitor->RecordAudio() ? camera->get_AudioStream() : nullptr ),
( monitor->RecordAudio() ? camera->get_AudioCodecContext() : nullptr ),
monitor );
/* X264 MP4 video writer */
if ( monitor->GetOptVideoWriter() == Monitor::X264ENCODE ) {
#if ZM_HAVE_VIDEOWRITER_X264MP4
videowriter = new X264MP4Writer(video_file.c_str(),
monitor->Width(),
monitor->Height(),
monitor->Colours(),
monitor->SubpixelOrder(),
monitor->GetOptEncoderParamsVec());
#else
Error("ZoneMinder was not compiled with the X264 MP4 video writer, check dependencies (x264 and mp4v2)");
#endif
if ( videowriter != nullptr ) {
/* Open the video stream */
int nRet = videowriter->Open();
if ( nRet != 0 ) {
Error("Failed opening video stream");
delete videowriter;
videowriter = nullptr;
}
}
}
} else {
/* No video object */
videowriter = nullptr;
if ( !videoStore->open() ) {
delete videoStore;
videoStore = nullptr;
save_jpegs |= 1; // Turn on jpeg storage
}
} // end if GetOptVideoWriter
} // Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string &p_cause, const StringSetMap &p_noteSetMap, bool p_videoEvent )
Event::~Event() {
// We close the video store first: finishing the event may prompt someone to view the file, and it must be fully written by then.
/* Close the video file */
if ( videowriter != nullptr ) {
int nRet = videowriter->Close();
if ( nRet != 0 ) {
Error("Failed closing video stream");
}
delete videowriter;
videowriter = nullptr;
if ( videoStore != nullptr ) {
Debug(2, "Deleting video store");
delete videoStore;
videoStore = nullptr;
}
// endtime is set in AddFrame, so SHOULD be set to the value of the last frame timestamp.
@ -311,6 +323,7 @@ Event::~Event() {
} // Event::~Event()
void Event::createNotes(std::string &notes) {
if ( !notes.empty() ) {
notes.clear();
for ( StringSetMap::const_iterator mapIter = noteSetMap.begin(); mapIter != noteSetMap.end(); ++mapIter ) {
notes += mapIter->first;
@ -322,6 +335,9 @@ void Event::createNotes(std::string &notes) {
notes += *setIter;
}
}
} else {
notes = "";
}
} // void Event::createNotes(std::string &notes)
bool Event::WriteFrameImage(
@ -350,38 +366,12 @@ bool Event::WriteFrameImage(
}
return rc;
}
} // end Event::WriteFrameImage( Image *image, struct timeval timestamp, const char *event_file, bool alarm_frame )
bool Event::WriteFrameVideo(
const Image *image,
const struct timeval timestamp,
VideoWriter* videow) const {
const Image* frameimg = image;
Image ts_image;
bool Event::WritePacket(ZMPacket &packet) {
/* Checking for invalid parameters */
if ( videow == nullptr ) {
Error("NULL Video object");
if ( videoStore->writePacket(&packet) < 0 )
return false;
}
/* If the image does not contain a timestamp, add the timestamp */
if ( !config.timestamp_on_capture ) {
ts_image = *image;
monitor->TimestampImage(&ts_image, &timestamp);
frameimg = &ts_image;
}
/* Calculate delta time */
struct DeltaTimeval delta_time3;
DELTA_TIMEVAL(delta_time3, timestamp, start_time, DT_PREC_3);
unsigned int timeMS = (delta_time3.sec * delta_time3.prec) + delta_time3.fsec;
/* Encode and write the frame */
if ( videowriter->Encode(frameimg, timeMS) != 0 ) {
Error("Failed encoding video frame");
}
return true;
} // bool Event::WriteFrameVideo
@ -469,7 +459,7 @@ void Event::updateNotes(const StringSetMap &newNoteSetMap) {
if ( mysql_stmt_bind_param(stmt, bind) ) {
Error("Unable to bind sql '%s': %s", sql, mysql_stmt_error(stmt));
}
}
} // end if ! stmt
strncpy(notesStr, notes.c_str(), sizeof(notesStr));
@ -506,11 +496,16 @@ void Event::AddFramesInternal(int n_frames, int start_frame, Image **images, str
if ( timestamps[i]->tv_sec <= 0 ) {
Debug(1, "Not adding pre-capture frame %d, zero or less than 0 timestamp", i);
continue;
} else if ( timestamps[i]->tv_sec < 0 ) {
Warning( "Not adding pre-capture frame %d, negative timestamp", i );
continue;
} else {
Debug( 3, "Adding pre-capture frame %d, timestamp = (%d), start_time=(%d)", i, timestamps[i]->tv_sec, start_time.tv_sec );
}
frames++;
if ( monitor->GetOptSaveJPEGs() & 1 ) {
if ( save_jpegs & 1 ) {
std::string event_file = stringtf(staticConfig.capture_file_format, path.c_str(), frames);
Debug(1, "Writing pre-capture frame %d", frames);
WriteFrameImage(images[i], *(timestamps[i]), event_file.c_str());
@ -523,10 +518,6 @@ void Event::AddFramesInternal(int n_frames, int start_frame, Image **images, str
WriteFrameImage(images[i], *(timestamps[i]), snapshot_file.c_str());
}
if ( videowriter != nullptr ) {
WriteFrameVideo(images[i], *(timestamps[i]), videowriter);
}
struct DeltaTimeval delta_time;
DELTA_TIMEVAL(delta_time, *(timestamps[i]), start_time, DT_PREC_2);
// Delta is Decimal(8,2) so 6 integer digits and 2 decimal digits
@ -564,6 +555,26 @@ void Event::AddFramesInternal(int n_frames, int start_frame, Image **images, str
end_time = *timestamps[n_frames-1];
} // void Event::AddFramesInternal(int n_frames, int start_frame, Image **images, struct timeval **timestamps)
void Event::AddPacket(ZMPacket *packet) {
have_video_keyframe = have_video_keyframe || ( ( packet->codec_type == AVMEDIA_TYPE_VIDEO ) && packet->keyframe );
Debug(2, "have_video_keyframe %d codec_type %d == video? %d packet keyframe %d",
have_video_keyframe, packet->codec_type, (packet->codec_type == AVMEDIA_TYPE_VIDEO), packet->keyframe);
dumpPacket(&packet->packet, "Adding to event");
if ( videoStore ) {
if ( have_video_keyframe ) {
videoStore->writePacket(packet);
} else {
Debug(2, "No video keyframe yet, not writing");
}
//FIXME if it fails, we should write a jpeg
}
if ( packet->codec_type == AVMEDIA_TYPE_VIDEO )
AddFrame(packet->image, *(packet->timestamp), packet->score, packet->analysis_image);
end_time = *packet->timestamp;
return;
}
void Event::WriteDbFrames() {
char *frame_insert_values_ptr = (char *)&frame_insert_sql + 90; // 90 == strlen(frame_insert_sql);
@ -625,19 +636,24 @@ void Event::AddFrame(Image *image, struct timeval timestamp, int score, Image *a
}
frames++;
bool write_to_db = false;
FrameType frame_type = score>0?ALARM:(score<0?BULK:NORMAL);
// < 0 means no motion detection is being done.
if ( score < 0 )
score = 0;
tot_score += score;
if ( score > (int)max_score )
max_score = score;
if ( monitor->GetOptSaveJPEGs() & 1 ) {
if ( image ) {
if ( save_jpegs & 1 ) {
std::string event_file = stringtf(staticConfig.capture_file_format, path.c_str(), frames);
Debug(1, "Writing capture frame %d to %s", frames, event_file.c_str());
if ( !WriteFrameImage(image, timestamp, event_file.c_str()) ) {
Error("Failed to write frame image");
}
}
} // end if save_jpegs
// If this is the first frame, we should add a thumbnail to the event directory
if ( (frames == 1) || (score > (int)max_score) ) {
@ -655,42 +671,32 @@ void Event::AddFrame(Image *image, struct timeval timestamp, int score, Image *a
}
alarm_frames++;
tot_score += score;
if ( score > (int)max_score )
max_score = score;
if ( alarm_image ) {
if ( monitor->GetOptSaveJPEGs() & 2 ) {
if ( alarm_image and ( save_jpegs & 2 ) ) {
std::string event_file = stringtf(staticConfig.analyse_file_format, path.c_str(), frames);
Debug(1, "Writing analysis frame %d", frames);
if ( ! WriteFrameImage(alarm_image, timestamp, event_file.c_str(), true) ) {
Error("Failed to write analysis frame image");
}
}
}
} // end if frame_type == ALARM
} // end if is an alarm frame
} // end if has image
if ( videowriter != nullptr ) {
WriteFrameVideo(image, timestamp, videowriter);
}
bool db_frame = ( frame_type != BULK ) || (frames==1) || ((frames%config.bulk_frame_interval)==0) ;
if ( db_frame ) {
struct DeltaTimeval delta_time;
DELTA_TIMEVAL(delta_time, timestamp, start_time, DT_PREC_2);
Debug(1, "Frame delta is %d.%d - %d.%d = %d.%d",
start_time.tv_sec, start_time.tv_usec, timestamp.tv_sec, timestamp.tv_usec, delta_time.sec, delta_time.fsec);
double fps = monitor->get_fps();
bool db_frame = ( frame_type != BULK ) || (frames==1) || ((frames%config.bulk_frame_interval)==0) ;
if ( db_frame ) {
// The idea is to write out 1/sec
frame_data.push(new Frame(id, frames, frame_type, timestamp, delta_time, score));
double fps = monitor->get_capture_fps();
if ( write_to_db
or
(frame_data.size() >= MAX_DB_FRAMES)
or
(frame_type==BULK)
(frame_type == BULK)
or
( fps and (frame_data.size() > fps) )
) {

View File

@ -34,13 +34,15 @@
#include <set>
#include <map>
#include <queue>
#include <string>
#include "zm.h"
#include "zm_image.h"
#include "zm_stream.h"
#include "zm_video.h"
#include "zm_packet.h"
#include "zm_storage.h"
class VideoStore;
class Zone;
class Monitor;
class EventStream;
@ -82,7 +84,6 @@ class Event {
struct timeval end_time;
std::string cause;
StringSetMap noteSetMap;
bool videoEvent;
int frames;
int alarm_frames;
bool alarm_frame_written;
@ -91,16 +92,14 @@ class Event {
std::string path;
std::string snapshot_file;
std::string alarm_file;
VideoStore *videoStore;
VideoWriter* videowriter;
FILE* timecodes_fd;
std::string video_name;
std::string video_file;
std::string timecodes_name;
std::string timecodes_file;
int last_db_frame;
bool have_video_keyframe; // a flag to tell us if we have had a video keyframe when writing an mp4. The first frame SHOULD be a video keyframe.
Storage::Schemes scheme;
int save_jpegs;
void createNotes(std::string &notes);
@ -112,8 +111,8 @@ class Event {
Monitor *p_monitor,
struct timeval p_start_time,
const std::string &p_cause,
const StringSetMap &p_noteSetMap,
bool p_videoEvent=false);
const StringSetMap &p_noteSetMap
);
~Event();
uint64_t Id() const { return id; }
@ -124,6 +123,8 @@ class Event {
const struct timeval &StartTime() const { return start_time; }
const struct timeval &EndTime() const { return end_time; }
void AddPacket(ZMPacket *p);
bool WritePacket(ZMPacket &p);
bool SendFrameImage(const Image *image, bool alarm_frame=false);
bool WriteFrameImage(
Image *image,
@ -131,11 +132,6 @@ class Event {
const char *event_file,
bool alarm_frame=false
) const;
bool WriteFrameVideo(
const Image *image,
const struct timeval timestamp,
VideoWriter* videow
) const;
void updateNotes(const StringSetMap &stringSetMap);
@ -176,6 +172,7 @@ class Event {
return pre_alarm_count;
}
static void EmptyPreAlarmFrames() {
#if 0
while ( pre_alarm_count > 0 ) {
int i = pre_alarm_count - 1;
delete pre_alarm_data[i].image;
@ -186,6 +183,7 @@ class Event {
}
pre_alarm_count--;
}
#endif
pre_alarm_count = 0;
}
static void AddPreAlarmFrame(
@ -194,15 +192,18 @@ class Event {
int score=0,
Image *alarm_frame=nullptr
) {
#if 0
pre_alarm_data[pre_alarm_count].image = new Image(*image);
pre_alarm_data[pre_alarm_count].timestamp = timestamp;
pre_alarm_data[pre_alarm_count].score = score;
if ( alarm_frame ) {
pre_alarm_data[pre_alarm_count].alarm_frame = new Image(*alarm_frame);
}
#endif
pre_alarm_count++;
}
void SavePreAlarmFrames() {
#if 0
for ( int i = 0; i < pre_alarm_count; i++ ) {
AddFrame(
pre_alarm_data[i].image,
@ -210,6 +211,7 @@ class Event {
pre_alarm_data[i].score,
pre_alarm_data[i].alarm_frame);
}
#endif
EmptyPreAlarmFrames();
}
};

View File

@ -30,6 +30,13 @@
#include "zm_sendfile.h"
const std::string EventStream::StreamMode_Strings[4] = {
"None",
"Single",
"All",
"Gapless"
};
bool EventStream::loadInitialEventData(int monitor_id, time_t event_time) {
static char sql[ZM_SQL_SML_BUFSIZ];
@ -82,7 +89,10 @@ bool EventStream::loadInitialEventData(int monitor_id, time_t event_time) {
return true;
} // bool EventStream::loadInitialEventData( int monitor_id, time_t event_time )
bool EventStream::loadInitialEventData(uint64_t init_event_id, unsigned int init_frame_id) {
bool EventStream::loadInitialEventData(
uint64_t init_event_id,
unsigned int init_frame_id
) {
loadEventData(init_event_id);
if ( init_frame_id ) {
@ -213,7 +223,7 @@ bool EventStream::loadEventData(uint64_t event_id) {
event_data->event_id);
}
updateFrameRate((double)event_data->frame_count/event_data->duration);
updateFrameRate((event_data->frame_count and event_data->duration) ? (double)event_data->frame_count/event_data->duration : 1);
snprintf(sql, sizeof(sql), "SELECT `FrameId`, unix_timestamp(`TimeStamp`), `Delta` "
"FROM `Frames` WHERE `EventId` = %" PRIu64 " ORDER BY `FrameId` ASC", event_id);
@ -340,8 +350,7 @@ void EventStream::processCommand(const CmdMsg *msg) {
curr_frame_id = 1;
} else {
Debug(1, "mode is %s, current frame is %ld, frame count is %ld, last frame id is %ld",
(mode == MODE_SINGLE ? "single" : "not single"),
curr_frame_id, event_data->frame_count );
StreamMode_Strings[(int)mode], curr_frame_id, event_data->frame_count );
}
replay_rate = ZM_RATE_BASE;
@ -588,7 +597,7 @@ void EventStream::processCommand(const CmdMsg *msg) {
if ( (MsgCommand)msg->msg_data[0] == CMD_QUIT )
exit(0);
updateFrameRate((double)event_data->frame_count/event_data->duration);
updateFrameRate((event_data->frame_count and event_data->duration) ? (double)event_data->frame_count/event_data->duration : 1);
} // void EventStream::processCommand(const CmdMsg *msg)
bool EventStream::checkEventLoaded() {
@ -663,7 +672,7 @@ bool EventStream::checkEventLoaded() {
mysql_free_result(result);
forceEventChange = false;
} else {
Debug(2, "Pausing because mode is %d", mode);
Debug(2, "Pausing because mode is %s", StreamMode_Strings[mode].c_str());
if ( curr_frame_id <= 0 )
curr_frame_id = 1;
else
@ -756,7 +765,7 @@ bool EventStream::sendFrame(int delta_us) {
// when stored as an mp4, we just have the rotation as a flag in the headers
// so we need to rotate it before outputting
if (
(monitor->GetOptVideoWriter() == Monitor::H264PASSTHROUGH)
(monitor->GetOptVideoWriter() == Monitor::PASSTHROUGH)
and
(event_data->Orientation != Monitor::ROTATE_0)
) {
@ -836,7 +845,7 @@ void EventStream::runStream() {
exit(0);
}
updateFrameRate((double)event_data->frame_count/event_data->duration);
updateFrameRate((event_data->frame_count and event_data->duration) ? (double)event_data->frame_count/event_data->duration : 1);
gettimeofday(&start, nullptr);
uint64_t start_usec = start.tv_sec * 1000000 + start.tv_usec;
uint64_t last_frame_offset = 0;
@ -997,10 +1006,10 @@ void EventStream::runStream() {
delta_us = ((1000000 * ZM_RATE_BASE)/((base_fps?base_fps:1)*(replay_rate?abs(replay_rate*2):2)));
Debug(2, "Sleeping %d because 1000000 * ZM_RATE_BASE(%d) / ( base_fps (%f), replay_rate(%d)",
(unsigned long)((1000000 * ZM_RATE_BASE)/((base_fps?base_fps:1)*abs(replay_rate*2))),
delta_us,
ZM_RATE_BASE,
(base_fps?base_fps:1),
(replay_rate?abs(replay_rate*2):0)
(base_fps ? base_fps : 1),
(replay_rate ? abs(replay_rate*2) : 0)
);
if ( delta_us > 0 ) {
if ( delta_us > MAX_SLEEP_USEC ) {

View File

@ -37,9 +37,11 @@ extern "C" {
}
#endif
class EventStream : public StreamBase {
public:
typedef enum { MODE_NONE, MODE_SINGLE, MODE_ALL, MODE_ALL_GAPLESS } StreamMode;
static const std::string StreamMode_Strings[4];
protected:
struct FrameData {
@ -84,13 +86,13 @@ class EventStream : public StreamBase {
EventData *event_data;
protected:
bool loadEventData( uint64_t event_id );
bool loadInitialEventData( uint64_t init_event_id, unsigned int init_frame_id );
bool loadInitialEventData( int monitor_id, time_t event_time );
bool loadEventData(uint64_t event_id);
bool loadInitialEventData(uint64_t init_event_id, unsigned int init_frame_id);
bool loadInitialEventData(int monitor_id, time_t event_time);
bool checkEventLoaded();
void processCommand( const CmdMsg *msg );
bool sendFrame( int delta_us );
void processCommand(const CmdMsg *msg);
bool sendFrame(int delta_us);
public:
EventStream() :
@ -128,11 +130,9 @@ class EventStream : public StreamBase {
ffmpeg_input = nullptr;
}
}
void setStreamStart( uint64_t init_event_id, unsigned int init_frame_id );
void setStreamStart( int monitor_id, time_t event_time );
void setStreamMode( StreamMode p_mode ) {
mode = p_mode;
}
void setStreamStart(uint64_t init_event_id, unsigned int init_frame_id);
void setStreamStart(int monitor_id, time_t event_time);
void setStreamMode(StreamMode p_mode) { mode = p_mode; }
void runStream() override;
Image *getImage();
private:

View File

@ -290,7 +290,7 @@ static void zm_log_fps(double d, const char *postfix) {
}
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
void zm_dump_codecpar ( const AVCodecParameters *par ) {
void zm_dump_codecpar(const AVCodecParameters *par) {
Debug(1, "Dumping codecpar codec_type(%d %s) codec_id(%d %s) codec_tag(%" PRIu32 ") width(%d) height(%d) bit_rate(%" PRIu64 ") format(%d %s)",
par->codec_type,
av_get_media_type_string(par->codec_type),
@ -385,6 +385,9 @@ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
zm_log_fps(av_q2d(st->avg_frame_rate), "fps");
if (tbn)
zm_log_fps(1 / av_q2d(st->time_base), "stream tb numerator");
} else if ( codec->codec_type == AVMEDIA_TYPE_AUDIO ) {
Debug(1, "profile %d channels %d sample_rate %d",
codec->profile, codec->channels, codec->sample_rate);
}
if (st->disposition & AV_DISPOSITION_DEFAULT)
@ -425,6 +428,26 @@ int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt) {
return 0;
}
void fix_deprecated_pix_fmt(AVCodecContext *ctx) {
// Fix deprecated formats
switch ( ctx->pix_fmt ) {
case AV_PIX_FMT_YUVJ422P :
ctx->pix_fmt = AV_PIX_FMT_YUV422P;
break;
case AV_PIX_FMT_YUVJ444P :
ctx->pix_fmt = AV_PIX_FMT_YUV444P;
break;
case AV_PIX_FMT_YUVJ440P :
ctx->pix_fmt = AV_PIX_FMT_YUV440P;
break;
case AV_PIX_FMT_NONE :
case AV_PIX_FMT_YUVJ420P :
default:
ctx->pix_fmt = AV_PIX_FMT_YUV420P;
break;
}
}
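A sketch of where this helper would presumably be called: normalising a decoder's deprecated JPEG-range pixel format before the context is handed to swscale (the call site is hypothetical):

    // After opening the decoder context:
    fix_deprecated_pix_fmt(mVideoCodecContext);
    // pix_fmt is now a YUV equivalent that sws_getContext() accepts
    // without deprecated-format warnings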
#if LIBAVCODEC_VERSION_CHECK(56, 8, 0, 60, 100)
#else
unsigned int zm_av_packet_ref( AVPacket *dst, AVPacket *src ) {
@ -528,7 +551,7 @@ int zm_receive_packet(AVCodecContext *context, AVPacket &packet) {
Error("Error encoding (%d) (%s)", ret,
av_err2str(ret));
}
return 0;
return ret;
}
return 1;
#else
@ -536,8 +559,9 @@ int zm_receive_packet(AVCodecContext *context, AVPacket &packet) {
int ret = avcodec_encode_audio2(context, &packet, nullptr, &got_packet);
if ( ret < 0 ) {
Error("Error encoding (%d) (%s)", ret, av_err2str(ret));
return ret;
}
return got_packet;
return got_packet; // 1
#endif
} // end int zm_receive_packet(AVCodecContext *context, AVPacket &packet)
@ -636,7 +660,7 @@ void dumpPacket(AVStream *stream, AVPacket *pkt, const char *text) {
", size: %d, stream_index: %d, flags: %04x, keyframe(%d) pos: %" PRId64
", duration: %"
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
PRId64
PRIu64
#else
"d"
#endif
@ -653,6 +677,17 @@ void dumpPacket(AVStream *stream, AVPacket *pkt, const char *text) {
Debug(2, "%s:%d:%s: %s", __FILE__, __LINE__, text, b);
}
void zm_free_codec( AVCodecContext **ctx ) {
if ( *ctx ) {
avcodec_close(*ctx);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// We allocate and copy in newer ffmpeg, so need to free it
avcodec_free_context(ctx);
#endif
*ctx = NULL;
} // end if
}
void dumpPacket(AVPacket *pkt, const char *text) {
char b[10240];

View File

@ -323,14 +323,15 @@ void zm_dump_codecpar(const AVCodecParameters *par);
);
#if LIBAVUTIL_VERSION_CHECK(54, 4, 0, 74, 100)
#define zm_dump_video_frame(frame,text) Debug(1, "%s: format %d %s %dx%d linesize:%dx%d pts: %" PRId64, \
#define zm_dump_video_frame(frame, text) Debug(1, "%s: format %d %s %dx%d linesize:%dx%d pts: %" PRId64 " keyframe: %d", \
text, \
frame->format, \
av_get_pix_fmt_name((AVPixelFormat)frame->format), \
frame->width, \
frame->height, \
frame->linesize[0], frame->linesize[1], \
frame->pts \
frame->pts, \
frame->key_frame \
);
#else
@ -346,8 +347,8 @@ void zm_dump_codecpar(const AVCodecParameters *par);
#endif
#if LIBAVCODEC_VERSION_CHECK(56, 8, 0, 60, 100)
#define zm_av_packet_unref( packet ) av_packet_unref( packet )
#define zm_av_packet_ref( dst, src ) av_packet_ref( dst, src )
#define zm_av_packet_unref(packet) av_packet_unref(packet)
#define zm_av_packet_ref(dst, src) av_packet_ref(dst, src)
#else
unsigned int zm_av_packet_ref( AVPacket *dst, AVPacket *src );
#define zm_av_packet_unref( packet ) av_free_packet( packet )
@ -355,11 +356,17 @@ void zm_dump_codecpar(const AVCodecParameters *par);
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb);
#endif
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
#define zm_avcodec_decode_video( context, rawFrame, frameComplete, packet ) \
avcodec_send_packet( context, packet ); \
avcodec_receive_frame( context, rawFrame );
#else
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
#define zm_avcodec_decode_video( context, rawFrame, frameComplete, packet ) avcodec_decode_video2( context, rawFrame, frameComplete, packet )
#else
#define zm_avcodec_decode_video(context, rawFrame, frameComplete, packet ) avcodec_decode_video( context, rawFrame, frameComplete, packet->data, packet->size)
#endif
#endif
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
#define zm_av_frame_alloc() av_frame_alloc()
@ -372,6 +379,7 @@ void zm_dump_codecpar(const AVCodecParameters *par);
#endif
int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt);
void fix_deprecated_pix_fmt(AVCodecContext *);
bool is_video_stream(const AVStream *);
bool is_audio_stream(const AVStream *);
@ -416,4 +424,5 @@ int zm_add_samples_to_fifo(AVAudioFifo *fifo, AVFrame *frame);
int zm_get_samples_from_fifo(AVAudioFifo *fifo, AVFrame *frame);
#endif // ZM_FFMPEG_H

View File

@ -30,14 +30,12 @@ extern "C" {
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#include "libavutil/hwcontext.h"
#endif
#include "libavutil/pixdesc.h"
}
#ifndef AV_ERROR_MAX_STRING_SIZE
#define AV_ERROR_MAX_STRING_SIZE 64
#endif
#include <string>
#include <locale>
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
@ -130,7 +128,7 @@ FfmpegCamera::FfmpegCamera(
hwaccel_device(p_hwaccel_device)
{
if ( capture ) {
Initialise();
FFMPEGInit();
}
mFormatContext = nullptr;
@ -144,9 +142,6 @@ FfmpegCamera::FfmpegCamera(
mFrame = nullptr;
frameCount = 0;
mCanCapture = false;
videoStore = nullptr;
have_video_keyframe = false;
packetqueue = nullptr;
error_count = 0;
use_hwaccel = true;
#if HAVE_LIBAVUTIL_HWCONTEXT_H
@ -199,19 +194,9 @@ FfmpegCamera::FfmpegCamera(
FfmpegCamera::~FfmpegCamera() {
Close();
if ( capture ) {
Terminate();
}
FFMPEGDeInit();
}
void FfmpegCamera::Initialise() {
FFMPEGInit();
}
void FfmpegCamera::Terminate() {
}
int FfmpegCamera::PrimeCapture() {
if ( mCanCapture ) {
Debug(1, "Priming capture from %s, Closing", mPath.c_str());
@ -225,33 +210,19 @@ int FfmpegCamera::PrimeCapture() {
}
int FfmpegCamera::PreCapture() {
// If Reopen was called, then ffmpeg is closed and we need to reopen it.
if ( !mCanCapture )
return OpenFfmpeg();
// Nothing to do here
return 0;
}
int FfmpegCamera::Capture(Image &image) {
if ( !mCanCapture ) {
int FfmpegCamera::Capture(ZMPacket &zm_packet) {
if ( !mCanCapture )
return -1;
}
int ret;
// If the reopen thread has a value, but mCanCapture != 0, then we have just
// reopened the connection to the device, and we can clean up the thread.
int frameComplete = false;
while ( !frameComplete && !zm_terminate ) {
ret = av_read_frame(mFormatContext, &packet);
if ( ret < 0 ) {
if ( (ret = av_read_frame(mFormatContext, &packet)) < 0 ) {
if (
// Check if EOF.
(
ret == AVERROR_EOF
||
(mFormatContext->pb && mFormatContext->pb->eof_reached)
) ||
(ret == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
// Check for Connection failure.
(ret == -110)
) {
@ -263,79 +234,17 @@ int FfmpegCamera::Capture(Image &image) {
}
return -1;
}
dumpPacket(mFormatContext->streams[packet.stream_index], &packet, "ffmpeg_camera in");
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
zm_packet.codec_type = mFormatContext->streams[packet.stream_index]->codecpar->codec_type;
#else
zm_packet.codec_type = mFormatContext->streams[packet.stream_index]->codec->codec_type;
#endif
bytes += packet.size;
int keyframe = packet.flags & AV_PKT_FLAG_KEY;
if ( keyframe )
have_video_keyframe = true;
Debug(5, "Got packet from stream %d dts (%d) pts(%d)",
packet.stream_index, packet.pts, packet.dts);
// What about audio stream? Maybe someday we could do sound detection...
if (
(packet.stream_index == mVideoStreamId)
&&
(keyframe || have_video_keyframe)
) {
ret = zm_send_packet_receive_frame(mVideoCodecContext, mRawFrame, packet);
if ( ret < 0 ) {
if ( AVERROR(EAGAIN) != ret ) {
Warning("Unable to receive frame %d: code %d %s. error count is %d",
frameCount, ret, av_make_error_string(ret).c_str(), error_count);
error_count += 1;
if ( error_count > 100 ) {
Error("Error count over 100, going to close and re-open stream");
return -1;
}
}
zm_packet.set_packet(&packet);
zm_av_packet_unref(&packet);
continue;
}
frameComplete = 1;
zm_dump_video_frame(mRawFrame, "raw frame from decoder");
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
if (
(hw_pix_fmt != AV_PIX_FMT_NONE)
&&
(mRawFrame->format == hw_pix_fmt)
) {
/* retrieve data from GPU to CPU */
ret = av_hwframe_transfer_data(hwFrame, mRawFrame, 0);
if ( ret < 0 ) {
Error("Unable to transfer frame at frame %d: %s, continuing",
frameCount, av_make_error_string(ret).c_str());
zm_av_packet_unref(&packet);
continue;
}
zm_dump_video_frame(hwFrame, "After hwtransfer");
hwFrame->pts = mRawFrame->pts;
input_frame = hwFrame;
} else {
#endif
#endif
input_frame = mRawFrame;
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
}
#endif
#endif
if ( transfer_to_image(image, mFrame, input_frame) < 0 ) {
zm_av_packet_unref(&packet);
return -1;
}
frameCount++;
} else {
Debug(4, "Different stream_index %d", packet.stream_index);
} // end if packet.stream_index == mVideoStreamId
zm_av_packet_unref(&packet);
} // end while ! frameComplete
return frameComplete ? 1 : 0;
return 1;
} // FfmpegCamera::Capture
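Capture() no longer decodes: it demuxes one AVPacket, stamps the codec type, and hands the raw packet over via zm_packet.set_packet(), leaving decode to a downstream consumer. A consumer-side sketch under that assumption (the queuing call is hypothetical):

    ZMPacket zm_packet;
    if ( camera->Capture(zm_packet) > 0 ) {
      // decoding/analysis happens downstream, off the capture thread
      packetqueue->queuePacket(&zm_packet);
    }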
int FfmpegCamera::PostCapture() {
@ -346,7 +255,6 @@ int FfmpegCamera::PostCapture() {
int FfmpegCamera::OpenFfmpeg() {
int ret;
have_video_keyframe = false;
error_count = 0;
// Open the input, not necessarily a file
@ -416,6 +324,7 @@ int FfmpegCamera::OpenFfmpeg() {
}
av_dict_free(&opts);
Debug(1, "Finding stream info");
#if !LIBAVFORMAT_VERSION_CHECK(53, 6, 0, 6, 0)
ret = av_find_stream_info(mFormatContext);
#else
@ -449,6 +358,7 @@ int FfmpegCamera::OpenFfmpeg() {
}
}
} // end foreach stream
if ( mVideoStreamId == -1 ) {
Error("Unable to locate video stream in %s", mPath.c_str());
return -1;
@ -456,19 +366,17 @@ int FfmpegCamera::OpenFfmpeg() {
Debug(3, "Found video stream at index %d, audio stream at index %d",
mVideoStreamId, mAudioStreamId);
packetqueue = new zm_packetqueue(
(mVideoStreamId > mAudioStreamId) ? mVideoStreamId : mAudioStreamId);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// mVideoCodecContext = avcodec_alloc_context3(NULL);
// avcodec_parameters_to_context(mVideoCodecContext,
// mFormatContext->streams[mVideoStreamId]->codecpar);
mVideoCodecContext = avcodec_alloc_context3(nullptr);
avcodec_parameters_to_context(mVideoCodecContext,
mFormatContext->streams[mVideoStreamId]->codecpar);
// this isn't copied.
// mVideoCodecContext->time_base =
// mFormatContext->streams[mVideoStreamId]->codec->time_base;
mVideoCodecContext->time_base =
mFormatContext->streams[mVideoStreamId]->codec->time_base;
#else
#endif
mVideoCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
//mVideoCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
#ifdef CODEC_FLAG2_FAST
mVideoCodecContext->flags2 |= CODEC_FLAG2_FAST | CODEC_FLAG_LOW_DELAY;
#endif
@ -495,8 +403,8 @@ int FfmpegCamera::OpenFfmpeg() {
if ( use_hwaccel && (hwaccel_name != "") ) {
#if HAVE_LIBAVUTIL_HWCONTEXT_H
// 3.2 doesn't seem to have all the bits in place, so let's require 3.3 and up
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
// Print out available types
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
// Print out available types
enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
while ( (type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE )
Debug(1, "%s", av_hwdevice_get_type_name(type));
@ -509,22 +417,24 @@ int FfmpegCamera::OpenFfmpeg() {
Debug(1, "Found hwdevice %s", av_hwdevice_get_type_name(type));
}
#if LIBAVUTIL_VERSION_CHECK(56, 22, 0, 14, 0)
#if LIBAVUTIL_VERSION_CHECK(56, 22, 0, 14, 0)
// Get hw_pix_fmt
for ( int i = 0;; i++ ) {
const AVCodecHWConfig *config = avcodec_get_hw_config(mVideoCodec, i);
if ( !config ) {
Debug(1, "Decoder %s does not support device type %s.",
mVideoCodec->name, av_hwdevice_get_type_name(type));
Debug(1, "Decoder %s does not support config %d.",
mVideoCodec->name, i);
break;
}
if ( (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)
&& (config->device_type == type)
) {
hw_pix_fmt = config->pix_fmt;
break;
Debug(1, "Decoder %s does support our type %s.",
mVideoCodec->name, av_hwdevice_get_type_name(type));
//break;
} else {
Debug(1, "decoder %s hwConfig doesn't match our type: %s != %s, pix_fmt %s.",
Debug(1, "Decoder %s hwConfig doesn't match our type: %s != %s, pix_fmt %s.",
mVideoCodec->name,
av_hwdevice_get_type_name(type),
av_hwdevice_get_type_name(config->device_type),
@ -532,36 +442,43 @@ int FfmpegCamera::OpenFfmpeg() {
);
}
} // end foreach hwconfig
#else
#else
hw_pix_fmt = find_fmt_by_hw_type(type);
#endif
#endif
if ( hw_pix_fmt != AV_PIX_FMT_NONE ) {
Debug(1, "Selected hw_pix_fmt %d %s",
hw_pix_fmt, av_get_pix_fmt_name(hw_pix_fmt));
mVideoCodecContext->hwaccel_flags |= AV_HWACCEL_FLAG_IGNORE_LEVEL;
//if (!lavc_param->check_hw_profile)
mVideoCodecContext->hwaccel_flags |= AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH;
ret = av_hwdevice_ctx_create(&hw_device_ctx, type,
(hwaccel_device != "" ? hwaccel_device.c_str(): nullptr), nullptr, 0);
(hwaccel_device != "" ? hwaccel_device.c_str() : nullptr), nullptr, 0);
if ( ret < 0 and hwaccel_device != "" ) {
ret = av_hwdevice_ctx_create(&hw_device_ctx, type, nullptr, nullptr, 0);
}
if ( ret < 0 ) {
Error("Failed to create hwaccel device. %s",av_make_error_string(ret).c_str());
Error("Failed to create hwaccel device. %s", av_make_error_string(ret).c_str());
hw_pix_fmt = AV_PIX_FMT_NONE;
} else {
Debug(1, "Created hwdevice for %s", hwaccel_device.c_str());
mVideoCodecContext->get_format = get_hw_format;
mVideoCodecContext->hw_device_ctx = av_buffer_ref(hw_device_ctx);
hwFrame = zm_av_frame_alloc();
}
} else {
Debug(1, "Failed to find suitable hw_pix_fmt.");
}
#else
#else
Debug(1, "AVCodec not new enough for hwaccel");
#endif
#endif
#else
Warning("HWAccel support not compiled in.");
#endif
} // end if hwaccel_name
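// For reference, the block above follows FFmpeg's standard hwaccel
// negotiation pattern. A minimal sketch (not part of this change; assumes
// the file-scope hw_pix_fmt and get_hw_format used in this file):
//
//   static enum AVPixelFormat get_hw_format(
//       AVCodecContext *ctx, const enum AVPixelFormat *fmts) {
//     for ( const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++ )
//       if ( *p == hw_pix_fmt ) return *p;  // decoder offers the hw format
//     return AV_PIX_FMT_NONE;               // no usable hw format
//   }
//
//   AVBufferRef *hw_ctx = nullptr;
//   if ( av_hwdevice_ctx_create(&hw_ctx, type, nullptr, nullptr, 0) >= 0 ) {
//     codec_ctx->get_format = get_hw_format;
//     codec_ctx->hw_device_ctx = av_buffer_ref(hw_ctx);
//   }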
// Open the codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
ret = avcodec_open(mVideoCodecContext, mVideoCodec);
#else
@ -604,50 +521,16 @@ int FfmpegCamera::OpenFfmpeg() {
zm_dump_stream_format(mFormatContext, mAudioStreamId, 0, 0);
// Open the codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
if ( avcodec_open(mAudioCodecContext, mAudioCodec) < 0 ) {
if ( avcodec_open(mAudioCodecContext, mAudioCodec) < 0 )
#else
if ( avcodec_open2(mAudioCodecContext, mAudioCodec, nullptr) < 0 ) {
if ( avcodec_open2(mAudioCodecContext, mAudioCodec, nullptr) < 0 )
#endif
{
Error("Unable to open codec for audio stream from %s", mPath.c_str());
return -1;
}
zm_dump_codec(mAudioCodecContext);
} // end if find decoder
} // end if have audio_context
// Allocate space for the native video frame
mRawFrame = zm_av_frame_alloc();
// Allocate space for the converted video frame
mFrame = zm_av_frame_alloc();
if ( mRawFrame == nullptr || mFrame == nullptr ) {
Error("Unable to allocate frame for %s", mPath.c_str());
return -1;
}
mFrame->width = width;
mFrame->height = height;
#if HAVE_LIBSWSCALE
if ( !sws_isSupportedInput(mVideoCodecContext->pix_fmt) ) {
Error("swscale does not support the codec format for input: %s",
av_get_pix_fmt_name(mVideoCodecContext->pix_fmt)
);
return -1;
}
if ( !sws_isSupportedOutput(imagePixFormat) ) {
Error("swscale does not support the target format: %s",
av_get_pix_fmt_name(imagePixFormat)
);
return -1;
}
#else // HAVE_LIBSWSCALE
Fatal("You must compile ffmpeg with the --enable-swscale "
"option to use ffmpeg cameras");
#endif // HAVE_LIBSWSCALE
} // end if opened
} // end if found decoder
} // end if have audio stream
if (
((unsigned int)mVideoCodecContext->width != width)
@ -660,11 +543,10 @@ int FfmpegCamera::OpenFfmpeg() {
mCanCapture = true;
return 0;
return 1;
} // int FfmpegCamera::OpenFfmpeg()
int FfmpegCamera::Close() {
mCanCapture = false;
if ( mFrame ) {
@ -682,22 +564,10 @@ int FfmpegCamera::Close() {
}
#endif
#if HAVE_LIBSWSCALE
if ( mConvertContext ) {
sws_freeContext(mConvertContext);
mConvertContext = nullptr;
}
#endif
if ( videoStore ) {
delete videoStore;
videoStore = nullptr;
}
if ( mVideoCodecContext ) {
avcodec_close(mVideoCodecContext);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// avcodec_free_context(&mVideoCodecContext);
avcodec_free_context(&mVideoCodecContext);
#endif
mVideoCodecContext = nullptr; // Freed by av_close_input_file
}
@ -724,345 +594,9 @@ int FfmpegCamera::Close() {
mFormatContext = nullptr;
}
if ( packetqueue ) {
delete packetqueue;
packetqueue = nullptr;
}
return 0;
} // end FfmpegCamera::Close
// Function to handle capture and store
int FfmpegCamera::CaptureAndRecord(
Image &image,
timeval recording,
char* event_file
) {
if ( !mCanCapture ) {
return -1;
}
int ret;
struct timeval video_buffer_duration = monitor->GetVideoBufferDuration();
int frameComplete = false;
while ( !frameComplete ) {
av_init_packet(&packet);
ret = av_read_frame(mFormatContext, &packet);
if ( ret < 0 ) {
if (
// Check if EOF.
(
(ret == AVERROR_EOF) ||
(mFormatContext->pb && mFormatContext->pb->eof_reached)
) ||
// Check for Connection failure.
(ret == -110)
) {
Info("Unable to read packet from stream %d: error %d \"%s\".",
packet.stream_index, ret, av_make_error_string(ret).c_str());
} else {
Error("Unable to read packet from stream %d: error %d \"%s\".",
packet.stream_index, ret, av_make_error_string(ret).c_str());
}
return -1;
}
int keyframe = packet.flags & AV_PKT_FLAG_KEY;
bytes += packet.size;
dumpPacket(
mFormatContext->streams[packet.stream_index],
&packet,
"Captured Packet");
if ( packet.dts == AV_NOPTS_VALUE ) {
packet.dts = packet.pts;
}
// Video recording
if ( recording.tv_sec ) {
uint32_t last_event_id = monitor->GetLastEventId();
uint32_t video_writer_event_id = monitor->GetVideoWriterEventId();
if ( last_event_id != video_writer_event_id ) {
Debug(2, "Have change of event. last_event(%d), our current (%d)",
last_event_id, video_writer_event_id);
if ( videoStore ) {
Info("Re-starting video storage module");
// I don't know if this is important or not... but I figure we might
// as well write this last packet out to the store before closing it.
// Also don't know how much it matters for audio.
if ( packet.stream_index == mVideoStreamId ) {
// Write the packet to our video store
int ret = videoStore->writeVideoFramePacket(&packet);
if ( ret < 0 ) { // Less than zero and we skipped a frame
Warning("Error writing last packet to videostore.");
}
} // end if video
delete videoStore;
videoStore = nullptr;
have_video_keyframe = false;
monitor->SetVideoWriterEventId(0);
} // end if videoStore
} // end if end of recording
if ( last_event_id && !videoStore ) {
// Instantiate the video storage module
packetqueue->dumpQueue();
if ( record_audio ) {
if ( mAudioStreamId == -1 ) {
Debug(3, "Record Audio on but no audio stream found");
videoStore = new VideoStore((const char *) event_file, "mp4",
mFormatContext->streams[mVideoStreamId],
nullptr,
this->getMonitor());
} else {
Debug(3, "Video module initiated with audio stream");
videoStore = new VideoStore((const char *) event_file, "mp4",
mFormatContext->streams[mVideoStreamId],
mFormatContext->streams[mAudioStreamId],
this->getMonitor());
}
} else {
if ( mAudioStreamId >= 0 ) {
Debug(3, "Record_audio is false so exclude audio stream");
}
videoStore = new VideoStore((const char *) event_file, "mp4",
mFormatContext->streams[mVideoStreamId],
nullptr,
this->getMonitor());
} // end if record_audio
if ( !videoStore->open() ) {
delete videoStore;
videoStore = nullptr;
} else {
monitor->SetVideoWriterEventId(last_event_id);
// Need to write out all the frames from the last keyframe?
// No... need to write out all frames from when the event began.
// Due to PreEventFrames, this could be more than
// since the last keyframe.
unsigned int packet_count = 0;
ZMPacket *queued_packet;
struct timeval video_offset = {0};
// Clear all packets that predate the moment when the recording began
packetqueue->clear_unwanted_packets(
&recording, 0, mVideoStreamId);
while ( (queued_packet = packetqueue->popPacket()) ) {
AVPacket *avp = queued_packet->av_packet();
// compute time offset between event start and first frame in video
if ( packet_count == 0 ) {
monitor->SetVideoWriterStartTime(queued_packet->timestamp);
timersub(&queued_packet->timestamp, &recording, &video_offset);
Info("Event video offset is %.3f sec (<0 means video starts early)",
video_offset.tv_sec + video_offset.tv_usec*1e-6);
}
packet_count += 1;
// Write the packet to our video store
Debug(2, "Writing queued packet stream: %d KEY %d, remaining (%d)",
avp->stream_index,
avp->flags & AV_PKT_FLAG_KEY,
packetqueue->size());
if ( avp->stream_index == mVideoStreamId ) {
ret = videoStore->writeVideoFramePacket(avp);
have_video_keyframe = true;
} else if ( avp->stream_index == mAudioStreamId ) {
ret = videoStore->writeAudioFramePacket(avp);
} else {
Warning("Unknown stream id in queued packet (%d)",
avp->stream_index);
ret = -1;
}
if ( ret < 0 ) {
// Less than zero and we skipped a frame
}
delete queued_packet;
} // end while packets in the packetqueue
Debug(2, "Wrote %d queued packets", packet_count);
} // end if videostore is open or not
} // end if ! was recording
} else {
// Not recording
if ( videoStore ) {
Debug(1, "Deleting videoStore instance");
delete videoStore;
videoStore = nullptr;
have_video_keyframe = false;
monitor->SetVideoWriterEventId(0);
}
} // end if recording or not
// Buffer video packets, since we are not recording.
// All audio packets are keyframes, so only if it's a video keyframe
if ( packet.stream_index == mVideoStreamId ) {
if ( keyframe ) {
Debug(3, "Clearing queue");
if (video_buffer_duration.tv_sec > 0 || video_buffer_duration.tv_usec > 0) {
packetqueue->clearQueue(&video_buffer_duration, mVideoStreamId);
}
else {
packetqueue->clearQueue(monitor->GetPreEventCount(), mVideoStreamId);
}
if (
packetqueue->packet_count(mVideoStreamId)
>=
monitor->GetImageBufferCount()
) {
Warning(
"ImageBufferCount %d is too small. "
"Needs to be at least %d. "
"Either increase it or decrease time between keyframes",
monitor->GetImageBufferCount(),
packetqueue->packet_count(mVideoStreamId)+1);
}
packetqueue->queuePacket(&packet);
} else if ( packetqueue->size() ) {
// not a keyframe, but the queue already holds packets (starting at a keyframe), so keep appending
packetqueue->queuePacket(&packet);
}
} else if ( packet.stream_index == mAudioStreamId ) {
// Ensure that the queue always begins with a video keyframe
if ( record_audio && packetqueue->size() ) {
packetqueue->queuePacket(&packet);
}
} // end if packet type
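// Invariant maintained by the branches above: the packetqueue is either
// empty or begins with a video keyframe, so a newly created event can
// always start decoding from the head of the queue.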
if ( packet.stream_index == mVideoStreamId ) {
if ( (have_video_keyframe || keyframe) && videoStore ) {
int ret = videoStore->writeVideoFramePacket(&packet);
if ( ret < 0 ) {
// Less than zero and we skipped a frame
Error("Unable to write video packet code: %d, framecount %d: %s",
ret, frameCount, av_make_error_string(ret).c_str());
} else {
have_video_keyframe = true;
}
} // end if keyframe or have_video_keyframe
if ( monitor->DecodingEnabled() ) {
ret = zm_send_packet_receive_frame(mVideoCodecContext, mRawFrame, packet);
if ( ret < 0 ) {
if ( AVERROR(EAGAIN) != ret ) {
Warning("Unable to receive frame %d: code %d %s. error count is %d",
frameCount, ret, av_make_error_string(ret).c_str(), error_count);
error_count += 1;
if ( error_count > 100 ) {
Error("Error count over 100, going to close and re-open stream");
return -1;
}
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
if ( (ret == AVERROR_INVALIDDATA ) && (hw_pix_fmt != AV_PIX_FMT_NONE) ) {
use_hwaccel = false;
return -1;
}
#endif
#endif
}
zm_av_packet_unref(&packet);
continue;
}
if ( error_count > 0 ) error_count--;
zm_dump_video_frame(mRawFrame, "raw frame from decoder");
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
if (
(hw_pix_fmt != AV_PIX_FMT_NONE)
&&
(mRawFrame->format == hw_pix_fmt)
) {
/* retrieve data from GPU to CPU */
ret = av_hwframe_transfer_data(hwFrame, mRawFrame, 0);
if ( ret < 0 ) {
Error("Unable to transfer frame at frame %d: %s, continuing",
frameCount, av_make_error_string(ret).c_str());
zm_av_packet_unref(&packet);
continue;
}
zm_dump_video_frame(hwFrame, "After hwtransfer");
hwFrame->pts = mRawFrame->pts;
input_frame = hwFrame;
} else {
#endif
#endif
input_frame = mRawFrame;
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
}
#endif
#endif
if ( transfer_to_image(image, mFrame, input_frame) < 0 ) {
Error("Failed to transfer from frame to image");
zm_av_packet_unref(&packet);
return -1;
}
} // end if DecodingEnabled
frameComplete = 1;
frameCount++;
} else if ( packet.stream_index == mAudioStreamId ) {
// FIXME best way to copy all other streams
frameComplete = 1;
if ( videoStore ) {
if ( record_audio ) {
if ( have_video_keyframe ) {
// Write the packet to our video store
// FIXME no relevance of last key frame
int ret = videoStore->writeAudioFramePacket(&packet);
if ( ret < 0 ) {
// Less than zero and we skipped a frame
Warning("Failure to write audio packet.");
zm_av_packet_unref(&packet);
return 0;
}
} else {
Debug(3, "Not recording audio because no video keyframe");
}
} else {
Debug(4, "Not doing recording of audio packet");
}
} else {
Debug(4, "Have audio packet, but not recording atm");
}
zm_av_packet_unref(&packet);
return 0;
} else {
#if LIBAVUTIL_VERSION_CHECK(56, 23, 0, 23, 0)
Debug(3, "Some other stream index %d, %s",
packet.stream_index,
av_get_media_type_string(
mFormatContext->streams[packet.stream_index]->codecpar->codec_type)
);
#else
Debug(3, "Some other stream index %d", packet.stream_index);
#endif
} // end if is video or audio or something else
// the packet contents are ref counted... when queuing, we allocate another
// packet and reference it with that one, so we should always need to unref
// here, which should not affect the queued version.
zm_av_packet_unref(&packet);
} // end while ! frameComplete
return frameCount;
} // end FfmpegCamera::CaptureAndRecord
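// Illustrative sketch (not part of this change): the send/receive decode
// pattern that zm_send_packet_receive_frame() above presumably wraps.
static int decode_one(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt) {
  int ret = avcodec_send_packet(ctx, pkt);
  if ( ret < 0 )
    return ret;  // AVERROR(EAGAIN) here means: drain pending frames first
  // 0 on success; AVERROR(EAGAIN) means the decoder wants more packets
  return avcodec_receive_frame(ctx, frame);
}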
int FfmpegCamera::transfer_to_image(
Image &image,
AVFrame *output_frame,

View File

@ -25,7 +25,6 @@
#include "zm_buffer.h"
#include "zm_ffmpeg.h"
#include "zm_videostore.h"
#include "zm_packetqueue.h"
#if HAVE_LIBAVUTIL_HWCONTEXT_H
typedef struct DecodeContext {
@ -41,6 +40,8 @@ class FfmpegCamera : public Camera {
std::string mPath;
std::string mMethod;
std::string mOptions;
std::string encoder_options;
std::string hwaccel_name;
std::string hwaccel_device;
@ -50,15 +51,12 @@ class FfmpegCamera : public Camera {
#if HAVE_LIBAVFORMAT
AVFormatContext *mFormatContext;
int mVideoStreamId;
int mAudioStreamId;
AVCodecContext *mVideoCodecContext;
AVCodecContext *mAudioCodecContext;
AVCodec *mVideoCodec;
AVCodec *mAudioCodec;
AVFrame *mRawFrame;
AVFrame *mFrame;
_AVPIXELFORMAT imagePixFormat;
AVFrame *input_frame; // Use to point to mRawFrame or hwFrame;
AVFrame *hwFrame; // Will also be used to indicate if hwaccel is in use
@ -77,10 +75,6 @@ class FfmpegCamera : public Camera {
bool mCanCapture;
#endif // HAVE_LIBAVFORMAT
VideoStore *videoStore;
zm_packetqueue *packetqueue;
bool have_video_keyframe;
#if HAVE_LIBSWSCALE
struct SwsContext *mConvertContext;
#endif
@ -112,14 +106,22 @@ class FfmpegCamera : public Camera {
const std::string &Options() const { return mOptions; }
const std::string &Method() const { return mMethod; }
void Initialise();
void Terminate();
int PrimeCapture();
int PreCapture();
int Capture( Image &image );
int CaptureAndRecord( Image &image, timeval recording, char* event_directory );
int Capture(ZMPacket &p);
int PostCapture();
AVStream *get_VideoStream() {
if ( mVideoStreamId != -1 )
return mFormatContext->streams[mVideoStreamId];
return nullptr;
}
AVStream *get_AudioStream() {
if ( mAudioStreamId != -1 )
return mFormatContext->streams[mAudioStreamId];
return nullptr;
}
AVCodecContext *get_VideoCodecContext() { return mVideoCodecContext; };
AVCodecContext *get_AudioCodecContext() { return mAudioCodecContext; };
private:
static int FfmpegInterruptCallback(void*ctx);
int transfer_to_image(Image &i, AVFrame *output_frame, AVFrame *input_frame);

View File

@ -14,28 +14,29 @@ FFmpeg_Input::FFmpeg_Input() {
}
FFmpeg_Input::~FFmpeg_Input() {
if ( streams ) {
for ( unsigned int i = 0; i < input_format_context->nb_streams; i += 1 ) {
avcodec_close(streams[i].context);
avcodec_free_context(&streams[i].context);
}
delete[] streams;
streams = nullptr;
if ( input_format_context ) {
Close();
}
if ( frame ) {
av_frame_free(&frame);
frame = nullptr;
}
if ( input_format_context ) {
#if !LIBAVFORMAT_VERSION_CHECK(53, 17, 0, 25, 0)
av_close_input_file(input_format_context);
#else
avformat_close_input(&input_format_context);
#endif
input_format_context = nullptr;
}
} // end ~FFmpeg_Input()
int FFmpeg_Input::Open(
const AVStream * video_in_stream,
const AVStream * audio_in_stream
) {
video_stream_id = video_in_stream->index;
int max_stream_index = video_in_stream->index;
if ( audio_in_stream ) {
max_stream_index = video_in_stream->index > audio_in_stream->index ? video_in_stream->index : audio_in_stream->index;
audio_stream_id = audio_in_stream->index;
}
streams = new stream[max_stream_index+1]; // stream indices run 0..max_stream_index
return 0;
}
int FFmpeg_Input::Open(const char *filepath) {
int error;
@ -43,8 +44,8 @@ int FFmpeg_Input::Open(const char *filepath) {
/** Open the input file to read from it. */
error = avformat_open_input(&input_format_context, filepath, nullptr, nullptr);
if ( error < 0 ) {
Error("Could not open input file '%s' (error '%s')\n",
filepath, av_make_error_string(error).c_str() );
Error("Could not open input file '%s' (error '%s')",
filepath, av_make_error_string(error).c_str());
input_format_context = nullptr;
return error;
}
@ -119,6 +120,30 @@ int FFmpeg_Input::Open(const char *filepath) {
return 0;
} // end int FFmpeg_Input::Open( const char * filepath )
int FFmpeg_Input::Close( ) {
if ( streams ) {
for ( unsigned int i = 0; i < input_format_context->nb_streams; i += 1 ) {
avcodec_close(streams[i].context);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_free_context(&streams[i].context);
streams[i].context = nullptr;
#endif
}
delete[] streams;
streams = nullptr;
}
if ( input_format_context ) {
#if !LIBAVFORMAT_VERSION_CHECK(53, 17, 0, 25, 0)
av_close_input_file(input_format_context);
#else
avformat_close_input(&input_format_context);
#endif
input_format_context = nullptr;
}
return 1;
} // end int FFmpeg_Input::Close()
AVFrame *FFmpeg_Input::get_frame(int stream_id) {
int frameComplete = false;
AVPacket packet;
@ -142,8 +167,10 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id) {
}
dumpPacket(input_format_context->streams[packet.stream_index], &packet, "Received packet");
if ( (stream_id < 0) || (packet.stream_index == stream_id) ) {
Debug(3, "Packet is for our stream (%d)", packet.stream_index);
if ( (stream_id >= 0) && (packet.stream_index != stream_id) ) {
Debug(1,"Packet is not for our stream (%d)", packet.stream_index );
continue;
}
AVCodecContext *context = streams[packet.stream_index].context;
@ -169,11 +196,10 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id) {
}
frameComplete = 1;
} // end if it's the right stream
zm_av_packet_unref(&packet);
} // end while ! frameComplete
} // end while !frameComplete
return frame;
} // end AVFrame *FFmpeg_Input::get_frame
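// Usage sketch (hypothetical caller; Open() returns 0 on success as above):
//   FFmpeg_Input input;
//   if ( input.Open("/path/to/event.mp4") == 0 ) {
//     AVFrame *next = input.get_frame(-1);     // next frame from any stream
//     AVFrame *at = input.get_frame(0, 12.5);  // stream 0, ~12.5s in
//   }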
@ -220,7 +246,11 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id, double at) {
}
// Have to grab a frame to update our current frame to know where we are
get_frame(stream_id);
if ( is_video_stream(input_format_context->streams[stream_id]) ) {
zm_dump_video_frame(frame, "frame->pts > seek_target, got");
} else {
zm_dump_frame(frame, "frame->pts > seek_target, got");
}
} else if ( last_seek_request == seek_target ) {
// paused case, sending keepalives
return frame;
@ -230,7 +260,11 @@ AVFrame *FFmpeg_Input::get_frame(int stream_id, double at) {
// Seeking seems to typically seek to a keyframe, so then we have to decode until we get the frame we want.
if ( frame->pts <= seek_target ) {
if ( is_video_stream(input_format_context->streams[stream_id]) ) {
zm_dump_video_frame(frame, "pts <= seek_target");
} else {
zm_dump_frame(frame, "pts <= seek_target");
}
while ( frame && (frame->pts < seek_target) ) {
if ( !get_frame(stream_id) ) {
Warning("Got no frame. returning nothing");

View File

@ -20,6 +20,7 @@ class FFmpeg_Input {
~FFmpeg_Input();
int Open( const char *filename );
int Open( const AVStream *, const AVStream * );
int Close();
AVFrame *get_frame( int stream_id=-1 );
AVFrame *get_frame( int stream_id, double at );

180
src/zm_ffmpeg_output.cpp Normal file
View File

@ -0,0 +1,180 @@
#include "zm_ffmpeg_input.h"
#include "zm_logger.h"
#include "zm_ffmpeg.h"
FFmpeg_Output::FFmpeg_Output() {
input_format_context = NULL;
video_stream_id = -1;
audio_stream_id = -1;
av_register_all();
avcodec_register_all();
}
FFmpeg_Output::~FFmpeg_Output() {
}
int FFmpeg_Output::Open( const char *filepath ) {
int error;
/** Open the input file to read from it. */
if ( (error = avformat_open_input( &input_format_context, filepath, NULL, NULL)) < 0 ) {
Error("Could not open input file '%s' (error '%s')\n",
filepath, av_make_error_string(error).c_str() );
input_format_context = NULL;
return error;
}
/** Get information on the input file (number of streams etc.). */
if ( (error = avformat_find_stream_info(input_format_context, NULL)) < 0 ) {
Error( "Could not open find stream info (error '%s')\n",
av_make_error_string(error).c_str() );
avformat_close_input(&input_format_context);
return error;
}
for ( unsigned int i = 0; i < input_format_context->nb_streams; i += 1 ) {
if ( is_video_stream( input_format_context->streams[i] ) ) {
zm_dump_stream_format(input_format_context, i, 0, 0);
if ( video_stream_id == -1 ) {
video_stream_id = i;
// if we break, then we won't find the audio stream
} else {
Warning( "Have another video stream." );
}
} else if ( is_audio_stream( input_format_context->streams[i] ) ) {
if ( audio_stream_id == -1 ) {
audio_stream_id = i;
} else {
Warning( "Have another audio stream." );
}
}
streams[i].frame_count = 0;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
streams[i].context = avcodec_alloc_context3( NULL );
avcodec_parameters_to_context( streams[i].context, input_format_context->streams[i]->codecpar );
#else
streams[i].context = input_format_context->streams[i]->codec;
#endif
if ( !(streams[i].codec = avcodec_find_decoder(streams[i].context->codec_id)) ) {
Error( "Could not find input codec\n");
avformat_close_input(&input_format_context);
return AVERROR_EXIT;
} else {
Debug(1, "Using codec (%s) for stream %d", streams[i].codec->name, i );
}
if ((error = avcodec_open2( streams[i].context, streams[i].codec, NULL)) < 0) {
Error( "Could not open input codec (error '%s')\n",
av_make_error_string(error).c_str() );
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_free_context( &streams[i].context );
#endif
avformat_close_input(&input_format_context);
return error;
}
} // end foreach stream
if ( video_stream_id == -1 )
Error( "Unable to locate video stream in %s", filepath );
if ( audio_stream_id == -1 )
Debug( 3, "Unable to locate audio stream in %s", filepath );
return 0;
} // end int FFmpeg_Output::Open( const char * filepath )
AVFrame *FFmpeg_Output::get_frame( int stream_id ) {
Debug(1, "Getting frame from stream %d", stream_id );
int frameComplete = false;
AVPacket packet;
av_init_packet( &packet );
AVFrame *frame = zm_av_frame_alloc();
char errbuf[AV_ERROR_MAX_STRING_SIZE];
while ( !frameComplete ) {
int ret = av_read_frame( input_format_context, &packet );
if ( ret < 0 ) {
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
if (
// Check if EOF.
(ret == AVERROR_EOF || (input_format_context->pb && input_format_context->pb->eof_reached)) ||
// Check for Connection failure.
(ret == -110)
) {
Info( "av_read_frame returned %s.", errbuf );
return NULL;
}
Error( "Unable to read packet from stream %d: error %d \"%s\".", packet.stream_index, ret, errbuf );
return NULL;
}
if ( (stream_id < 0 ) || ( packet.stream_index == stream_id ) ) {
Debug(1,"Packet is for our stream (%d)", packet.stream_index );
AVCodecContext *context = streams[packet.stream_index].context;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_send_packet( context, &packet );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
continue;
} else {
Debug(1, "Success getting a packet");
}
#if HAVE_AVUTIL_HWCONTEXT_H
if ( hwaccel ) {
ret = avcodec_receive_frame( context, hwFrame );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to receive frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
continue;
}
ret = av_hwframe_transfer_data(frame, hwFrame, 0);
if (ret < 0) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to transfer frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
continue;
}
} else {
#endif
Debug(1,"Getting a frame?");
ret = avcodec_receive_frame( context, frame );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to send packet at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
continue;
}
#if HAVE_AVUTIL_HWCONTEXT_H
}
#endif
frameComplete = 1;
#else
ret = zm_avcodec_decode_video( streams[packet.stream_index].context, frame, &frameComplete, &packet );
if ( ret < 0 ) {
av_strerror( ret, errbuf, AV_ERROR_MAX_STRING_SIZE );
Error( "Unable to decode frame at frame %d: %s, continuing", streams[packet.stream_index].frame_count, errbuf );
zm_av_packet_unref( &packet );
continue;
}
#endif
} // end if it's the right stream
zm_av_packet_unref( &packet );
} // end while ! frameComplete
return frame;
} // end AVFrame *FFmpeg_Output::get_frame

46
src/zm_ffmpeg_output.h Normal file
View File

@ -0,0 +1,46 @@
#ifndef ZM_FFMPEG_OUTPUT_H
#define ZM_FFMPEG_OUTPUT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
#ifdef __cplusplus
}
#endif
class FFmpeg_Output {
public:
FFmpeg_Output();
~FFmpeg_Output();
int Open( const char *filename );
int Close();
AVFrame *put_frame( int stream_id=-1 );
AVFrame *put_packet( int stream_id=-1 );
int get_video_stream_id() {
return video_stream_id;
}
int get_audio_stream_id() {
return audio_stream_id;
}
private:
typedef struct {
AVCodecContext *context;
AVCodec *codec;
int frame_count;
} stream;
stream streams[2];
int video_stream_id;
int audio_stream_id;
AVFormatContext *input_format_context;
};
#endif // ZM_FFMPEG_OUTPUT_H

View File

@ -94,8 +94,8 @@ int FileCamera::PreCapture() {
return 0;
}
int FileCamera::Capture(Image &image) {
return image.ReadJpeg(path, colours, subpixelorder)?1:-1;
int FileCamera::Capture( ZMPacket &zm_packet ) {
return zm_packet.image->ReadJpeg(path, colours, subpixelorder) ? 1 : -1;
}
int FileCamera::PostCapture() {

View File

@ -23,7 +23,7 @@
#include "zm_camera.h"
#include "zm_buffer.h"
#include "zm_regexp.h"
#include "zm_packetqueue.h"
#include "zm_packet.h"
#include <sys/param.h>
@ -36,7 +36,19 @@ protected:
char path[PATH_MAX];
public:
FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
FileCamera(
int p_id,
const char *p_path,
int p_width,
int p_height,
int p_colours,
int p_brightness,
int p_contrast,
int p_hue,
int p_colour,
bool p_capture,
bool p_record_audio
);
~FileCamera();
const char *Path() const { return( path ); }
@ -44,10 +56,9 @@ public:
void Initialise();
void Terminate();
int PreCapture();
int Capture( Image &image );
int Capture( ZMPacket &p );
int PostCapture();
int CaptureAndRecord( Image &image, timeval recording, char* event_directory ) {return(0);};
int Close() { return 0; };
int Close() { return 0; };
};
#endif // ZM_FILE_CAMERA_H

View File

@ -166,6 +166,7 @@ Image::Image(int p_width, int p_height, int p_colours, int p_subpixelorder, uint
AllocImgBuffer(size);
}
text[0] = '\0';
imagePixFormat = AVPixFormat();
update_function_pointers();
}
@ -193,59 +194,107 @@ Image::Image(int p_width, int p_linesize, int p_height, int p_colours, int p_sub
AllocImgBuffer(size);
}
text[0] = '\0';
imagePixFormat = AVPixFormat();
update_function_pointers();
}
Image::Image(const AVFrame *frame) {
AVFrame *dest_frame = zm_av_frame_alloc();
text[0] = '\0';
width = frame->width;
height = frame->height;
pixels = width*height;
zm_dump_video_frame(frame, "Image.Assign(frame)");
// FIXME
colours = ZM_COLOUR_RGB32;
subpixelorder = ZM_SUBPIX_ORDER_RGBA;
imagePixFormat = AV_PIX_FMT_RGBA;
//(AVPixelFormat)frame->format;
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
size = av_image_get_buffer_size(AV_PIX_FMT_RGBA, width, height, 32);
// av_image_get_linesize isn't aligned, so we have to do that.
linesize = FFALIGN(av_image_get_linesize(AV_PIX_FMT_RGBA, width, 0), 32);
#else
linesize = FFALIGN(av_image_get_linesize(AV_PIX_FMT_RGBA, width, 0), 1);
size = avpicture_get_size(AV_PIX_FMT_RGBA, width, height);
size = avpicture_get_size(AV_PIX_FMT_RGB0, width, height);
#endif
buffer = nullptr;
holdbuffer = 0;
AllocImgBuffer(size);
this->Assign(frame);
}
static void dont_free(void *opaque, uint8_t *data) {
}
int Image::PopulateFrame(AVFrame *frame) {
Debug(1, "PopulateFrame: width %d height %d linesize %d colours %d imagesize %d %s",
width, height, linesize, colours, size,
av_get_pix_fmt_name(imagePixFormat)
);
AVBufferRef *ref = av_buffer_create(buffer, size,
dont_free, /* Free callback */
nullptr, /* opaque */
0 /* flags */
);
if ( !ref ) {
Warning("Failed to create av_buffer ");
}
frame->buf[0] = ref;
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(dest_frame->data, dest_frame->linesize,
buffer, AV_PIX_FMT_RGBA, width, height, 32);
// From what I've read, we should align the linesizes to 32bit so that ffmpeg can use SIMD instructions too.
int size = av_image_fill_arrays(
frame->data, frame->linesize,
buffer, imagePixFormat, width, height,
32 //alignment
);
if ( size < 0 ) {
Error("Problem setting up data pointers into image %s",
av_make_error_string(size).c_str());
return size;
}
#else
avpicture_fill( (AVPicture *)dest_frame, buffer,
AV_PIX_FMT_RGBA, width, height);
avpicture_fill((AVPicture *)frame, buffer,
imagePixFormat, width, height);
#endif
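// Worked example of the 32-byte alignment used above: a 1930-pixel-wide
// RGBA row is 1930*4 = 7720 bytes; FFALIGN(7720, 32) pads it to 7744, so
// every row starts on a 32-byte boundary and swscale can use aligned SIMD
// loads and stores.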
frame->width = width;
frame->height = height;
frame->format = imagePixFormat;
Debug(1, "PopulateFrame: width %d height %d linesize %d colours %d imagesize %d", width, height, linesize, colours, size);
zm_dump_video_frame(frame, "Image.Populate(frame)");
return 1;
}
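// Note: dont_free() above is deliberately a no-op, so a frame populated this
// way only borrows the Image's buffer; the Image must outlive the AVFrame.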
void Image::Assign(const AVFrame *frame) {
/* Assume the dimensions etc are correct. FIXME */
// Desired format
AVPixelFormat format = (AVPixelFormat)AVPixFormat();
AVFrame *dest_frame = zm_av_frame_alloc();
PopulateFrame(dest_frame);
zm_dump_video_frame(frame, "source frame");
zm_dump_video_frame(dest_frame, "dest frame before convert");
#if HAVE_LIBSWSCALE
sws_convert_context = sws_getCachedContext(
sws_convert_context,
width,
height,
(AVPixelFormat)frame->format,
width, height,
AV_PIX_FMT_RGBA, SWS_BICUBIC, nullptr,
nullptr, nullptr);
width, height, (AVPixelFormat)frame->format,
width, height, format,
SWS_BICUBIC, nullptr, nullptr, nullptr);
if ( sws_convert_context == nullptr )
Fatal("Unable to create conversion context");
if ( sws_scale(sws_convert_context, frame->data, frame->linesize, 0, frame->height,
if ( sws_scale(sws_convert_context,
frame->data, frame->linesize, 0, frame->height,
dest_frame->data, dest_frame->linesize) < 0 )
Fatal("Unable to convert raw format %u to target format %u", frame->format, AV_PIX_FMT_RGBA);
#else // HAVE_LIBSWSCALE
Fatal("You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras");
#endif // HAVE_LIBSWSCALE
zm_dump_video_frame(dest_frame, "dest frame after convert");
av_frame_free(&dest_frame);
update_function_pointers();
} // end Image::Image(const AVFrame *frame)
@ -265,6 +314,7 @@ Image::Image(const Image &p_image) {
AllocImgBuffer(size);
(*fptr_imgbufcpy)(buffer, p_image.buffer, size);
strncpy(text, p_image.text, sizeof(text));
imagePixFormat = p_image.imagePixFormat;
update_function_pointers();
}
@ -679,6 +729,7 @@ void Image::Assign(
if ( new_buffer != buffer )
(*fptr_imgbufcpy)(buffer, new_buffer, size);
update_function_pointers();
}
void Image::Assign(const Image &image) {
@ -710,7 +761,7 @@ void Image::Assign(const Image &image) {
return;
}
} else {
if ( new_size > allocation || !buffer) {
if ( new_size > allocation || !buffer ) {
// DumpImgBuffer(); This is also done in AllocImgBuffer
AllocImgBuffer(new_size);
}
@ -1796,8 +1847,8 @@ Image *Image::Highlight( unsigned int n_images, Image *images[], const Rgb thres
return result;
}
/* New function to allow buffer re-using instead of allocating memory for the delta image every time */
void Image::Delta( const Image &image, Image* targetimage) const {
/* New function to allow buffer re-using instead of allocating memory for the delta image every time */
void Image::Delta(const Image &image, Image* targetimage) const {
#ifdef ZM_IMAGE_PROFILING
struct timespec start,end,diff;
unsigned long long executetime;
@ -1940,8 +1991,7 @@ void Image::Annotate(
const Coord &coord,
const unsigned int size,
const Rgb fg_colour,
const Rgb bg_colour
) {
const Rgb bg_colour) {
strncpy(text, p_text, sizeof(text)-1);
unsigned int index = 0;
@ -1967,7 +2017,7 @@ void Image::Annotate(
const uint16_t char_width = font.GetCharWidth();
const uint16_t char_height = font.GetCharHeight();
const uint64_t *font_bitmap = font.GetBitmapData();
Debug(1, "Font size %d, char_width %d char_height %d", size, char_width, char_height);
Debug(4, "Font size %d, char_width %d char_height %d", size, char_width, char_height);
while ( (index < text_len) && (line_len = strcspn(line, "\n")) ) {
unsigned int line_width = line_len * char_width;
@ -5234,3 +5284,5 @@ __attribute__((noinline)) void std_deinterlace_4field_abgr(uint8_t* col1, uint8_
pncurrent += 4;
}
}

View File

@ -108,6 +108,7 @@ protected:
double _1_m;
static int CompareYX( const void *p1, const void *p2 ) {
// This is because these functions are passed to qsort
const Edge *e1 = reinterpret_cast<const Edge *>(p1), *e2 = reinterpret_cast<const Edge *>(p2);
if ( e1->min_y == e2->min_y )
return( int(e1->min_x - e2->min_x) );
@ -161,6 +162,7 @@ protected:
unsigned int size;
unsigned int subpixelorder;
unsigned long allocation;
_AVPIXELFORMAT imagePixFormat;
uint8_t *buffer;
int buffertype; /* 0=not ours, no need to call free(), 1=malloc() buffer, 2=new buffer */
int holdbuffer; /* Hold the buffer instead of replacing it with new one */
@ -171,8 +173,8 @@ public:
explicit Image(const char *filename);
Image(int p_width, int p_height, int p_colours, int p_subpixelorder, uint8_t *p_buffer=0, unsigned int padding=0);
Image(int p_width, int p_linesize, int p_height, int p_colours, int p_subpixelorder, uint8_t *p_buffer=0, unsigned int padding=0);
explicit Image( const Image &p_image );
explicit Image( const AVFrame *frame );
explicit Image(const Image &p_image);
explicit Image(const AVFrame *frame);
~Image();
static void Initialise();
@ -186,11 +188,30 @@ public:
inline unsigned int SubpixelOrder() const { return subpixelorder; }
inline unsigned int Size() const { return size; }
inline AVPixelFormat AVPixFormat() {
if ( colours == ZM_COLOUR_RGB32 ) {
return AV_PIX_FMT_RGBA;
} else if ( colours == ZM_COLOUR_RGB24 ) {
if ( subpixelorder == ZM_SUBPIX_ORDER_BGR ) {
return AV_PIX_FMT_BGR24;
} else {
return AV_PIX_FMT_RGB24;
}
} else if ( colours == ZM_COLOUR_GRAY8 ) {
return AV_PIX_FMT_GRAY8;
} else {
Error("Unknown colours (%d)",colours);
return AV_PIX_FMT_RGBA;
}
}
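// Summary of the mapping above:
//   ZM_COLOUR_RGB32               -> AV_PIX_FMT_RGBA
//   ZM_COLOUR_RGB24 + BGR order   -> AV_PIX_FMT_BGR24
//   ZM_COLOUR_RGB24 otherwise     -> AV_PIX_FMT_RGB24
//   ZM_COLOUR_GRAY8               -> AV_PIX_FMT_GRAY8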
/* Internal buffer should not be modified from functions outside of this class */
inline const uint8_t* Buffer() const { return buffer; }
inline const uint8_t* Buffer( unsigned int x, unsigned int y= 0 ) const { return &buffer[(y*linesize)+x]; }
inline const uint8_t* Buffer(unsigned int x, unsigned int y=0) const { return &buffer[(y*linesize)+x]; }
/* Request writeable buffer */
uint8_t* WriteBuffer(const unsigned int p_width, const unsigned int p_height, const unsigned int p_colours, const unsigned int p_subpixelorder);
// Is only acceptable on a pre-allocated buffer
uint8_t* WriteBuffer() { return holdbuffer ? buffer : nullptr; };
inline int IsBufferHeld() const { return holdbuffer; }
inline void HoldBuffer(int tohold) { holdbuffer = tohold; }
@ -210,6 +231,7 @@ public:
const uint8_t* new_buffer,
const size_t buffer_size);
void Assign(const Image &image);
void Assign(const AVFrame *frame);
void AssignDirect(
const unsigned int p_width,
const unsigned int p_height,
@ -219,6 +241,8 @@ public:
const size_t buffer_size,
const int p_buffertype);
int PopulateFrame(AVFrame *frame);
inline void CopyBuffer(const Image &image) {
Assign(image);
}
@ -231,40 +255,39 @@ public:
return *this;
}
bool ReadRaw( const char *filename );
bool WriteRaw( const char *filename ) const;
bool ReadRaw(const char *filename);
bool WriteRaw(const char *filename) const;
bool ReadJpeg( const char *filename, unsigned int p_colours, unsigned int p_subpixelorder);
bool ReadJpeg(const char *filename, unsigned int p_colours, unsigned int p_subpixelorder);
bool WriteJpeg ( const char *filename) const;
bool WriteJpeg ( const char *filename, bool on_blocking_abort) const;
bool WriteJpeg ( const char *filename, int quality_override ) const;
bool WriteJpeg ( const char *filename, struct timeval timestamp ) const;
bool WriteJpeg ( const char *filename, int quality_override, struct timeval timestamp ) const;
bool WriteJpeg ( const char *filename, int quality_override, struct timeval timestamp, bool on_blocking_abort ) const;
bool WriteJpeg(const char *filename) const;
bool WriteJpeg(const char *filename, bool on_blocking_abort) const;
bool WriteJpeg(const char *filename, int quality_override) const;
bool WriteJpeg(const char *filename, struct timeval timestamp) const;
bool WriteJpeg(const char *filename, int quality_override, struct timeval timestamp) const;
bool WriteJpeg(const char *filename, int quality_override, struct timeval timestamp, bool on_blocking_abort) const;
bool DecodeJpeg( const JOCTET *inbuffer, int inbuffer_size, unsigned int p_colours, unsigned int p_subpixelorder);
bool EncodeJpeg( JOCTET *outbuffer, int *outbuffer_size, int quality_override=0 ) const;
bool DecodeJpeg(const JOCTET *inbuffer, int inbuffer_size, unsigned int p_colours, unsigned int p_subpixelorder);
bool EncodeJpeg(JOCTET *outbuffer, int *outbuffer_size, int quality_override=0) const;
#if HAVE_ZLIB_H
bool Unzip( const Bytef *inbuffer, unsigned long inbuffer_size );
bool Zip( Bytef *outbuffer, unsigned long *outbuffer_size, int compression_level=Z_BEST_SPEED ) const;
bool Unzip(const Bytef *inbuffer, unsigned long inbuffer_size);
bool Zip(Bytef *outbuffer, unsigned long *outbuffer_size, int compression_level=Z_BEST_SPEED) const;
#endif // HAVE_ZLIB_H
bool Crop( unsigned int lo_x, unsigned int lo_y, unsigned int hi_x, unsigned int hi_y );
bool Crop( const Box &limits );
bool Crop(unsigned int lo_x, unsigned int lo_y, unsigned int hi_x, unsigned int hi_y);
bool Crop(const Box &limits);
void Overlay( const Image &image );
void Overlay( const Image &image, unsigned int x, unsigned int y );
void Blend( const Image &image, int transparency=12 );
void Overlay(const Image &image);
void Overlay(const Image &image, unsigned int x, unsigned int y);
void Blend(const Image &image, int transparency=12);
static Image *Merge( unsigned int n_images, Image *images[] );
static Image *Merge( unsigned int n_images, Image *images[], double weight );
static Image *Highlight( unsigned int n_images, Image *images[], const Rgb threshold=RGB_BLACK, const Rgb ref_colour=RGB_RED );
//Image *Delta( const Image &image ) const;
void Delta( const Image &image, Image* targetimage) const;
const Coord centreCoord( const char *text, const int size ) const;
const Coord centreCoord(const char *text, const int size) const;
void MaskPrivacy( const unsigned char *p_bitmask, const Rgb pixel_colour=0x00222222 );
void Annotate( const char *p_text, const Coord &coord, const unsigned int size=1, const Rgb fg_colour=RGB_WHITE, const Rgb bg_colour=RGB_BLACK );
Image *HighlightEdges( Rgb colour, unsigned int p_colours, unsigned int p_subpixelorder, const Box *limits=0 );

View File

@ -263,8 +263,7 @@ int LibvlcCamera::PreCapture() {
}
// Should not return -1 as cancels capture. Always wait for image if available.
int LibvlcCamera::Capture(Image &image) {
int LibvlcCamera::Capture( ZMPacket &zm_packet ) {
// newImage is a mutex/condition based flag to tell us when there is an image available
while( !mLibvlcData.newImage.getValueImmediate() ) {
if (zm_terminate)
@ -273,17 +272,13 @@ int LibvlcCamera::Capture(Image &image) {
}
mLibvlcData.mutex.lock();
image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
zm_packet.image->Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
mLibvlcData.newImage.setValueImmediate(false);
mLibvlcData.mutex.unlock();
return 1;
}
int LibvlcCamera::CaptureAndRecord(Image &image, timeval recording, char* event_directory) {
return 0;
}
int LibvlcCamera::PostCapture() {
return 0;
}

View File

@ -31,8 +31,7 @@
#endif
// Used by libvlc callbacks
struct LibvlcPrivateData
{
struct LibvlcPrivateData {
uint8_t* buffer;
uint8_t* prevBuffer;
time_t prevTime;
@ -70,8 +69,7 @@ public:
int PrimeCapture();
int PreCapture();
int Capture( Image &image );
int CaptureAndRecord( Image &image, timeval recording, char* event_directory );
int Capture( ZMPacket &p );
int PostCapture();
int Close() { return 0; };
};

View File

@ -162,7 +162,7 @@ int VncCamera::PrimeCapture() {
mRfb = nullptr;
return -1;
}
if ( (mRfb->width != width) or (mRfb->height != height) ) {
if ( ((unsigned int)mRfb->width != width) or ((unsigned int)mRfb->height != height) ) {
Warning("Specified dimensions do not match screen size monitor: (%dx%d) != vnc: (%dx%d)",
width, height, mRfb->width, mRfb->height);
}
@ -182,11 +182,11 @@ int VncCamera::PreCapture() {
return res == TRUE ? 1 : -1;
}
int VncCamera::Capture(Image &image) {
int VncCamera::Capture(ZMPacket &zm_packet) {
if ( ! mVncData.buffer ) {
return 0;
}
uint8_t *directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
uint8_t *directbuffer = zm_packet.image->WriteBuffer(width, height, colours, subpixelorder);
int rc = scale.Convert(
mVncData.buffer,
mRfb->si.framebufferHeight * mRfb->si.framebufferWidth * 4,
@ -205,10 +205,6 @@ int VncCamera::PostCapture() {
return 0;
}
int VncCamera::CaptureAndRecord(Image &image, timeval recording, char* event_directory) {
return 0;
}
int VncCamera::Close() {
return 0;
}

View File

@ -49,9 +49,8 @@ public:
int PreCapture();
int PrimeCapture();
int Capture( Image &image );
int Capture(ZMPacket &packet);
int PostCapture();
int CaptureAndRecord( Image &image, timeval recording, char* event_directory );
int Close();
};

View File

@ -285,7 +285,6 @@ static const uint32_t prefered_gray8_formats[] = {
};
#endif
int LocalCamera::camera_count = 0;
int LocalCamera::channel_count = 0;
int LocalCamera::channels[VIDEO_MAX_FRAME];
@ -428,10 +427,6 @@ LocalCamera::LocalCamera(
/* RGB24 palette and 24bit target colourspace */
} else if ( palette == V4L2_PIX_FMT_RGB24 && colours == ZM_COLOUR_RGB24 ) {
conversion_type = 0;
subpixelorder = ZM_SUBPIX_ORDER_RGB;
/* BGR24 palette and 24bit target colourspace */
} else if ( palette == V4L2_PIX_FMT_BGR24 && colours == ZM_COLOUR_RGB24 ) {
conversion_type = 0;
subpixelorder = ZM_SUBPIX_ORDER_BGR;
@ -452,7 +447,7 @@ LocalCamera::LocalCamera(
#if HAVE_LIBSWSCALE
/* Try using swscale for the conversion */
conversion_type = 1;
Debug(2,"Using swscale for image conversion");
Debug(2, "Using swscale for image conversion");
if ( colours == ZM_COLOUR_RGB32 ) {
subpixelorder = ZM_SUBPIX_ORDER_RGBA;
imagePixFormat = AV_PIX_FMT_RGBA;
@ -595,15 +590,15 @@ LocalCamera::LocalCamera(
conversion_type = 2; /* Try ZM format conversions */
}
}
#else
/* Don't have swscale, see what we can do */
conversion_type = 2;
#endif
/* Our YUYV->Grayscale conversion is a lot faster than swscale's */
if ( colours == ZM_COLOUR_GRAY8 && (palette == VIDEO_PALETTE_YUYV || palette == VIDEO_PALETTE_YUV422) ) {
conversion_type = 2;
}
#else
/* Don't have swscale, see what we can do */
conversion_type = 2;
#endif
if ( conversion_type == 2 ) {
Debug(2,"Using ZM for image conversion");
if ( palette == VIDEO_PALETTE_RGB32 && colours == ZM_COLOUR_GRAY8 ) {
@ -645,17 +640,17 @@ LocalCamera::LocalCamera(
} else {
Fatal("Unable to find a suitable format conversion for the selected palette and target colorspace.");
}
}
} // end if conversion_type == 2
}
}
#endif // ZM_HAS_V4L1
last_camera = this;
Debug(3,"Selected subpixelorder: %u",subpixelorder);
Debug(3, "Selected subpixelorder: %u", subpixelorder);
#if HAVE_LIBSWSCALE
/* Initialize swscale stuff */
if ( capture && conversion_type == 1 ) {
if ( capture and (conversion_type == 1) ) {
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
tmpPicture = av_frame_alloc();
#else
@ -673,7 +668,10 @@ LocalCamera::LocalCamera(
Fatal("Image size mismatch. Required: %d Available: %u", pSize, imagesize);
}
imgConversionContext = sws_getContext(width, height, capturePixFormat, width, height, imagePixFormat, SWS_BICUBIC, nullptr, nullptr, nullptr);
imgConversionContext = sws_getContext(
width, height, capturePixFormat,
width, height, imagePixFormat, SWS_BICUBIC,
nullptr, nullptr, nullptr);
if ( !imgConversionContext ) {
Fatal("Unable to initialise image scaling context");
@ -681,8 +679,11 @@ LocalCamera::LocalCamera(
} else {
tmpPicture = nullptr;
imgConversionContext = nullptr;
}
} // end if capture and conversion_type == swscale
#endif
mVideoStreamId = 0;
mAudioStreamId = -1;
video_stream = nullptr;
} // end LocalCamera::LocalCamera
LocalCamera::~LocalCamera() {
@ -691,23 +692,22 @@ LocalCamera::~LocalCamera() {
#if HAVE_LIBSWSCALE
/* Clean up swscale stuff */
if ( capture && conversion_type == 1 ) {
if ( capture && (conversion_type == 1) ) {
sws_freeContext(imgConversionContext);
imgConversionContext = nullptr;
av_frame_free(&tmpPicture);
}
#endif
}
if ( video_stream ) {
// Should also free streams
avformat_free_context(oc);
video_stream = nullptr;
}
} // end LocalCamera::~LocalCamera
void LocalCamera::Initialise() {
#if HAVE_LIBSWSCALE
if ( logDebugging() )
av_log_set_level(AV_LOG_DEBUG);
else
av_log_set_level(AV_LOG_QUIET);
#endif // HAVE_LIBSWSCALE
Debug(3, "Opening video device %s", device.c_str());
//if ( (vid_fd = open( device.c_str(), O_RDWR|O_NONBLOCK, 0 )) < 0 )
if ( (vid_fd = open(device.c_str(), O_RDWR, 0)) < 0 )
@ -781,7 +781,7 @@ void LocalCamera::Initialise() {
}
} else {
if ( vidioctl(vid_fd, VIDIOC_S_FMT, &v4l2_data.fmt) < 0 ) {
Fatal("Failed to set video format: %s", strerror(errno));
Error("Failed to set video format: %s", strerror(errno));
}
}
@ -807,6 +807,13 @@ void LocalCamera::Initialise() {
, v4l2_data.fmt.fmt.pix.priv
);
if ( v4l2_data.fmt.fmt.pix.width != width ) {
Warning("Failed to set requested width");
}
if ( v4l2_data.fmt.fmt.pix.height != height ) {
Warning("Failed to set requested height");
}
/* Buggy driver paranoia. */
unsigned int min;
min = v4l2_data.fmt.fmt.pix.width * 2;
@ -840,8 +847,8 @@ void LocalCamera::Initialise() {
if ( vidioctl(vid_fd, VIDIOC_G_JPEGCOMP, &jpeg_comp) < 0 ) {
Debug(3,"Failed to get updated JPEG compression options: %s", strerror(errno));
} else {
Debug(4, "JPEG quality: %d",jpeg_comp.quality);
Debug(4, "JPEG markers: %#x",jpeg_comp.jpeg_markers);
Debug(4, "JPEG quality: %d, markers: %#x",
jpeg_comp.quality, jpeg_comp.jpeg_markers);
}
}
}
@ -924,7 +931,8 @@ void LocalCamera::Initialise() {
#else
avpicture_fill(
(AVPicture *)capturePictures[i],
(uint8_t*)v4l2_data.buffers[i].start, capturePixFormat,
(uint8_t*)v4l2_data.buffers[i].start,
capturePixFormat,
v4l2_data.fmt.fmt.pix.width,
v4l2_data.fmt.fmt.pix.height
);
@ -1172,11 +1180,11 @@ void LocalCamera::Terminate() {
Error("Failed to munmap buffers: %s", strerror(errno));
delete[] v4l1_data.buffers;
}
} // end if using v4l1
#endif // ZM_HAS_V4L1
close(vid_fd);
} // end Terminate
} // end LocalCamera::Terminate
uint32_t LocalCamera::AutoSelectFormat(int p_colours) {
/* Automatic format selection */
@ -1268,13 +1276,16 @@ uint32_t LocalCamera::AutoSelectFormat(int p_colours) {
#endif /* ZM_HAS_V4L2 */
return selected_palette;
}
} //uint32_t LocalCamera::AutoSelectFormat(int p_colours)
#define capString(test,prefix,yesString,noString,capability) \
(test) ? (prefix yesString " " capability "\n") : (prefix noString " " capability "\n")
bool LocalCamera::GetCurrentSettings(const char *device, char *output, int version, bool verbose) {
bool LocalCamera::GetCurrentSettings(
const char *device,
char *output,
int version,
bool verbose) {
output[0] = 0;
char *output_ptr = output;
@ -1766,7 +1777,7 @@ bool LocalCamera::GetCurrentSettings(const char *device, char *output, int versi
return true;
}
int LocalCamera::Brightness( int p_brightness ) {
int LocalCamera::Brightness(int p_brightness) {
#if ZM_HAS_V4L2
if ( v4l_version == 2 ) {
struct v4l2_control vid_control;
@ -1820,7 +1831,7 @@ int LocalCamera::Brightness( int p_brightness ) {
return -1;
}
int LocalCamera::Hue( int p_hue ) {
int LocalCamera::Hue(int p_hue) {
#if ZM_HAS_V4L2
if ( v4l_version == 2 ) {
struct v4l2_control vid_control;
@ -1978,11 +1989,14 @@ int LocalCamera::PrimeCapture() {
Debug(2, "Priming capture");
#if ZM_HAS_V4L2
if ( v4l_version == 2 ) {
Debug(3, "Queueing buffers");
Debug(3, "Queueing (%d) buffers", v4l2_data.reqbufs.count);
for ( unsigned int frame = 0; frame < v4l2_data.reqbufs.count; frame++ ) {
struct v4l2_buffer vid_buf;
memset(&vid_buf, 0, sizeof(vid_buf));
if ( v4l2_data.fmt.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ) {
Warning("Unknown type: (%d)", v4l2_data.fmt.type);
}
vid_buf.type = v4l2_data.fmt.type;
vid_buf.memory = v4l2_data.reqbufs.memory;
@ -2012,17 +2026,18 @@ int LocalCamera::PrimeCapture() {
}
}
#endif // ZM_HAS_V4L1
mVideoStreamId = 0;
return 0;
return 1;
} // end LocalCamera::PrimeCapture
int LocalCamera::PreCapture() {
//Debug(5, "Pre-capturing");
return 0;
return 1;
}
int LocalCamera::Capture(Image &image) {
Debug(3, "Capturing");
int LocalCamera::Capture(ZMPacket &zm_packet) {
// We assume that the avpacket is allocated, and just needs to be filled
static uint8_t* buffer = nullptr;
int buffer_bytesused = 0;
int capture_frame = -1;
@ -2057,6 +2072,7 @@ int LocalCamera::Capture(Image &image) {
}
return -1;
}
Debug(5, "Captured a frame");
v4l2_data.bufptr = &vid_buf;
capture_frame = v4l2_data.bufptr->index;
@ -2078,9 +2094,12 @@ int LocalCamera::Capture(Image &image) {
if ( (v4l2_data.fmt.fmt.pix.width * v4l2_data.fmt.fmt.pix.height) != (width * height) ) {
Fatal("Captured image dimensions differ: V4L2: %dx%d monitor: %dx%d",
v4l2_data.fmt.fmt.pix.width,v4l2_data.fmt.fmt.pix.height,width,height);
v4l2_data.fmt.fmt.pix.width, v4l2_data.fmt.fmt.pix.height, width, height);
}
} // end if v4l2
#if ZM_HAS_V4L1
else
#endif // ZM_HAS_V4L1
#endif // ZM_HAS_V4L2
#if ZM_HAS_V4L1
if ( v4l_version == 1 ) {
@ -2108,21 +2127,25 @@ int LocalCamera::Capture(Image &image) {
buffer = v4l1_data.bufptr+v4l1_data.frames.offsets[capture_frame];
}
#endif // ZM_HAS_V4L1
} /* prime capture */
if ( conversion_type != 0 ) {
if ( !zm_packet.image ) {
Debug(1, "Allocating image");
zm_packet.image = new Image(width, height, colours, subpixelorder);
}
Debug(3, "Performing format conversion");
if ( conversion_type != 0 ) {
Debug(3, "Performing format conversion %d", conversion_type);
/* Request a writeable buffer of the target image */
uint8_t* directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
uint8_t *directbuffer = zm_packet.image->WriteBuffer(width, height, colours, subpixelorder);
if ( directbuffer == nullptr ) {
Error("Failed requesting writeable buffer for the captured image.");
return -1;
}
#if HAVE_LIBSWSCALE
if ( conversion_type == 1 ) {
Debug(9, "Calling sws_scale to perform the conversion");
/* Use swscale to convert the image directly into the shared memory */
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
@ -2133,32 +2156,37 @@ int LocalCamera::Capture(Image &image) {
avpicture_fill( (AVPicture *)tmpPicture, directbuffer,
imagePixFormat, width, height );
#endif
sws_scale( imgConversionContext,
sws_scale(
imgConversionContext,
capturePictures[capture_frame]->data,
capturePictures[capture_frame]->linesize,
0,
height,
tmpPicture->data,
tmpPicture->linesize );
}
tmpPicture->linesize
);
} else
#endif
if ( conversion_type == 2 ) {
Debug(9, "Calling the conversion function");
/* Call the image conversion function and convert directly into the shared memory */
(*conversion_fptr)(buffer, directbuffer, pixels);
} else if ( conversion_type == 3 ) {
// Need to store the jpeg data too
Debug(9, "Decoding the JPEG image");
/* JPEG decoding */
image.DecodeJpeg(buffer, buffer_bytesused, colours, subpixelorder);
zm_packet.image->DecodeJpeg(buffer, buffer_bytesused, colours, subpixelorder);
}
} else {
Debug(3, "No format conversion performed. Assigning the image");
/* No conversion was performed, the image is in the V4L buffers and needs to be copied into the shared memory */
image.Assign( width, height, colours, subpixelorder, buffer, imagesize);
}
zm_packet.image->Assign(width, height, colours, subpixelorder, buffer, imagesize);
} // end if doing conversion or not
zm_packet.codec_type = AVMEDIA_TYPE_VIDEO;
zm_packet.keyframe = 1;
return 1;
} // end int LocalCamera::Capture()
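// Illustrative sketch (not part of this change): the V4L2 dequeue/requeue
// cycle that Capture() and PostCapture() split between them:
//   struct v4l2_buffer buf;
//   memset(&buf, 0, sizeof(buf));
//   buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
//   buf.memory = V4L2_MEMORY_MMAP;
//   if ( vidioctl(vid_fd, VIDIOC_DQBUF, &buf) == 0 ) {
//     // buf.index names the mmapped buffer, buf.bytesused its payload size
//     // ... consume v4l2_data.buffers[buf.index] ...
//     vidioctl(vid_fd, VIDIOC_QBUF, &buf);  // hand the buffer back
//   }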
@ -2177,7 +2205,7 @@ int LocalCamera::PostCapture() {
}
v4l2_std_id stdId = standards[next_channel];
if ( vidioctl( vid_fd, VIDIOC_S_STD, &stdId ) < 0 ) {
if ( vidioctl(vid_fd, VIDIOC_S_STD, &stdId) < 0 ) {
Error("Failed to set video format %d: %s", standards[next_channel], strerror(errno));
return -1;
}
@ -2192,6 +2220,9 @@ int LocalCamera::PostCapture() {
Error("Unable to requeue buffer due to not v4l2_data")
}
}
#if ZM_HAS_V4L1
else
#endif // ZM_HAS_V4L1
#endif // ZM_HAS_V4L2
#if ZM_HAS_V4L1
if ( v4l_version == 1 ) {
@ -2227,4 +2258,32 @@ int LocalCamera::PostCapture() {
return 0;
}
AVStream *LocalCamera::get_VideoStream() {
if ( ! video_stream ) {
oc = avformat_alloc_context();
Debug(1, "Allocating avstream");
video_stream = avformat_new_stream(oc, nullptr);
if ( video_stream ) {
video_stream->time_base = (AVRational){1, 1000000}; // microsecond time base
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
video_stream->codecpar->width = width;
video_stream->codecpar->height = height;
video_stream->codecpar->format = GetFFMPEGPixelFormat(colours, subpixelorder);
video_stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
video_stream->codecpar->codec_id = AV_CODEC_ID_NONE;
Debug(1, "Allocating avstream %p %p %d", video_stream, video_stream->codecpar, video_stream->codecpar->codec_id);
#else
video_stream->codec->width = width;
video_stream->codec->height = height;
video_stream->codec->pix_fmt = GetFFMPEGPixelFormat(colours, subpixelorder);
video_stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
video_stream->codec->codec_id = AV_CODEC_ID_NONE;
#endif
} else {
Error("Can't create video stream");
}
}
return video_stream;
}
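// A consumer can clone these parameters into its own muxer without touching
// the camera's stream (sketch, assuming an output AVFormatContext *oc_out):
//   AVStream *out = avformat_new_stream(oc_out, nullptr);
//   avcodec_parameters_copy(out->codecpar, video_stream->codecpar);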
#endif // ZM_HAS_V4L

View File

@ -23,7 +23,7 @@
#include "zm.h"
#include "zm_camera.h"
#include "zm_image.h"
#include "zm_packetqueue.h"
#include "zm_packet.h"
#if ZM_HAS_V4L
@ -49,18 +49,15 @@
// directly connect to the host machine and which are accessed
// via a video interface.
//
class LocalCamera : public Camera
{
class LocalCamera : public Camera {
protected:
#if ZM_HAS_V4L2
struct V4L2MappedBuffer
{
struct V4L2MappedBuffer {
void *start;
size_t length;
};
struct V4L2Data
{
struct V4L2Data {
v4l2_cropcap cropcap;
v4l2_crop crop;
v4l2_format fmt;
@ -71,8 +68,7 @@ protected:
#endif // ZM_HAS_V4L2
#if ZM_HAS_V4L1
struct V4L1Data
{
struct V4L1Data {
int active_frame;
video_mbuf frames;
video_mmap *buffers;
@ -160,12 +156,12 @@ public:
int PrimeCapture()override ;
int PreCapture()override ;
int Capture( Image &image )override ;
int Capture(ZMPacket &p) override;
int PostCapture()override ;
int CaptureAndRecord( Image &image, timeval recording, char* event_directory ) override {return(0);};
int Close() override { return 0; };
static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose );
AVStream* get_VideoStream();
};
#endif // ZM_HAS_V4L

View File

@ -36,6 +36,7 @@
#ifdef __FreeBSD__
#include <sys/thr.h>
#endif
#include <cstdarg>
bool Logger::smInitialised = false;
Logger *Logger::smInstance = nullptr;
@ -271,9 +272,7 @@ std::string Logger::strEnv(const std::string &name, const std::string &defaultVa
}
char *Logger::getTargettedEnv(const std::string &name) {
std::string envName;
envName = name+"_"+mId;
std::string envName = name+"_"+mId;
char *envPtr = getenv(envName.c_str());
if ( !envPtr && mId != mIdRoot ) {
envName = name+"_"+mIdRoot;
@ -523,8 +522,10 @@ void Logger::logPrint(bool hex, const char * const filepath, const int line, con
puts(logString);
fflush(stdout);
}
if ( level <= mFileLevel ) {
if ( !mLogFileFP ) {
// We do this here so that we only create the file if we ever write to it.
log_mutex.unlock();
openFile();
log_mutex.lock();
@ -536,20 +537,14 @@ void Logger::logPrint(bool hex, const char * const filepath, const int line, con
} else {
puts("Logging to file, but failed to open it\n");
}
#if 0
} else {
printf("Not writing to log file because level %d %s <= mFileLevel %d %s\nstring: %s\n",
level, smCodes[level].c_str(), mFileLevel, smCodes[mFileLevel].c_str(), logString);
#endif
}
*syslogEnd = '\0';
if ( level <= mDatabaseLevel ) {
char sql[ZM_SQL_MED_BUFSIZ];
char escapedString[(strlen(syslogStart)*2)+1];
} // end if level <= mFileLevel
if ( level <= mDatabaseLevel ) {
if ( !db_mutex.trylock() ) {
char escapedString[(strlen(syslogStart)*2)+1];
mysql_real_escape_string(&dbconn, escapedString, syslogStart, strlen(syslogStart));
char sql[ZM_SQL_MED_BUFSIZ];
snprintf(sql, sizeof(sql),
"INSERT INTO `Logs` "
"( `TimeKey`, `Component`, `ServerId`, `Pid`, `Level`, `Code`, `Message`, `File`, `Line` )"
@ -567,26 +562,26 @@ void Logger::logPrint(bool hex, const char * const filepath, const int line, con
} else {
Level tempDatabaseLevel = mDatabaseLevel;
databaseLevel(NOLOG);
Error("Can't insert log entry: sql(%s) error(db is locked)", logString);
Error("Can't insert log entry: sql(%s) error(%s)", syslogStart, mysql_error(&dbconn));
databaseLevel(tempDatabaseLevel);
}
}
db_mutex.unlock();
} // end if level <= mDatabaseLevel
if ( level <= mSyslogLevel ) {
int priority = smSyslogPriorities[level];
//priority |= LOG_DAEMON;
syslog(priority, "%s [%s] [%s]", classString, mId.c_str(), syslogStart);
*syslogEnd = '\0';
syslog(smSyslogPriorities[level], "%s [%s] [%s]", classString, mId.c_str(), syslogStart);
}
free(filecopy);
if ( level <= FATAL ) {
log_mutex.unlock();
if ( level <= FATAL ) {
logTerm();
zmDbClose();
if ( level <= PANIC )
abort();
exit(-1);
}
log_mutex.unlock();
} // end logPrint
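
For context, logPrint() fans each message out to every sink (terminal, file, database, syslog) whose configured level admits it; lower numeric levels are more severe. A compilable sketch of that gating with hypothetical thresholds (stand-in types, not the real Logger):

#include <cstdio>
#include <string>

// Hypothetical severity scale; lower numbers are more severe.
enum Level { PANIC = -4, FATAL = -3, ERROR = -2, WARNING = -1, INFO = 0, DEBUG1 = 1 };

struct Sinks {
  Level terminal_level = INFO;
  Level file_level = DEBUG1;
  Level syslog_level = WARNING;
};

// Emit to every sink whose threshold admits this message.
void log_print(const Sinks &s, Level level, const std::string &msg) {
  if ( level <= s.terminal_level ) std::printf("terminal: %s\n", msg.c_str());
  if ( level <= s.file_level ) std::printf("file: %s\n", msg.c_str());
  if ( level <= s.syslog_level ) std::printf("syslog: %s\n", msg.c_str());
}

int main() {
  Sinks sinks;
  log_print(sinks, ERROR, "db insert failed");  // reaches all three sinks
  log_print(sinks, DEBUG1, "verbose detail");   // file sink only
  return 0;
}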
void logInit(const char *name, const Logger::Options &options) {

File diff suppressed because it is too large

View File

@ -30,6 +30,11 @@
#include "zm_rgb.h"
#include "zm_zone.h"
#include "zm_event.h"
#include "zm_video.h"
#include "zm_videostore.h"
#include "zm_packetqueue.h"
#include "zm_thread.h"
class Monitor;
#include "zm_group.h"
#include "zm_camera.h"
@ -75,6 +80,7 @@ public:
FFMPEG,
LIBVLC,
CURL,
NVSOCKET,
VNC,
} CameraType;
@ -98,8 +104,8 @@ public:
typedef enum {
DISABLED,
X264ENCODE,
H264PASSTHROUGH,
ENCODE,
PASSTHROUGH,
} VideoWriter;
protected:
@ -109,13 +115,15 @@ protected:
typedef enum { CLOSE_TIME, CLOSE_IDLE, CLOSE_ALARM } EventCloseMode;
/* sizeof(SharedData) expected to be 340 bytes on 32bit and 64bit */
/* sizeof(SharedData) expected to be 344 bytes on 32bit and 64bit */
typedef struct {
uint32_t size; /* +0 */
uint32_t last_write_index; /* +4 */
uint32_t last_read_index; /* +8 */
uint32_t state; /* +12 */
uint64_t last_event; /* +16 */
double capture_fps; // Current capturing fps
double analysis_fps; // Current analysis fps
uint64_t last_event_id; /* +16 */
uint32_t action; /* +24 */
int32_t brightness; /* +28 */
int32_t hue; /* +32 */
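
The byte-offset comments in SharedData matter because the struct lives in shared memory and is read by other processes, including the Perl Memory.pm module. A sketch of enforcing such comments at compile time, over a simplified stand-in layout rather than the real struct:

#include <cstddef>
#include <cstdint>

// Simplified stand-in, not the real SharedData; illustrates the technique only.
struct SharedDataSketch {
  uint32_t size;             /* +0 */
  uint32_t last_write_index; /* +4 */
  uint32_t last_read_index;  /* +8 */
  uint32_t state;            /* +12 */
  uint64_t last_event_id;    /* +16 */
  uint32_t action;           /* +24 */
};

// If a field is added or reordered, compilation fails instead of readers
// in other processes silently desynchronising.
static_assert(offsetof(SharedDataSketch, last_event_id) == 16, "layout drift");
static_assert(offsetof(SharedDataSketch, action) == 24, "layout drift");

int main() { return 0; }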
@ -175,26 +183,15 @@ protected:
char trigger_showtext[256];
} TriggerData;
/* sizeof(Snapshot) expected to be 16 bytes on 32bit and 32 bytes on 64bit */
struct Snapshot {
struct timeval *timestamp;
Image *image;
void* padding;
};
//TODO: Technically we can't exclude this struct when avformat is absent, because the Memory.pm module doesn't know about avformat
#if 1
//sizeOf(VideoStoreData) expected to be 4104 bytes on 32bit and 64bit
typedef struct {
uint32_t size;
uint64_t current_event;
char event_file[4096];
timeval recording; // used as both bool and a pointer to the timestamp when recording should begin
//uint32_t frameNumber;
} VideoStoreData;
#endif // HAVE_LIBAVFORMAT
class MonitorLink {
protected:
unsigned int id;
@ -217,25 +214,17 @@ protected:
volatile VideoStoreData *video_store_data;
int last_state;
uint64_t last_event;
uint64_t last_event_id;
public:
MonitorLink(unsigned int p_id, const char *p_name);
~MonitorLink();
inline unsigned int Id() const {
return id;
}
inline const char *Name() const {
return name;
}
inline unsigned int Id() const { return id; }
inline const char *Name() const { return name; }
inline bool isConnected() const {
return connected && shared_data->valid;
}
inline time_t getLastConnectTime() const {
return last_connect_time;
}
inline bool isConnected() const { return connected && shared_data->valid; }
inline time_t getLastConnectTime() const { return last_connect_time; }
inline uint32_t lastFrameScore() {
return shared_data->last_frame_score;
@ -259,26 +248,53 @@ protected:
Function function; // What the monitor is doing
bool enabled; // Whether the monitor is enabled or asleep
bool decoding_enabled; // Whether the monitor will decode h264/h265 packets
std::string protocol;
std::string method;
std::string options;
std::string host;
std::string port;
std::string user;
std::string pass;
std::string path;
char device[64];
int palette;
int channel;
int format;
unsigned int camera_width;
unsigned int camera_height;
unsigned int width; // Normally the same as the camera, but not if partly rotated
unsigned int height; // Normally the same as the camera, but not if partly rotated
bool v4l_multi_buffer;
unsigned int v4l_captures_per_frame;
Orientation orientation; // Whether the image has to be rotated at all
unsigned int deinterlacing;
bool videoRecording;
unsigned int deinterlacing_value;
std::string decoder_hwaccel_name;
std::string decoder_hwaccel_device;
bool videoRecording;
bool rtsp_describe;
int savejpegs;
int colours;
VideoWriter videowriter;
std::string encoderparams;
int output_codec;
std::string encoder;
std::string output_container;
std::vector<EncoderParameter_t> encoderparamsvec;
_AVPIXELFORMAT imagePixFormat;
unsigned int subpixelorder;
bool record_audio; // Whether to store the audio that we receive
int brightness; // The statically saved brightness of the camera
int contrast; // The statically saved contrast of the camera
int hue; // The statically saved hue of the camera
int colour; // The statically saved colour of the camera
char event_prefix[64]; // The prefix applied to event names as they are created
char label_format[64]; // The format of the timestamp on the images
Coord label_coord; // The coordinates of the timestamp on the images
@ -289,15 +305,14 @@ protected:
int warmup_count; // How many images to process before looking for events
int pre_event_count; // How many images to hold and prepend to an alarm event
int post_event_count; // How many unalarmed images must occur before the alarm state is reset
struct timeval video_buffer_duration; // How long a video segment to keep in buffer (set only if analysis fps != 0 )
int stream_replay_buffer; // How many frames to store to support DVR functions, IGNORED from this object, passed directly into zms now
int section_length; // How long events should last in continuous modes
int min_section_length; // Minimum event length when using event_close_mode == ALARM
bool adaptive_skip; // Whether to use the newer adaptive algorithm for this monitor
int frame_skip; // How many frames to skip in continuous modes
int motion_frame_skip; // How many frames to skip in motion detection
double capture_max_fps; // Target Capture FPS
double analysis_fps; // Target framerate for video analysis
double analysis_fps_limit; // Target framerate for video analysis
struct timeval video_buffer_duration; // How long a video segment to keep in buffer (set only if analysis fps != 0 )
unsigned int analysis_update_delay; // How long we wait before updating analysis parameters
int capture_delay; // How long we wait between capture frames
int alarm_capture_delay; // How long we wait between capture frames when in alarm state
@ -310,29 +325,25 @@ protected:
Rgb signal_check_colour; // The colour that the camera will emit when no video signal detected
bool embed_exif; // Whether to embed Exif data into each image frame or not
bool last_signal;
double fps;
unsigned int last_camera_bytes;
Image delta_image;
Image ref_image;
Image alarm_image; // Used in creating analysis images, will be initialized in Analysis
Image write_image; // Used when creating snapshot images
std::string diag_path_ref;
std::string diag_path_delta;
int capture_max_fps;
Purpose purpose; // What this monitor has been created to do
unsigned int last_camera_bytes;
int event_count;
int image_count;
int analysis_image_count; // How many frames have been processed by analysis thread.
int motion_frame_count; // How many frames have had motion detection performed on them.
int ready_count;
int first_alarm_count;
int last_alarm_count;
bool last_signal;
int last_section_mod;
int buffer_count;
int prealarm_count;
State state;
time_t start_time;
time_t last_fps_time;
double last_fps_time;
double last_analysis_fps_time;
time_t auto_resume_time;
unsigned int last_motion_score;
@ -350,14 +361,23 @@ protected:
TriggerData *trigger_data;
VideoStoreData *video_store_data;
Snapshot *image_buffer;
Snapshot next_buffer; /* Used by four field deinterlacing */
Snapshot *pre_event_buffer;
struct timeval *shared_timestamps;
unsigned char *shared_images;
ZMPacket *image_buffer;
ZMPacket next_buffer; /* Used by four field deinterlacing */
int video_stream_id; // will be filled in PrimeCapture
int audio_stream_id; // will be filled in PrimeCapture
Camera *camera;
Event *event;
Storage *storage;
VideoStore *videoStore;
zm_packetqueue *packetqueue;
packetqueue_iterator *analysis_it;
Mutex mutex;
int n_zones;
Zone **zones;
@ -372,59 +392,17 @@ protected:
std::vector<Group *> groups;
Image delta_image;
Image ref_image;
Image alarm_image; // Used in creating analysis images, will be initialized in Analysis
Image write_image; // Used when creating snapshot images
std::string diag_path_ref;
std::string diag_path_delta;
public:
explicit Monitor();
explicit Monitor(unsigned int p_id);
// OurCheckAlarms seems to be unused. Check it on zm_monitor.cpp for more info.
//bool OurCheckAlarms( Zone *zone, const Image *pImage );
Monitor(
unsigned int p_id,
const char *p_name,
unsigned int p_server_id,
unsigned int p_storage_id,
int p_function,
bool p_enabled,
bool p_decoding_enabled,
const char *p_linked_monitors,
Camera *p_camera,
int p_orientation,
unsigned int p_deinterlacing,
const std::string &p_decoder_hwaccel_name,
const std::string &p_decoder_hwaccel_device,
int p_savejpegs,
VideoWriter p_videowriter,
std::string p_encoderparams,
bool p_record_audio,
const char *p_event_prefix,
const char *p_label_format,
const Coord &p_label_coord,
int label_size,
int p_image_buffer_count,
int p_warmup_count,
int p_pre_event_count,
int p_post_event_count,
int p_stream_replay_buffer,
int p_alarm_frame_count,
int p_section_length,
int p_min_section_length,
int p_frame_skip,
int p_motion_frame_skip,
double p_capture_max_fps,
double p_analysis_fps,
unsigned int p_analysis_update_delay,
int p_capture_delay,
int p_alarm_capture_delay,
int p_fps_report_interval,
int p_ref_blend_perc,
int p_alarm_ref_blend_perc,
bool p_track_motion,
int p_signal_check_points,
Rgb p_signal_check_colour,
bool p_embed_exif,
Purpose p_purpose,
int p_n_zones=0,
Zone *p_zones[]=0
);
~Monitor();
void AddZones( int p_n_zones, Zone *p_zones[] );
@ -436,17 +414,19 @@ public:
inline int ShmValid() const {
return shared_data && shared_data->valid;
}
Camera *getCamera();
inline unsigned int Id() const { return id; }
inline const char *Name() const { return name; }
inline unsigned int ServerId() { return server_id; }
inline Storage *getStorage() {
if ( ! storage ) {
storage = new Storage( storage_id );
storage = new Storage(storage_id);
}
return storage;
}
inline Function GetFunction() const { return function; }
inline zm_packetqueue * GetPacketQueue() const { return packetqueue; }
inline bool Enabled() const {
if ( function <= MONITOR )
return false;
@ -457,9 +437,15 @@ public:
}
inline const char *EventPrefix() const { return event_prefix; }
inline bool Ready() const {
if ( function <= MONITOR )
if ( function <= MONITOR ) {
Error("Should not be calling Ready if the function doesn't include motion detection");
return false;
}
if ( image_count >= ready_count ) {
return true;
}
Debug(2, "Not ready because image_count(%d) <= ready_count(%d)", image_count, ready_count);
return false;
return( image_count > ready_count );
}
inline bool Active() const {
if ( function <= MONITOR )
@ -467,6 +453,13 @@ public:
return( enabled && shared_data->active );
}
inline bool Exif() const { return embed_exif; }
inline bool RecordAudio() { return record_audio; }
/*
inline Purpose Purpose() { return purpose };
inline Purpose Purpose( Purpose p ) { purpose = p; };
*/
Orientation getOrientation() const;
unsigned int Width() const { return width; }
@ -476,10 +469,15 @@ public:
int GetOptSaveJPEGs() const { return savejpegs; }
VideoWriter GetOptVideoWriter() const { return videowriter; }
const std::vector<EncoderParameter_t>* GetOptEncoderParamsVec() const { return &encoderparamsvec; }
const std::string GetOptEncoderParams() const { return encoderparams; }
//const std::vector<EncoderParameter_t>* GetEncoderParams() const { return &encoderparamsvec; }
const std::string &GetEncoderOptions() const { return encoderparams; }
const int OutputCodec() const { return output_codec; }
const std::string &Encoder() const { return encoder; }
const std::string &OutputContainer() const { return output_container; }
uint64_t GetVideoWriterEventId() const { return video_store_data->current_event; }
void SetVideoWriterEventId( unsigned long long p_event_id ) { video_store_data->current_event = p_event_id; }
void SetVideoWriterEventId( uint64_t p_event_id ) { video_store_data->current_event = p_event_id; }
struct timeval GetVideoWriterStartTime() const { return video_store_data->recording; }
void SetVideoWriterStartTime(const struct timeval &t) { video_store_data->recording = t; }
@ -488,7 +486,7 @@ public:
int GetImageBufferCount() const { return image_buffer_count; };
State GetState() const;
int GetImage( int index=-1, int scale=100 );
Snapshot *getSnapshot() const;
ZMPacket *getSnapshot( int index=-1 ) const;
struct timeval GetTimestamp( int index=-1 ) const;
void UpdateAdaptiveSkip();
useconds_t GetAnalysisRate();
@ -500,12 +498,15 @@ public:
unsigned int GetLastWriteIndex() const;
uint64_t GetLastEventId() const;
double GetFPS() const;
void UpdateAnalysisFPS();
void UpdateCaptureFPS();
void ForceAlarmOn( int force_score, const char *force_case, const char *force_text="" );
void ForceAlarmOff();
void CancelForced();
TriggerState GetTriggerState() const { return (TriggerState)(trigger_data?trigger_data->trigger_state:TRIGGER_CANCEL); }
inline time_t getStartupTime() const { return shared_data->startup_time; }
inline void setStartupTime( time_t p_time ) { shared_data->startup_time = p_time; }
void get_ref_image();
int LabelSize() const { return label_size; }
@ -520,12 +521,14 @@ public:
int actionColour( int p_colour=-1 );
int actionContrast( int p_contrast=-1 );
int PrimeCapture() const;
int PrimeCapture();
int PreCapture() const;
int Capture();
int PostCapture() const;
int Close();
void CheckAction();
unsigned int DetectMotion( const Image &comp_image, Event::StringSet &zoneSet );
// DetectBlack seems to be unused. Check it on zm_monitor.cpp for more info.
//unsigned int DetectBlack( const Image &comp_image, Event::StringSet &zoneSet );
@ -554,7 +557,7 @@ public:
static int LoadFfmpegMonitors(const char *file, Monitor **&monitors, Purpose purpose);
#endif // HAVE_LIBAVFORMAT
static Monitor *Load(unsigned int id, bool load_zones, Purpose purpose);
static Monitor *Load(MYSQL_ROW dbrow, bool load_zones, Purpose purpose);
void Load(MYSQL_ROW dbrow, bool load_zones, Purpose purpose);
//void writeStreamImage( Image *image, struct timeval *timestamp, int scale, int mag, int x, int y );
//void StreamImages( int scale=100, int maxfps=10, time_t ttl=0, int msq_id=0 );
//void StreamImagesRaw( int scale=100, int maxfps=10, time_t ttl=0 );
@ -562,8 +565,11 @@ public:
#if HAVE_LIBAVCODEC
//void StreamMpeg( const char *format, int scale=100, int maxfps=10, int bitrate=100000 );
#endif // HAVE_LIBAVCODEC
double get_fps( ) const {
return fps;
double get_capture_fps( ) const {
return shared_data ? shared_data->capture_fps : 0.0;
}
double get_analysis_fps( ) const {
return shared_data ? shared_data->analysis_fps : 0.0;
}
};

View File

@ -27,13 +27,12 @@
#include <arpa/inet.h>
#include <glob.h>
const int MAX_SLEEP_USEC=1000000; // 1 sec
const int MAX_SLEEP_USEC = 1000000; // 1 sec
bool MonitorStream::checkSwapPath(const char *path, bool create_path) {
struct stat stat_buf;
if ( stat(path, &stat_buf) < 0 ) {
if ( create_path && errno == ENOENT ) {
if ( create_path and (errno == ENOENT) ) {
Debug(3, "Swap path '%s' missing, creating", path);
if ( mkdir(path, 0755) ) {
Error("Can't mkdir %s: %s", path, strerror(errno));
@ -73,7 +72,7 @@ bool MonitorStream::checkSwapPath(const char *path, bool create_path) {
return false;
}
return true;
} // end bool MonitorStream::checkSwapPath( const char *path, bool create_path )
} // end bool MonitorStream::checkSwapPath(const char *path, bool create_path)
void MonitorStream::processCommand(const CmdMsg *msg) {
Debug(2, "Got message, type %d, msg %d", msg->msg_type, msg->msg_data[0]);
@ -196,7 +195,7 @@ void MonitorStream::processCommand(const CmdMsg *msg) {
}
break;
case CMD_ZOOMOUT :
Debug( 1, "Got ZOOM OUT command" );
Debug(1, "Got ZOOM OUT command");
switch ( zoom ) {
case 500:
zoom = 400;
@ -240,6 +239,8 @@ void MonitorStream::processCommand(const CmdMsg *msg) {
int id;
int state;
double fps;
double capture_fps;
double analysis_fps;
int buffer_level;
int rate;
double delay;
@ -253,6 +254,8 @@ void MonitorStream::processCommand(const CmdMsg *msg) {
status_data.id = monitor->Id();
if ( ! monitor->ShmValid() ) {
status_data.fps = 0.0;
status_data.capture_fps = 0.0;
status_data.analysis_fps = 0.0;
status_data.state = Monitor::UNKNOWN;
//status_data.enabled = monitor->shared_data->active;
status_data.enabled = false;
@ -260,6 +263,8 @@ void MonitorStream::processCommand(const CmdMsg *msg) {
status_data.buffer_level = 0;
} else {
status_data.fps = monitor->GetFPS();
status_data.capture_fps = monitor->get_capture_fps();
status_data.analysis_fps = monitor->get_analysis_fps();
status_data.state = monitor->shared_data->state;
//status_data.enabled = monitor->shared_data->active;
status_data.enabled = monitor->trigger_data->trigger_state!=Monitor::TRIGGER_OFF;
@ -274,7 +279,10 @@ void MonitorStream::processCommand(const CmdMsg *msg) {
status_data.rate = replay_rate;
status_data.delay = TV_2_FLOAT(now) - TV_2_FLOAT(last_frame_timestamp);
status_data.zoom = zoom;
Debug(2, "Buffer Level:%d, Delayed:%d, Paused:%d, Rate:%d, delay:%.3f, Zoom:%d, Enabled:%d Forced:%d",
Debug(2, "fps: %.2f capture_fps: %.2f analysis_fps: %.2f Buffer Level:%d, Delayed:%d, Paused:%d, Rate:%d, delay:%.3f, Zoom:%d, Enabled:%d Forced:%d",
status_data.fps,
status_data.capture_fps,
status_data.analysis_fps,
status_data.buffer_level,
status_data.delayed,
status_data.paused,
@ -299,7 +307,7 @@ void MonitorStream::processCommand(const CmdMsg *msg) {
Debug(2, "Number of bytes sent to (%s): (%d)", rem_addr.sun_path, nbytes);
// quit after sending a status, if this was a quit request
if ( (MsgCommand)msg->msg_data[0]==CMD_QUIT ) {
if ( (MsgCommand)msg->msg_data[0] == CMD_QUIT ) {
zm_terminate = true;
Debug(2, "Quitting");
return;
@ -312,14 +320,15 @@ void MonitorStream::processCommand(const CmdMsg *msg) {
bool MonitorStream::sendFrame(const char *filepath, struct timeval *timestamp) {
bool send_raw = ((scale>=ZM_SCALE_BASE)&&(zoom==ZM_SCALE_BASE));
if ( type != STREAM_JPEG )
send_raw = false;
if ( !config.timestamp_on_capture && timestamp )
if (
( type != STREAM_JPEG )
||
( (!config.timestamp_on_capture) && timestamp )
)
send_raw = false;
if ( !send_raw ) {
Image temp_image(filepath);
return sendFrame(&temp_image, timestamp);
} else {
int img_buffer_size = 0;
@ -686,13 +695,18 @@ void MonitorStream::runStream() {
Debug(2, "Sending frame index: %d: frame_mod: %d frame count: %d paused(%d) delayed(%d)",
index, frame_mod, frame_count, paused, delayed);
// Send the next frame
Monitor::Snapshot *snap = &monitor->image_buffer[index];
//
ZMPacket *snap = &monitor->image_buffer[index];
if ( !sendFrame(snap->image, snap->timestamp) ) {
Debug(2, "sendFrame failed, quiting.");
zm_terminate = true;
break;
}
// Perhaps we should use NOW instead.
last_frame_timestamp = *(snap->timestamp);
//frame_sent = true;
//
if ( frame_count == 0 ) {
// Chrome will not display the first frame until it receives another.
// Firefox is fine. So just send the first frame twice.
@ -702,16 +716,6 @@ void MonitorStream::runStream() {
break;
}
}
// Perhaps we should use NOW instead.
last_frame_timestamp = *snap->timestamp;
/*
memcpy(
&last_frame_timestamp,
snap->timestamp,
sizeof(last_frame_timestamp)
);
*/
// frame_sent = true;
temp_read_index = temp_write_index;
} else {
@ -745,7 +749,7 @@ void MonitorStream::runStream() {
if ( buffered_playback && !paused ) {
if ( monitor->shared_data->valid ) {
if ( monitor->image_buffer[index].timestamp->tv_sec ) {
if ( monitor->shared_timestamps[index].tv_sec ) {
int temp_index = temp_write_index%temp_image_buffer_count;
Debug(2, "Storing frame %d", temp_index);
if ( !temp_image_buffer[temp_index].valid ) {
@ -757,8 +761,11 @@ void MonitorStream::runStream() {
temp_index);
temp_image_buffer[temp_index].valid = true;
}
memcpy(&(temp_image_buffer[temp_index].timestamp), monitor->image_buffer[index].timestamp, sizeof(temp_image_buffer[0].timestamp));
monitor->image_buffer[index].image->WriteJpeg(temp_image_buffer[temp_index].file_name, config.jpeg_file_quality);
temp_image_buffer[temp_index].timestamp = monitor->shared_timestamps[index];
monitor->image_buffer[index].image->WriteJpeg(
temp_image_buffer[temp_index].file_name,
config.jpeg_file_quality
);
temp_write_index = MOD_ADD(temp_write_index, 1, temp_image_buffer_count);
if ( temp_write_index == temp_read_index ) {
// Go back to live viewing
@ -844,7 +851,7 @@ void MonitorStream::SingleImage(int scale) {
int img_buffer_size = 0;
static JOCTET img_buffer[ZM_MAX_IMAGE_SIZE];
Image scaled_image;
Monitor::Snapshot *snap = monitor->getSnapshot();
ZMPacket *snap = &(monitor->image_buffer[monitor->shared_data->last_write_index]);
Image *snap_image = snap->image;
if ( scale != ZM_SCALE_BASE ) {
@ -862,11 +869,11 @@ void MonitorStream::SingleImage(int scale) {
"Content-Type: image/jpeg\r\n\r\n",
img_buffer_size);
fwrite(img_buffer, img_buffer_size, 1, stdout);
}
} // end void MonitorStream::SingleImage(int scale)
void MonitorStream::SingleImageRaw(int scale) {
Image scaled_image;
Monitor::Snapshot *snap = monitor->getSnapshot();
ZMPacket *snap = monitor->getSnapshot();
Image *snap_image = snap->image;
if ( scale != ZM_SCALE_BASE ) {
@ -883,7 +890,7 @@ void MonitorStream::SingleImageRaw(int scale) {
"Content-Type: image/x-rgb\r\n\r\n",
snap_image->Size());
fwrite(snap_image->Buffer(), snap_image->Size(), 1, stdout);
}
} // end void MonitorStream::SingleImageRaw(int scale)
#ifdef HAVE_ZLIB_H
void MonitorStream::SingleImageZip(int scale) {
@ -891,7 +898,7 @@ void MonitorStream::SingleImageZip(int scale) {
static Bytef img_buffer[ZM_MAX_IMAGE_SIZE];
Image scaled_image;
Monitor::Snapshot *snap = monitor->getSnapshot();
ZMPacket *snap = monitor->getSnapshot();
Image *snap_image = snap->image;
if ( scale != ZM_SCALE_BASE ) {
@ -909,5 +916,5 @@ void MonitorStream::SingleImageZip(int scale) {
"Content-Type: image/x-rgbz\r\n\r\n",
img_buffer_size);
fwrite(img_buffer, img_buffer_size, 1, stdout);
}
} // end void MonitorStream::SingleImageZip(int scale)
#endif // HAVE_ZLIB_H

View File

@ -59,8 +59,14 @@ class MonitorStream : public StreamBase {
public:
MonitorStream() :
temp_image_buffer(nullptr), temp_image_buffer_count(0), temp_read_index(0), temp_write_index(0),
ttl(0), playback_buffer(0), delayed(false), frame_count(0) {
temp_image_buffer(nullptr),
temp_image_buffer_count(0),
temp_read_index(0),
temp_write_index(0),
ttl(0),
playback_buffer(0),
delayed(false),
frame_count(0) {
}
void setStreamBuffer(int p_playback_buffer) {
playback_buffer = p_playback_buffer;

View File

@ -23,28 +23,285 @@
#include <sys/time.h>
using namespace std;
AVPixelFormat target_format = AV_PIX_FMT_NONE;
ZMPacket::ZMPacket( AVPacket *p ) {
frame = nullptr;
image = nullptr;
av_init_packet( &packet );
if ( zm_av_packet_ref( &packet, p ) < 0 ) {
Error("error refing packet");
}
gettimeofday( &timestamp, nullptr );
ZMPacket::ZMPacket() :
keyframe(0),
in_frame(nullptr),
out_frame(nullptr),
timestamp(nullptr),
buffer(nullptr),
image(nullptr),
analysis_image(nullptr),
score(-1),
codec_type(AVMEDIA_TYPE_UNKNOWN),
image_index(-1),
codec_imgsize(0)
{
av_init_packet(&packet);
packet.size = 0; // So we can detect whether it has been filled.
}
ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) {
frame = nullptr;
image = nullptr;
av_init_packet( &packet );
if ( zm_av_packet_ref( &packet, p ) < 0 ) {
ZMPacket::ZMPacket(ZMPacket &p) :
keyframe(0),
in_frame(nullptr),
out_frame(nullptr),
timestamp(nullptr),
buffer(nullptr),
image(nullptr),
analysis_image(nullptr),
score(-1),
codec_type(AVMEDIA_TYPE_UNKNOWN),
image_index(-1),
codec_imgsize(0)
{
av_init_packet(&packet);
if ( zm_av_packet_ref(&packet, &p.packet) < 0 ) {
Error("error refing packet");
}
timestamp = *t;
timestamp = new struct timeval;
*timestamp = *p.timestamp;
}
ZMPacket::~ZMPacket() {
zm_av_packet_unref( &packet );
zm_av_packet_unref(&packet);
if ( in_frame ) {
av_frame_free(&in_frame);
}
if ( out_frame ) {
av_frame_free(&out_frame);
}
if ( buffer ) {
av_freep(&buffer);
}
if ( analysis_image ) {
delete analysis_image;
analysis_image = nullptr;
}
if ( image ) {
delete image;
image = nullptr;
}
if ( timestamp ) {
delete timestamp;
timestamp = nullptr;
}
#if 0
if ( image ) {
if ( image->IsBufferHeld() ) {
// Don't free the mmap'd image
} else {
delete image;
image = nullptr;
delete timestamp;
timestamp = nullptr;
}
} else {
if ( timestamp ) {
delete timestamp;
timestamp = nullptr;
}
}
#endif
}
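
The destructor leans on FFmpeg's packet reference counting: the zm_av_packet_ref/zm_av_packet_unref wrappers presumably map onto av_packet_ref/av_packet_unref, which share one refcounted payload buffer. A standalone illustration of those semantics (not ZoneMinder code):

extern "C" {
#include <libavcodec/avcodec.h>
}

int main() {
  AVPacket *src = av_packet_alloc();
  if ( av_new_packet(src, 1024) < 0 )  // allocate a refcounted 1024-byte payload
    return 1;

  AVPacket dst;
  av_init_packet(&dst);                // same init style as the code above; deprecated in newer FFmpeg
  if ( av_packet_ref(&dst, src) < 0 )  // share the payload; nothing is copied
    return 1;

  av_packet_unref(&dst);               // drop our reference
  av_packet_free(&src);                // frees the packet; the last ref frees the payload
  return 0;
}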
// deprecated
void ZMPacket::reset() {
zm_av_packet_unref(&packet);
if ( in_frame ) {
av_frame_free(&in_frame);
}
if ( out_frame ) {
av_frame_free(&out_frame);
}
if ( buffer ) {
av_freep(&buffer);
}
if ( analysis_image ) {
delete analysis_image;
analysis_image = nullptr;
}
#if 0
if ( (! image) && timestamp ) {
delete timestamp;
timestamp = NULL;
}
#endif
score = -1;
keyframe = 0;
}
int ZMPacket::decode(AVCodecContext *ctx) {
Debug(4, "about to decode video, image_index is (%d)", image_index);
if ( in_frame ) {
Error("Already have a frame?");
} else {
in_frame = zm_av_frame_alloc();
}
int ret = zm_send_packet_receive_frame(ctx, in_frame, packet);
if ( ret < 0 ) {
if ( AVERROR(EAGAIN) != ret ) {
Warning("Unable to receive frame : code %d %s.",
ret, av_make_error_string(ret).c_str());
}
av_frame_free(&in_frame);
return 0;
}
#if HAVE_LIBAVUTIL_HWCONTEXT_H
#if LIBAVCODEC_VERSION_CHECK(57, 89, 0, 89, 0)
if ( (ctx->sw_pix_fmt != in_frame->format) ) {
Debug(1, "Have different format %s != %s.",
av_get_pix_fmt_name(ctx->pix_fmt),
av_get_pix_fmt_name(ctx->sw_pix_fmt)
);
if ( target_format == AV_PIX_FMT_NONE and ctx->hw_frames_ctx and (image->Colours() == 4) ) {
// Look for rgb0 in list of supported formats
enum AVPixelFormat *formats;
if ( 0 <= av_hwframe_transfer_get_formats(
ctx->hw_frames_ctx,
AV_HWFRAME_TRANSFER_DIRECTION_FROM,
&formats,
0
) ) {
for (int i = 0; formats[i] != AV_PIX_FMT_NONE; i++) {
Debug(1, "Available dest formats %d %s",
formats[i],
av_get_pix_fmt_name(formats[i])
);
if ( formats[i] == AV_PIX_FMT_RGB0 ) {
target_format = formats[i];
break;
} // endif RGB0
} // end foreach support format
av_freep(&formats);
} // endif success getting list of formats
} // end if target_format not set
AVFrame *new_frame = zm_av_frame_alloc();
if ( target_format != AV_PIX_FMT_NONE ) {
if ( image ) {
if ( 0 > image->PopulateFrame(new_frame) ) {
av_frame_free(&new_frame);  // allocated with zm_av_frame_alloc(), so must not use delete
new_frame = zm_av_frame_alloc();
delete image;
image = nullptr;
}
} else {
delete image;
image = nullptr;
}
new_frame->format = target_format;
}
/* retrieve data from GPU to CPU */
zm_dump_video_frame(in_frame, "Before hwtransfer");
ret = av_hwframe_transfer_data(new_frame, in_frame, 0);
if ( ret < 0 ) {
Error("Unable to transfer frame: %s, continuing",
av_make_error_string(ret).c_str());
av_frame_free(&in_frame);
av_frame_free(&new_frame);
return 0;
}
new_frame->pts = in_frame->pts;
zm_dump_video_frame(new_frame, "After hwtransfer");
if ( new_frame->format == AV_PIX_FMT_RGB0 ) {
new_frame->format = AV_PIX_FMT_RGBA;
zm_dump_video_frame(new_frame, "After hwtransfer setting to rgba");
}
av_frame_free(&in_frame);
in_frame = new_frame;
} else {
Debug(2, "Same pix format %s so not hwtransferring. sw_pix_fmt is %s",
av_get_pix_fmt_name(ctx->pix_fmt),
av_get_pix_fmt_name(ctx->sw_pix_fmt)
);
if ( image ) {
image->Assign(in_frame);
}
}
#endif
#endif
return 1;
} // end ZMPacket::decode
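
For reference, zm_send_packet_receive_frame() presumably wraps the send/receive decode API introduced in FFmpeg 3.x. A compilable sketch of that pattern with a hypothetical decode_one() helper (the caller must have opened the codec context):

extern "C" {
#include <libavcodec/avcodec.h>
}

// Returns 1 when a frame was decoded, 0 when the decoder needs more input,
// and a negative AVERROR on failure.
static int decode_one(AVCodecContext *ctx, AVPacket *pkt, AVFrame *frame) {
  int ret = avcodec_send_packet(ctx, pkt);
  if ( ret < 0 && ret != AVERROR(EAGAIN) )
    return ret;  // real error feeding the decoder
  ret = avcodec_receive_frame(ctx, frame);
  if ( ret == AVERROR(EAGAIN) || ret == AVERROR_EOF )
    return 0;    // no frame available yet
  return ret < 0 ? ret : 1;
}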
Image *ZMPacket::get_image(Image *i) {
if ( !in_frame ) {
Error("Can't get image without frame.. maybe need to decode first");
return nullptr;
}
if ( !image ) {
if ( !i ) {
Error("Need a pre-allocated image buffer");
return nullptr;
}
image = i;
}
image->Assign(in_frame);
return image;
}
Image *ZMPacket::set_image(Image *i) {
image = i;
return image;
}
AVPacket *ZMPacket::set_packet(AVPacket *p) {
if ( zm_av_packet_ref(&packet, p) < 0 ) {
Error("error refing packet");
}
//dumpPacket(&packet, "zmpacket:");
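// Note: timestamp is assumed to already point at valid storage; it is not allocated here.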
gettimeofday(timestamp, nullptr);
keyframe = p->flags & AV_PKT_FLAG_KEY;
return &packet;
}
AVFrame *ZMPacket::get_out_frame(const AVCodecContext *ctx) {
if ( !out_frame ) {
out_frame = zm_av_frame_alloc();
if ( !out_frame ) {
Error("Unable to allocate a frame");
return nullptr;
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
codec_imgsize = av_image_get_buffer_size(
ctx->pix_fmt,
ctx->width,
ctx->height, 32);
buffer = (uint8_t *)av_malloc(codec_imgsize);
av_image_fill_arrays(
out_frame->data,
out_frame->linesize,
buffer,
ctx->pix_fmt,
ctx->width,
ctx->height,
32);
#else
codec_imgsize = avpicture_get_size(
ctx->pix_fmt,
ctx->width,
ctx->height);
buffer = (uint8_t *)av_malloc(codec_imgsize);
avpicture_fill(
(AVPicture *)out_frame,
buffer,
ctx->pix_fmt,
ctx->width,
ctx->height
);
#endif
out_frame->width = ctx->width;
out_frame->height = ctx->height;
out_frame->format = ctx->pix_fmt;
}
return out_frame;
} // end AVFrame *ZMPacket::get_out_frame( AVCodecContext *ctx );
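
get_out_frame() pairs an av_malloc()ed buffer with an AVFrame via av_image_fill_arrays(); the frame borrows the buffer rather than owning it, which is why ZMPacket frees buffer separately in its destructor. A self-contained sketch of that wiring (stand-in dimensions and pixel format):

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
}
#include <cstdint>

int main() {
  const int w = 640, h = 480, align = 32;
  AVFrame *f = av_frame_alloc();
  int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, w, h, align);
  uint8_t *buffer = (uint8_t *)av_malloc(size);
  // Point f->data/f->linesize into buffer; no pixel data is copied.
  av_image_fill_arrays(f->data, f->linesize, buffer,
                       AV_PIX_FMT_YUV420P, w, h, align);
  f->width = w;
  f->height = h;
  f->format = AV_PIX_FMT_YUV420P;
  av_freep(&buffer);   // the frame does not own the buffer, so free it ourselves
  av_frame_free(&f);
  return 0;
}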

View File

@ -27,20 +27,57 @@ extern "C" {
#ifdef __FreeBSD__
#include <sys/time.h>
#endif // __FreeBSD__
#include "zm_image.h"
#include "zm_thread.h"
#include <mutex>
class ZMPacket {
public:
std::recursive_mutex mutex;
int keyframe;
AVPacket packet; // Input packet, undecoded
AVFrame *frame; // Input image, decoded
Image *image; // Our internal image oject representing this frame
struct timeval timestamp;
AVFrame *in_frame; // Input image, decoded Theoretically only filled if needed.
AVFrame *out_frame; // output image, Only filled if needed.
struct timeval *timestamp;
uint8_t *buffer; // buffer used in image
Image *image;
Image *analysis_image;
int score;
AVMediaType codec_type;
int image_index;
int codec_imgsize;
public:
AVPacket *av_packet() { return &packet; }
ZMPacket( AVPacket *packet, struct timeval *timestamp );
explicit ZMPacket( AVPacket *packet );
AVPacket *set_packet(AVPacket *p) ;
AVFrame *av_frame() { return out_frame; }
Image *get_image(Image *i=nullptr);
Image *set_image(Image *);
int is_keyframe() { return keyframe; };
int decode( AVCodecContext *ctx );
void reset();
explicit ZMPacket(Image *image);
explicit ZMPacket(ZMPacket &packet);
ZMPacket();
~ZMPacket();
void lock() {
Debug(4,"Locking packet %d", this->image_index);
mutex.lock();
Debug(4,"packet %d locked", this->image_index);
};
bool trylock() {
Debug(4,"TryLocking packet %d", this->image_index);
return mutex.try_lock();
};
void unlock() {
Debug(4,"packet %d unlocked", this->image_index);
mutex.unlock();
};
AVFrame *get_out_frame( const AVCodecContext *ctx );
int get_codec_imgsize() { return codec_imgsize; };
};
#endif /* ZM_PACKET_H */
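
Each packet carries its own recursive mutex so that the capture, analysis, and streaming threads can pin individual packets while the queue keeps moving; trylock() lets cleanup code skip packets that are in use. A minimal sketch of that idiom with a stand-in Packet type:

#include <cstdio>
#include <mutex>

// Stand-in for ZMPacket's locking surface.
struct Packet {
  std::recursive_mutex mutex;
  bool trylock() { return mutex.try_lock(); }
  void unlock() { mutex.unlock(); }
};

int main() {
  Packet p;
  if ( p.trylock() ) {   // e.g. cleanup in queuePacket(): only touch free packets
    std::puts("packet free, safe to reclaim");
    p.unlock();
  } else {
    std::puts("packet busy, stop cleaning");
  }
  return 0;
}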

View File

@ -17,204 +17,329 @@
//along with ZoneMinder. If not, see <http://www.gnu.org/licenses/>.
// PacketQueue must know about all iterators and manage them
#include "zm_packetqueue.h"
#include "zm_ffmpeg.h"
#include "zm_signal.h"
#include <sys/time.h>
#include "zm_time.h"
zm_packetqueue::zm_packetqueue( int p_max_stream_id ) {
max_stream_id = p_max_stream_id;
zm_packetqueue::zm_packetqueue(
int video_image_count,
int p_video_stream_id,
int p_audio_stream_id
):
video_stream_id(p_video_stream_id),
max_video_packet_count(video_image_count),
deleting(false)
{
max_stream_id = p_video_stream_id > p_audio_stream_id ? p_video_stream_id : p_audio_stream_id;
packet_counts = new int[max_stream_id+1];
for ( int i=0; i <= max_stream_id; ++i )
packet_counts[i] = 0;
}
zm_packetqueue::~zm_packetqueue() {
clearQueue();
deleting = true;
// Anyone waiting should immediately check deleting
condition.notify_all();
/* zma might be waiting. Must have exclusive access */
while ( !mutex.try_lock() ) {
Debug(4, "Waiting for exclusive access");
condition.notify_all();
}
while ( !pktQueue.empty() ) {
ZMPacket *packet = pktQueue.front();
pktQueue.pop_front();
delete packet;
}
delete[] packet_counts;
Debug(4, "Done in destructor");
packet_counts = nullptr;
mutex.unlock();
condition.notify_all();
}
bool zm_packetqueue::queuePacket(ZMPacket* zm_packet) {
/* Enqueues the given packet. Will maintain the it pointer and image packet counts.
* If we have reached our max image packet count, it will pop off as many packets as are needed.
* Thus it will ensure that the same packet never gets queued twice.
*/
if (
( zm_packet->packet.dts == AV_NOPTS_VALUE )
||
( packet_counts[zm_packet->packet.stream_index] <= 0 )
bool zm_packetqueue::queuePacket(ZMPacket* add_packet) {
Debug(4, "packetqueue queuepacket %p %d", add_packet, add_packet->image_index);
mutex.lock();
pktQueue.push_back(add_packet);
packet_counts[add_packet->packet.stream_index] += 1;
Debug(1, "packet counts for %d is %d",
add_packet->packet.stream_index,
packet_counts[add_packet->packet.stream_index]);
for (
std::list<packetqueue_iterator *>::iterator iterators_it = iterators.begin();
iterators_it != iterators.end();
++iterators_it
) {
Debug(2,"Inserting packet with dts %" PRId64 " because queue %d is empty (queue size: %d) or invalid dts",
zm_packet->packet.dts, zm_packet->packet.stream_index, packet_counts[zm_packet->packet.stream_index]
);
// No dts value, can't do much with it
pktQueue.push_back(zm_packet);
packet_counts[zm_packet->packet.stream_index] += 1;
return true;
packetqueue_iterator *iterator_it = *iterators_it;
if ( *iterator_it == pktQueue.end() ) {
Debug(4, "pointing it %p to back", iterator_it);
--(*iterator_it);
}
} // end foreach iterator
#if 0
std::list<ZMPacket *>::reverse_iterator it = pktQueue.rbegin();
// Scan through the queue looking for a packet for our stream with a dts <= ours.
while ( it != pktQueue.rend() ) {
AVPacket *av_packet = &((*it)->packet);
Debug(2, "Looking at packet with stream index (%d) with dts %" PRId64,
av_packet->stream_index, av_packet->dts);
if ( av_packet->stream_index == zm_packet->packet.stream_index ) {
if (
( av_packet->dts != AV_NOPTS_VALUE )
&&
( av_packet->dts <= zm_packet->packet.dts)
// Only do queue cleaning if we are adding a video keyframe, so that we guarantee that there is one.
// Four conditions have to be satisfied:
// 1. packetqueue starts with a video keyframe
// 2. Have minimum # of video packets
// 3. No packets can be locked
// 4. No iterator can point to one of the packets
//
// So start at the beginning, counting video packets until the next keyframe.
// If deleting those packets doesn't break conditions 1 and 2, go ahead and delete them.
if ( add_packet->packet.stream_index == video_stream_id
and
add_packet->keyframe
and
(packet_counts[video_stream_id] > max_video_packet_count)
) {
Debug(2, "break packet with stream index (%d) with dts %" PRId64,
(*it)->packet.stream_index, (*it)->packet.dts);
break;
}
} else { // Not same stream, compare timestamps
if ( tvDiffUsec(((*it)->timestamp, zm_packet->timestamp) ) <= 0 ) {
Debug(2, "break packet with stream index (%d) with dts %" PRId64,
(*it)->packet.stream_index, (*it)->packet.dts);
break;
}
}
packetqueue_iterator it = pktQueue.begin();
int video_stream_packets = 0;
// Since we have many packets in the queue, we should NOT be pointing at end so don't need to test for that
do {
it++;
} // end while not the end of the queue
if ( it != pktQueue.rend() ) {
Debug(2, "Found packet with stream index (%d) with dts %" PRId64 " <= %" PRId64,
(*it)->packet.stream_index, (*it)->packet.dts, zm_packet->packet.dts);
if ( it == pktQueue.rbegin() ) {
Debug(2,"Inserting packet with dts %" PRId64 " at end", zm_packet->packet.dts);
// No dts value, can't do much with it
pktQueue.push_back(zm_packet);
packet_counts[zm_packet->packet.stream_index] += 1;
return true;
ZMPacket *zm_packet = *it;
Debug(1, "Checking packet to see if we can delete them");
if ( zm_packet->packet.stream_index == video_stream_id ) {
if ( zm_packet->keyframe ) {
Debug(1, "Have a video keyframe so breaking out");
if ( !zm_packet->trylock() ) {
Debug(1, "Have locked packet %d", zm_packet->image_index);
video_stream_packets = max_video_packet_count;
}
// Convert to a forward iterator so that we can insert at end
std::list<ZMPacket *>::iterator f_it = it.base();
Debug(2, "Insert packet before packet with stream index (%d) with dts %" PRId64 " for dts %" PRId64,
(*f_it)->packet.stream_index, (*f_it)->packet.dts, zm_packet->packet.dts);
pktQueue.insert(f_it, zm_packet);
packet_counts[zm_packet->packet.stream_index] += 1;
return true;
zm_packet->unlock();
break;
}
Debug(1,"Unable to insert packet for stream %d with dts %" PRId64 " into queue.",
zm_packet->packet.stream_index, zm_packet->packet.dts);
#endif
pktQueue.push_back(zm_packet);
packet_counts[zm_packet->packet.stream_index] += 1;
video_stream_packets ++;
}
if ( !zm_packet->trylock() ) {
Debug(1, "Have locked packet %d", zm_packet->image_index);
video_stream_packets = max_video_packet_count;
break;
}
zm_packet->unlock();
for (
std::list<packetqueue_iterator *>::iterator iterators_it = iterators.begin();
iterators_it != iterators.end();
++iterators_it
) {
packetqueue_iterator *iterator_it = *iterators_it;
// Have to check each iterator and make sure it doesn't point to the packet we are about to delete
if ( *(*iterator_it) == zm_packet ) {
Debug(4, "Found IT at beginning of queue. Threads not keeping up");
video_stream_packets = max_video_packet_count;
}
} // end foreach iterator
} while ( *it != add_packet );
Debug(1, "Resulting video_stream_packets count %d, %d > %d, pointing at latest packet? %d",
video_stream_packets,
packet_counts[video_stream_id] - video_stream_packets, max_video_packet_count,
( *it == add_packet )
);
if (
packet_counts[video_stream_id] - video_stream_packets > max_video_packet_count
and
( *it != add_packet )
) {
Debug(1, "Deleting packets");
// It is enough to delete the packets tested above. A subsequent queuePacket can clear a second set
while ( pktQueue.begin() != it ) {
ZMPacket *zm_packet = *pktQueue.begin();
if ( !zm_packet ) {
Error("NULL zm_packet in queue");
pktQueue.pop_front();  // drop the bad entry, otherwise this loop would spin forever
continue;
}
Debug(1, "Deleting a packet with stream index:%d image_index:%d with keyframe:%d, video frames in queue:%d max: %d, queuesize:%d",
zm_packet->packet.stream_index, zm_packet->image_index, zm_packet->keyframe, packet_counts[video_stream_id], max_video_packet_count, pktQueue.size());
pktQueue.pop_front();
packet_counts[zm_packet->packet.stream_index] -= 1;
delete zm_packet;
}
} // end if have at least max_video_packet_count video packets remaining
} // end if this is a video keyframe
mutex.unlock();
// We signal on every packet because someday we may analyze sound
Debug(4, "packetqueue queuepacket, unlocked signalling");
condition.notify_all();
return true;
} // end bool zm_packetqueue::queuePacket(ZMPacket* zm_packet)
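
The cleanup pass above reduces to: scan from the head up to (but not including) the next video keyframe, abort if any scanned packet is locked, and delete only if enough video packets would remain. A simplified, self-contained sketch of that rule (stand-in types; the iterator checks are omitted):

#include <cstdio>
#include <deque>

struct Pkt { bool video; bool keyframe; bool locked; };

// Returns how many packets can be dropped from the front, or 0.
size_t droppable(const std::deque<Pkt> &q, int video_count, int max_video) {
  int seen_video = 0;
  size_t n = 0;
  for ( const Pkt &p : q ) {
    if ( p.locked ) return 0;                     // a consumer still holds it
    if ( p.video && p.keyframe && n > 0 ) break;  // stop at the next keyframe
    if ( p.video ) ++seen_video;
    ++n;
  }
  // Only delete if enough video packets would remain afterwards.
  return (video_count - seen_video > max_video) ? n : 0;
}

int main() {
  std::deque<Pkt> q = {
    {true, true, false}, {true, false, false}, {false, false, false},
    {true, true, false}, {true, false, false},
  };
  std::printf("droppable: %zu\n", droppable(q, 4, 1));  // prints 3
  return 0;
}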
bool zm_packetqueue::queuePacket(AVPacket* av_packet) {
ZMPacket *zm_packet = new ZMPacket(av_packet);
return queuePacket(zm_packet);
}
ZMPacket* zm_packetqueue::popPacket( ) {
Debug(4, "pktQueue size %d", pktQueue.size());
if ( pktQueue.empty() ) {
return nullptr;
}
Debug(4, "poPacket Mutex locking");
mutex.lock();
ZMPacket *zm_packet = pktQueue.front();
for (
std::list<packetqueue_iterator *>::iterator iterators_it = iterators.begin();
iterators_it != iterators.end();
++iterators_it
) {
packetqueue_iterator *iterator_it = *iterators_it;
// Have to check each iterator and make sure it doesn't point to the packet we are about to delete
if ( *(*iterator_it) == zm_packet ) {
Debug(4, "Bumping it because it is at the front that we are deleting");
++(*iterators_it);
}
} // end foreach iterator
zm_packet->lock();
ZMPacket *packet = pktQueue.front();
pktQueue.pop_front();
packet_counts[packet->packet.stream_index] -= 1;
packet_counts[zm_packet->packet.stream_index] -= 1;
return packet;
}
mutex.unlock();
return zm_packet;
} // popPacket
/* Keeps frames_to_keep frames of the provided stream, which is normally the video stream.
 * It starts at the end and moves backwards until it has seen frames_to_keep video frames,
 * then continues backwards to the nearest keyframe, because the first video frame in the
 * queue must always be a keyframe. So frames_to_keep is really a minimum: extra frames
 * may be kept in order to start on a keyframe.
 */
unsigned int zm_packetqueue::clearQueue(unsigned int frames_to_keep, int stream_id) {
Debug(3, "Clearing all but %d frames, queue has %d", frames_to_keep, pktQueue.size());
frames_to_keep += 1;
if ( pktQueue.empty() ) {
Debug(3, "Queue is empty");
return 0;
}
std::list<ZMPacket *>::reverse_iterator it;
ZMPacket *packet = nullptr;
// If size is <= frames_to_keep since it could contain audio, we can't possibly do anything
if ( pktQueue.size() <= frames_to_keep ) {
return 0;
}
Debug(5, "Locking in clearQueue");
mutex.lock();
for ( it = pktQueue.rbegin(); it != pktQueue.rend() && frames_to_keep; ++it ) {
ZMPacket *zm_packet = *it;
packetqueue_iterator it = --pktQueue.end(); // point at the last element; end()-- would leave it at end
ZMPacket *zm_packet = nullptr;
while ( (it != pktQueue.begin()) and frames_to_keep ) {
zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
Debug(4, "Looking at packet with stream index (%d) with keyframe (%d), frames_to_keep is (%d)",
av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), frames_to_keep );
Debug(3, "Looking at packet with stream index (%d) with keyframe(%d), Image_index(%d) frames_to_keep is (%d)",
av_packet->stream_index, zm_packet->keyframe, zm_packet->image_index, frames_to_keep );
// Want frames_to_keep video keyframes. Otherwise, we may not have enough
if ( ( av_packet->stream_index == stream_id) ) {
//&& ( av_packet->flags & AV_PKT_FLAG_KEY ) ) {
if ( av_packet->stream_index == stream_id ) {
frames_to_keep --;
}
it --;
}
// Make sure we start on a keyframe
for ( ; it != pktQueue.rend(); ++it ) {
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
// Either at beginning or frames_to_keep == 0
Debug(5, "Looking for keyframe at packet with stream index (%d) with keyframe (%d), frames_to_keep is (%d)",
av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), frames_to_keep);
// Want frames_to_keep video keyframes. Otherwise, we may not have enough
if ( (av_packet->stream_index == stream_id) && (av_packet->flags & AV_PKT_FLAG_KEY) ) {
Debug(4, "Found keyframe at packet with stream index (%d) with keyframe (%d), frames_to_keep is (%d)",
av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), frames_to_keep);
break;
}
}
if ( it == pktQueue.begin() ) {
if ( frames_to_keep ) {
Debug(3, "Hit end of queue, still need (%d) video frames", frames_to_keep);
Warning("Couldn't remove any packets, needed %d", frames_to_keep);
}
if ( it != pktQueue.rend() ) {
// We want to keep this packet, so advance to the next
++it;
mutex.unlock();
return 0;
}
unsigned int delete_count = 0;
while ( it != pktQueue.rend() ) {
Debug(4, "Deleting a packet from the front, count is (%d)", delete_count);
packet = pktQueue.front();
int delete_count = 0;
// Else not at beginning, are pointing at packet before the last video packet
while ( pktQueue.begin() != it ) {
Debug(4, "Deleting a packet from the front, count is (%d), queue size is %d",
delete_count, pktQueue.size());
zm_packet = pktQueue.front();
for (
std::list<packetqueue_iterator *>::iterator iterators_it = iterators.begin();
iterators_it != iterators.end();
++iterators_it
) {
packetqueue_iterator *iterator_it = *iterators_it;
// Have to check each iterator and make sure it doesn't point to the packet we are about to delete
if ( *(*iterator_it) == zm_packet ) {
Debug(4, "Bumping it because it is at the front that we are deleting");
++(*iterators_it);
}
} // end foreach iterator
packet_counts[zm_packet->packet.stream_index] --;
pktQueue.pop_front();
packet_counts[packet->packet.stream_index] -= 1;
delete packet;
//if ( zm_packet->image_index == -1 )
delete zm_packet;
delete_count += 1;
}
packet = nullptr; // tidy up for valgrind
} // while our iterator is not the first packet
zm_packet = nullptr; // tidy up for valgrind
Debug(3, "Deleted %d packets, %d remaining", delete_count, pktQueue.size());
mutex.unlock();
return delete_count;
Debug(3, "Deleted packets, resulting size is %d", pktQueue.size());
mutex.unlock();
return delete_count;
} // end unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream_id )
void zm_packetqueue::clearQueue() {
Debug(4, "Clocking in clearQueue");
mutex.lock();
ZMPacket *packet = nullptr;
int delete_count = 0;
while ( !pktQueue.empty() ) {
packet = pktQueue.front();
packet_counts[packet->packet.stream_index] -= 1;
pktQueue.pop_front();
//if ( packet->image_index == -1 )
delete packet;
delete_count += 1;
}
Debug(3, "Deleted (%d) packets", delete_count );
for (
std::list<packetqueue_iterator *>::iterator iterators_it = iterators.begin();
iterators_it != iterators.end();
++iterators_it
) {
packetqueue_iterator *iterator_it = *iterators_it;
*iterator_it = pktQueue.begin();
} // end foreach iterator
mutex.unlock();
}
// clear queue keeping only specified duration of video -- return number of pkts removed
unsigned int zm_packetqueue::clearQueue(struct timeval *duration, int streamId) {
if (pktQueue.empty()) {
if ( pktQueue.empty() ) {
return 0;
}
struct timeval keep_from;
std::list<ZMPacket *>::reverse_iterator it;
it = pktQueue.rbegin();
Debug(4, "Locking in clearQueue");
mutex.lock();
timersub(&(*it)->timestamp, duration, &keep_from);
struct timeval keep_from;
std::list<ZMPacket *>::reverse_iterator it = pktQueue.rbegin();
struct timeval *t = (*it)->timestamp;
timersub(t, duration, &keep_from);
++it;
Debug(3, "Looking for frame before queue keep time with stream id (%d), queue has %d packets",
@ -222,18 +347,22 @@ unsigned int zm_packetqueue::clearQueue(struct timeval *duration, int streamId)
for ( ; it != pktQueue.rend(); ++it) {
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
if (av_packet->stream_index == streamId
&& timercmp( &zm_packet->timestamp, &keep_from, <= )) {
if (
(av_packet->stream_index == streamId)
and
timercmp(zm_packet->timestamp, &keep_from, <=)
) {
Debug(3, "Found frame before keep time with stream index %d at %d.%d",
av_packet->stream_index,
zm_packet->timestamp.tv_sec,
zm_packet->timestamp.tv_usec);
zm_packet->timestamp->tv_sec,
zm_packet->timestamp->tv_usec);
break;
}
}
if (it == pktQueue.rend()) {
if ( it == pktQueue.rend() ) {
Debug(1, "Didn't find a frame before queue preserve time. keeping all");
mutex.unlock();
return 0;
}
@ -241,31 +370,49 @@ unsigned int zm_packetqueue::clearQueue(struct timeval *duration, int streamId)
for ( ; it != pktQueue.rend(); ++it) {
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
if (av_packet->flags & AV_PKT_FLAG_KEY
&& av_packet->stream_index == streamId) {
if (
(av_packet->flags & AV_PKT_FLAG_KEY)
and
(av_packet->stream_index == streamId)
) {
Debug(3, "Found keyframe before start with stream index %d at %d.%d",
av_packet->stream_index,
zm_packet->timestamp.tv_sec,
zm_packet->timestamp.tv_usec );
zm_packet->timestamp->tv_sec,
zm_packet->timestamp->tv_usec );
break;
}
}
if ( it == pktQueue.rend() ) {
Debug(1, "Didn't find a keyframe before event starttime. keeping all" );
mutex.unlock();
return 0;
}
unsigned int deleted_frames = 0;
ZMPacket *zm_packet = nullptr;
while (distance(it, pktQueue.rend()) > 1) {
while ( distance(it, pktQueue.rend()) > 1 ) {
zm_packet = pktQueue.front();
for (
std::list<packetqueue_iterator *>::iterator iterators_it = iterators.begin();
iterators_it != iterators.end();
++iterators_it
) {
packetqueue_iterator *iterator_it = *iterators_it;
// Have to check each iterator and make sure it doesn't point to the packet we are about to delete
if ( *(*iterator_it) == zm_packet ) {
Debug(4, "Bumping it because it is at the front that we are deleting");
++(*iterators_it);
}
} // end foreach iterator
pktQueue.pop_front();
packet_counts[zm_packet->packet.stream_index] -= 1;
//if ( zm_packet->image_index == -1 )
delete zm_packet;
deleted_frames += 1;
}
zm_packet = nullptr;
Debug(3, "Deleted %d frames", deleted_frames);
mutex.unlock();
return deleted_frames;
}
@ -274,119 +421,161 @@ unsigned int zm_packetqueue::size() {
return pktQueue.size();
}
int zm_packetqueue::packet_count( int stream_id ) {
int zm_packetqueue::packet_count(int stream_id) {
return packet_counts[stream_id];
} // end int zm_packetqueue::packet_count( int stream_id )
} // end int zm_packetqueue::packet_count(int stream_id)
// Clear packets before the given timestamp.
// Must also take into account pre_event_count frames
void zm_packetqueue::clear_unwanted_packets(
timeval *recording_started,
int pre_event_count,
int mVideoStreamId) {
// Need to find the keyframe <= recording_started. Can get rid of audio packets.
if ( pktQueue.empty() )
return;
// Step 1 - find frame <= recording_started.
// Step 2 - go back pre_event_count
// Step 3 - find a keyframe
// Step 4 - pop packets until we get to the packet in step 3
std::list<ZMPacket *>::reverse_iterator it;
// Returns a packet. Packet will be locked
ZMPacket *zm_packetqueue::get_packet(packetqueue_iterator *it) {
if ( deleting or zm_terminate )
return nullptr;
// Step 1 - find frame <= recording_started.
Debug(3, "Looking for frame before start (%d.%d) recording stream id (%d), queue has %d packets",
recording_started->tv_sec, recording_started->tv_usec, mVideoStreamId, pktQueue.size());
for ( it = pktQueue.rbegin(); it != pktQueue.rend(); ++ it ) {
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
if (
( av_packet->stream_index == mVideoStreamId )
&&
timercmp( &(zm_packet->timestamp), recording_started, <= )
Debug(4, "Locking in get_packet using it %p queue end? %d, packet %p",
*it, (*it == pktQueue.end()), *(*it));
std::unique_lock<std::mutex> lck(mutex);
Debug(4, "Have Lock in get_packet");
while ( (!pktQueue.size()) or (*it == pktQueue.end()) ) {
if ( deleting or zm_terminate )
return nullptr;
Debug(2, "waiting. Queue size %d it == end? %d", pktQueue.size(), (*it == pktQueue.end()));
condition.wait(lck);
}
if ( deleting or zm_terminate )
return nullptr;
ZMPacket *p = *(*it);
if ( !p ) {
Error("Null p?!");
return nullptr;
}
Debug(3, "get_packet %p image_index: %d, about to lock packet", p, p->image_index);
while ( !(zm_terminate or deleting) and !p->trylock() ) {
Debug(3, "waiting. Queue size %d it == end? %d", pktQueue.size(), ( *it == pktQueue.end() ) );
condition.wait(lck);
}
Debug(2, "Locked packet, unlocking packetqueue mutex");
return p;
} // end ZMPacket *zm_packetqueue::get_packet(it)
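
get_packet() is the classic condition-variable consumer: sleep while there is nothing to hand out, and bail out once the queue is being torn down. A standalone sketch of the same pattern over a stand-in integer queue:

#include <condition_variable>
#include <deque>
#include <mutex>

struct Queue {
  std::deque<int> items;
  std::mutex mutex;
  std::condition_variable condition;
  bool deleting = false;

  // Returns an item, or -1 if the queue is shutting down.
  int get() {
    std::unique_lock<std::mutex> lck(mutex);
    while ( items.empty() && !deleting )
      condition.wait(lck);             // releases the lock while sleeping
    if ( deleting ) return -1;
    int v = items.front();
    items.pop_front();
    return v;
  }

  void put(int v) {
    { std::lock_guard<std::mutex> lck(mutex); items.push_back(v); }
    condition.notify_all();            // wake any blocked consumer
  }
};

int main() { Queue q; q.put(42); return q.get() == 42 ? 0 : 1; }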
bool zm_packetqueue::increment_it(packetqueue_iterator *it) {
Debug(2, "Incrementing %p, queue size %d, end? %d", it, pktQueue.size(), ((*it) == pktQueue.end()));
if ( (*it) == pktQueue.end() ) {
return false;
}
++(*it);
if ( *it != pktQueue.end() ) {
Debug(2, "Incrementing %p, %p still not at end %p, so returning true", it, *it, pktQueue.end());
return true;
}
return false;
} // end bool zm_packetqueue::increment_it(packetqueue_iterator *it)
// Increment it only considering packets for a given stream
bool zm_packetqueue::increment_it(packetqueue_iterator *it, int stream_id) {
Debug(2, "Incrementing %p, queue size %d, end? %d", it, pktQueue.size(), (*it == pktQueue.end()));
if ( *it == pktQueue.end() ) {
return false;
}
do {
++(*it);
} while ( (*it != pktQueue.end()) and ( (*(*it))->packet.stream_index != stream_id) );
if ( *it != pktQueue.end() ) {
Debug(2, "Incrementing %p, still not at end, so incrementing", it);
return true;
}
return false;
} // end bool zm_packetqueue::increment_it(packetqueue_iterator *it)
std::list<ZMPacket *>::iterator zm_packetqueue::get_event_start_packet_it(
std::list<ZMPacket *>::iterator snapshot_it,
unsigned int pre_event_count
) {
Debug(3, "Found frame before start with stream index %d at %d.%d",
av_packet->stream_index,
zm_packet->timestamp.tv_sec,
zm_packet->timestamp.tv_usec);
std::list<ZMPacket *>::iterator it = snapshot_it;
dumpPacket(&((*it)->packet));
// Step one count back pre_event_count frames as the minimum
// Do not assume that snapshot_it is video
// snapshot it might already point to the beginning
while ( ( it != pktQueue.begin() ) and pre_event_count ) {
Debug(1, "Previous packet pre_event_count %d stream_index %d keyframe %d", pre_event_count, (*it)->packet.stream_index, (*it)->keyframe);
dumpPacket(&((*it)->packet));
if ( (*it)->packet.stream_index == video_stream_id ) {
pre_event_count --;
if ( ! pre_event_count )
break;
}
Debug(3, "Not Found frame before start with stream index %d at %d.%d",
av_packet->stream_index,
zm_packet->timestamp.tv_sec,
zm_packet->timestamp.tv_usec);
it--;
}
// it either points to beginning or we have seen pre_event_count video packets.
if ( it == pktQueue.rend() ) {
Info("Didn't find a frame before event starttime. keeping all");
return;
}
Debug(1, "Seeking back %d frames", pre_event_count);
for ( ; pre_event_count && (it != pktQueue.rend()); ++ it ) {
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
if ( av_packet->stream_index == mVideoStreamId ) {
--pre_event_count;
}
}
if ( it == pktQueue.rend() ) {
Debug(1, "ran out of pre_event frames before event starttime. keeping all");
return;
}
Debug(3, "Looking for keyframe");
for ( ; it != pktQueue.rend(); ++ it ) {
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
if (
( av_packet->flags & AV_PKT_FLAG_KEY )
&&
( av_packet->stream_index == mVideoStreamId )
) {
Debug(3, "Found keyframe before start with stream index %d at %d.%d",
av_packet->stream_index,
zm_packet->timestamp.tv_sec,
zm_packet->timestamp.tv_usec );
break;
}
}
if ( it == pktQueue.rend() ) {
Debug(1, "Didn't find a keyframe before event starttime. keeping all" );
return;
}
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
Debug(3, "Found packet before start with stream index (%d) with keyframe (%d), distance(%d), size(%d)",
av_packet->stream_index,
( av_packet->flags & AV_PKT_FLAG_KEY ),
distance( it, pktQueue.rend() ),
pktQueue.size() );
unsigned int deleted_frames = 0;
ZMPacket *packet = nullptr;
while ( distance(it, pktQueue.rend()) > 1 ) {
//while ( pktQueue.rend() != it ) {
packet = pktQueue.front();
pktQueue.pop_front();
packet_counts[packet->packet.stream_index] -= 1;
delete packet;
deleted_frames += 1;
}
packet = nullptr; // tidy up for valgrind
zm_packet = pktQueue.front();
av_packet = &(zm_packet->packet);
if ( ( ! ( av_packet->flags & AV_PKT_FLAG_KEY ) ) || ( av_packet->stream_index != mVideoStreamId ) ) {
Error( "Done looking for keyframe. Deleted %d frames. Remaining frames in queue: %d stream of head packet is (%d), keyframe (%d), distance(%d), packets(%d)",
deleted_frames, pktQueue.size(), av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), distance( it, pktQueue.rend() ), pktQueue.size() );
if ( it == pktQueue.begin() ) {
Debug(1, "Hit begin");
// hit end, the first packet in the queue should ALWAYS be a video keyframe.
// So we should be able to return it.
if ( pre_event_count ) {
if ( (*it)->image_index < (int)pre_event_count ) {
// probably just starting up
Debug(1, "Hit end of packetqueue before satisfying pre_event_count. Needed %d more video frames", pre_event_count);
} else {
Debug(1, "Done looking for keyframe. Deleted %d frames. Remaining frames in queue: %d stream of head packet is (%d), keyframe (%d), distance(%d), packets(%d)",
deleted_frames, pktQueue.size(), av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), distance( it, pktQueue.rend() ), pktQueue.size() );
Warning("Hit end of packetqueue before satisfying pre_event_count. Needed %d more video frames", pre_event_count);
}
} // end void zm_packetqueue::clear_unwanted_packets( timeval *recording_started, int mVideoStreamId )
dumpPacket(&((*it)->packet));
}
return it;
}
// Not at beginning, so must be pointing at a video keyframe or maybe pre_event_count == 0
if ( (*it)->keyframe ) {
dumpPacket(&((*it)->packet), "Found video keyframe, Returning");
return it;
}
while ( it != pktQueue.begin() ) {
--it;  // step back before dereferencing so we never move past begin()
dumpPacket(&((*it)->packet), "No keyframe");
if ( (*it)->packet.stream_index == video_stream_id and (*it)->keyframe )
return it; // Success
}
if ( !(*it)->keyframe ) {
Warning("Hit end of packetqueue before satisfying pre_event_count. Needed %d more video frames", pre_event_count);
}
return it;
#if 0
std::list<ZMPacket *>::iterator it = snapshot_it.base();
// Step one count back pre_event_count frames as the minimum
// Do not assume that snapshot_it is video
while ( ( it++ != pktQueue.rend() ) and pre_event_count ) {
// Is video, maybe should compare stream_id instead
if ( *it->image_index != -1 ) {
pre_event_count --;
}
}
if ( it == pktQueue.rend() ) {
// hit end, the first packet in the queue should ALWAYS be a video keyframe.
// So we should be able to return it.
if ( pre_event_count )
Warning("Hit end of packetqueue before satisfying pre_event_count. Needed %d more video frames", pre_event_count);
return it.base();
}
if ( *it->keyframe ) {
return (it++).base();
}
while ( ( it++ != pktQueue.rend() ) and ! (*it)->keyframe ) { }
if ( it == pktQueue.rend() ) {
// hit end, the first packet in the queue should ALWAYS be a video keyframe.
// So we should be able to return it.
if ( pre_event_count )
Warning("Hit end of packetqueue before satisfying pre_event_count. Needed %d more video frames", pre_event_count);
return it.base();
}
return (it++).base();
#endif
}
void zm_packetqueue::dumpQueue() {
std::list<ZMPacket *>::reverse_iterator it;
@ -396,3 +585,43 @@ void zm_packetqueue::dumpQueue() {
dumpPacket(av_packet);
}
}
/* Returns an iterator to the first video keyframe in the queue.
 * Returns nullptr if the queue is being deleted, zm is terminating, or a null packet is encountered.
 */
packetqueue_iterator * zm_packetqueue::get_video_it(bool wait) {
packetqueue_iterator *it = new packetqueue_iterator;
iterators.push_back(it);
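// Track this iterator in the queue's list so it can be accounted for when packets are removed.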
std::unique_lock<std::mutex> lck(mutex);
*it = pktQueue.begin();
if ( wait ) {
while ( ((! pktQueue.size()) or (*it == pktQueue.end())) and !zm_terminate and !deleting ) {
Debug(2, "waiting. Queue size %d it == end? %d", pktQueue.size(), ( *it == pktQueue.end() ) );
condition.wait(lck);
*it = pktQueue.begin();
}
if ( deleting or zm_terminate ) {
delete it;
return nullptr;
}
}
while ( *it != pktQueue.end() ) {
ZMPacket *zm_packet = *(*it);
if ( !zm_packet ) {
Error("Null zmpacket in queue!?");
return nullptr;
}
Debug(1, "Packet keyframe %d for stream %d, so returning the it to it",
zm_packet->keyframe, zm_packet->packet.stream_index);
if ( zm_packet->keyframe and ( zm_packet->packet.stream_index == video_stream_id ) ) {
Debug(1, "Found a keyframe for stream %d, so returning the it to it", video_stream_id);
return it;
}
++(*it);
}
Debug(1, "DIdn't Found a keyframe for stream %d, so returning the it to it", video_stream_id);
return it;
}

View File

@ -25,17 +25,37 @@
//#include <boost/interprocess/allocators/allocator.hpp>
#include <list>
#include "zm_packet.h"
#include "zm_thread.h"
#include <mutex>
#include <condition_variable>
extern "C" {
#include <libavformat/avformat.h>
}
typedef std::list<ZMPacket *>::iterator packetqueue_iterator;
class zm_packetqueue {
public:
explicit zm_packetqueue(int max_stream_id);
public: // For now just to ease development
std::list<ZMPacket *> pktQueue;
std::list<ZMPacket *>::iterator analysis_it;
int video_stream_id;
int max_video_packet_count; // allow a negative value to someday mean unlimited
int max_stream_id;
int *packet_counts; /* packet count for each stream_id, to keep track of how many video vs audio packets are in the queue */
bool deleting;
std::list<packetqueue_iterator *> iterators;
std::mutex mutex;
std::condition_variable condition;
public:
zm_packetqueue(int p_max_video_packet_count, int p_video_stream_id, int p_audio_stream_id);
virtual ~zm_packetqueue();
bool queuePacket(AVPacket* packet, struct timeval *timestamp);
std::list<ZMPacket *>::const_iterator end() const { return pktQueue.end(); }
std::list<ZMPacket *>::const_iterator begin() const { return pktQueue.begin(); }
bool queuePacket(ZMPacket* packet);
bool queuePacket(AVPacket* packet);
ZMPacket * popPacket();
bool popVideoPacket(ZMPacket* packet);
bool popAudioPacket(ZMPacket* packet);
@ -44,13 +64,21 @@ public:
void clearQueue();
void dumpQueue();
unsigned int size();
unsigned int get_packet_count(int stream_id) const { return packet_counts[stream_id]; };
void clear_unwanted_packets(timeval *recording, int pre_event_count, int mVideoStreamId);
int packet_count(int stream_id);
private:
std::list<ZMPacket *> pktQueue;
int max_stream_id;
int *packet_counts; /* packet count for each stream_id, to keep track of how many video vs audio packets are in the queue */
bool increment_it(packetqueue_iterator *it);
bool increment_it(packetqueue_iterator *it, int stream_id);
ZMPacket *get_packet(packetqueue_iterator *);
packetqueue_iterator *get_video_it(bool wait);
packetqueue_iterator *get_stream_it(int stream_id);
std::list<ZMPacket *>::iterator get_event_start_packet_it(
packetqueue_iterator snapshot_it,
unsigned int pre_event_count
);
};
#endif /* ZM_PACKETQUEUE_H */

View File

@ -88,9 +88,8 @@ public:
virtual int Disconnect() = 0;
virtual int PreCapture() { return 0; };
virtual int PrimeCapture() { return 0; };
virtual int Capture( Image &image ) = 0;
virtual int Capture( ZMPacket &p ) = 0;
virtual int PostCapture() = 0;
virtual int CaptureAndRecord( Image &image, timeval recording, char* event_directory )=0;
int Read( int fd, char*buf, int size );
};

View File

@ -87,6 +87,7 @@ RemoteCameraHttp::RemoteCameraHttp(
if ( capture ) {
Initialise();
}
video_stream = NULL;
}
RemoteCameraHttp::~RemoteCameraHttp() {
@ -261,12 +262,12 @@ int RemoteCameraHttp::ReadData( Buffer &buffer, unsigned int bytes_expected ) {
// There can be lots of bytes available. I've seen 4MB or more. This will vastly inflate our buffer size unnecessarily.
if ( total_bytes_to_read > ZM_NETWORK_BUFSIZ ) {
total_bytes_to_read = ZM_NETWORK_BUFSIZ;
Debug(3, "Just getting 32K" );
Debug(4, "Just getting 32K" );
} else {
Debug(3, "Just getting %d", total_bytes_to_read );
Debug(4, "Just getting %d", total_bytes_to_read );
}
} // end if bytes_expected or not
Debug( 3, "Expecting %d bytes", total_bytes_to_read );
Debug( 4, "Expecting %d bytes", total_bytes_to_read );
int total_bytes_read = 0;
do {
@ -337,8 +338,7 @@ int RemoteCameraHttp::GetResponse() {
header_len = header_expr->MatchLength( 1 );
Debug(4, "Captured header (%d bytes):\n'%s'", header_len, header);
if ( status_expr->Match( header, header_len ) < 4 )
{
if ( status_expr->Match( header, header_len ) < 4 ) {
Error( "Unable to extract HTTP status from header" );
return( -1 );
}
@ -375,55 +375,43 @@ int RemoteCameraHttp::GetResponse() {
}
Debug( 3, "Got status '%d' (%s), http version %s", status_code, status_mesg, http_version );
if ( connection_expr->Match( header, header_len ) == 2 )
{
if ( connection_expr->Match( header, header_len ) == 2 ) {
connection_type = connection_expr->MatchString( 1 );
Debug( 3, "Got connection '%s'", connection_type );
}
if ( content_length_expr->Match( header, header_len ) == 2 )
{
if ( content_length_expr->Match( header, header_len ) == 2 ) {
content_length = atoi( content_length_expr->MatchString( 1 ) );
Debug( 3, "Got content length '%d'", content_length );
}
if ( content_type_expr->Match( header, header_len ) >= 2 )
{
if ( content_type_expr->Match( header, header_len ) >= 2 ) {
content_type = content_type_expr->MatchString( 1 );
Debug( 3, "Got content type '%s'\n", content_type );
if ( content_type_expr->MatchCount() > 2 )
{
if ( content_type_expr->MatchCount() > 2 ) {
content_boundary = content_type_expr->MatchString( 2 );
Debug( 3, "Got content boundary '%s'", content_boundary );
}
}
if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
{
if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) ) {
// Single image
mode = SINGLE_IMAGE;
format = JPEG;
state = CONTENT;
}
else if ( !strcasecmp( content_type, "image/x-rgb" ) )
{
} else if ( !strcasecmp( content_type, "image/x-rgb" ) ) {
// Single image
mode = SINGLE_IMAGE;
format = X_RGB;
state = CONTENT;
}
else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
{
} else if ( !strcasecmp( content_type, "image/x-rgbz" ) ) {
// Single image
mode = SINGLE_IMAGE;
format = X_RGBZ;
state = CONTENT;
}
else if ( !strcasecmp( content_type, "multipart/x-mixed-replace" ) )
{
} else if ( !strcasecmp( content_type, "multipart/x-mixed-replace" ) ) {
// Image stream, so start processing
if ( !content_boundary[0] )
{
if ( !content_boundary[0] ) {
Error( "No content boundary found in header '%s'", header );
return( -1 );
}
@ -434,15 +422,12 @@ int RemoteCameraHttp::GetResponse() {
//{
//// MPEG stream, coming soon!
//}
else
{
else {
Error( "Unrecognised content type '%s'", content_type );
return( -1 );
}
buffer.consume( header_len );
}
else
{
} else {
Debug( 3, "Unable to extract header from stream, retrying" );
//return( -1 );
}
@ -454,39 +439,33 @@ int RemoteCameraHttp::GetResponse() {
static RegExpr *subcontent_length_expr = nullptr;
static RegExpr *subcontent_type_expr = nullptr;
if ( !subheader_expr )
{
if ( !subheader_expr ) {
char subheader_pattern[256] = "";
snprintf( subheader_pattern, sizeof(subheader_pattern), "^((?:\r?\n){0,2}?(?:--)?%s\r?\n.+?\r?\n\r?\n)", content_boundary );
subheader_expr = new RegExpr( subheader_pattern, PCRE_DOTALL );
}
if ( subheader_expr->Match( (char *)buffer, (int)buffer ) == 2 )
{
if ( subheader_expr->Match( (char *)buffer, (int)buffer ) == 2 ) {
subheader = subheader_expr->MatchString( 1 );
subheader_len = subheader_expr->MatchLength( 1 );
Debug( 4, "Captured subheader (%d bytes):'%s'", subheader_len, subheader );
if ( !subcontent_length_expr )
subcontent_length_expr = new RegExpr( "Content-length: ?([0-9]+)\r?\n", PCRE_CASELESS );
if ( subcontent_length_expr->Match( subheader, subheader_len ) == 2 )
{
if ( subcontent_length_expr->Match( subheader, subheader_len ) == 2 ) {
content_length = atoi( subcontent_length_expr->MatchString( 1 ) );
Debug( 3, "Got subcontent length '%d'", content_length );
}
if ( !subcontent_type_expr )
subcontent_type_expr = new RegExpr( "Content-type: ?(.+?)\r?\n", PCRE_CASELESS );
if ( subcontent_type_expr->Match( subheader, subheader_len ) == 2 )
{
if ( subcontent_type_expr->Match( subheader, subheader_len ) == 2 ) {
content_type = subcontent_type_expr->MatchString( 1 );
Debug( 3, "Got subcontent type '%s'", content_type );
}
buffer.consume( subheader_len );
state = CONTENT;
}
else
{
} else {
Debug( 3, "Unable to extract subheader from stream, retrying" );
buffer_len = GetData();
if ( buffer_len < 0 ) {
@ -506,28 +485,19 @@ int RemoteCameraHttp::GetResponse() {
*semicolon = '\0';
}
if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) )
{
if ( !strcasecmp( content_type, "image/jpeg" ) || !strcasecmp( content_type, "image/jpg" ) ) {
format = JPEG;
}
else if ( !strcasecmp( content_type, "image/x-rgb" ) )
{
} else if ( !strcasecmp( content_type, "image/x-rgb" ) ) {
format = X_RGB;
}
else if ( !strcasecmp( content_type, "image/x-rgbz" ) )
{
} else if ( !strcasecmp( content_type, "image/x-rgbz" ) ) {
format = X_RGBZ;
}
else
{
} else {
Error( "Found unsupported content type '%s'", content_type );
return( -1 );
}
if ( content_length )
{
while ( ((long)buffer.size() < content_length ) && ! zm_terminate )
{
if ( content_length ) {
while ( ((long)buffer.size() < content_length ) && ! zm_terminate ) {
Debug(3, "Need more data buffer %d < content length %d", buffer.size(), content_length );
int bytes_read = GetData();
@ -538,11 +508,8 @@ int RemoteCameraHttp::GetResponse() {
bytes += bytes_read;
}
Debug( 3, "Got end of image by length, content-length = %d", content_length );
}
else
{
while ( !content_length )
{
} else {
while ( !content_length ) {
buffer_len = GetData();
if ( buffer_len < 0 ) {
Error( "Unable to read content" );
@ -550,16 +517,13 @@ int RemoteCameraHttp::GetResponse() {
}
bytes += buffer_len;
static RegExpr *content_expr = 0;
if ( mode == MULTI_IMAGE )
{
if ( !content_expr )
{
if ( mode == MULTI_IMAGE ) {
if ( !content_expr ) {
char content_pattern[256] = "";
snprintf( content_pattern, sizeof(content_pattern), "^(.+?)(?:\r?\n)*(?:--)?%s\r?\n", content_boundary );
content_expr = new RegExpr( content_pattern, PCRE_DOTALL );
}
if ( content_expr->Match( buffer, buffer.size() ) == 2 )
{
if ( content_expr->Match( buffer, buffer.size() ) == 2 ) {
content_length = content_expr->MatchLength( 1 );
Debug( 3, "Got end of image by pattern, content-length = %d", content_length );
}
@ -1067,7 +1031,7 @@ int RemoteCameraHttp::GetResponse() {
}
}
return( 0 );
}
} // end RemoteCameraHttp::GetResponse
int RemoteCameraHttp::PrimeCapture() {
if ( sd < 0 ) {
@ -1098,9 +1062,9 @@ int RemoteCameraHttp::PreCapture() {
}
}
return 0;
}
} // end int RemoteCameraHttp::PreCapture()
int RemoteCameraHttp::Capture( Image &image ) {
int RemoteCameraHttp::Capture( ZMPacket &packet ) {
int content_length = GetResponse();
if ( content_length == 0 ) {
Warning( "Unable to capture image, retrying" );
@ -1111,46 +1075,54 @@ int RemoteCameraHttp::Capture( Image &image ) {
Disconnect();
return -1;
}
Image *image = packet.image;
switch( format ) {
case JPEG :
{
if ( !image.DecodeJpeg( buffer.extract( content_length ), content_length, colours, subpixelorder ) ) {
if ( !image->DecodeJpeg( buffer.extract( content_length ), content_length, colours, subpixelorder ) ) {
Error( "Unable to decode jpeg" );
Disconnect();
return -1;
}
break;
}
case X_RGB :
{
if ( content_length != (long)image.Size() ) {
Error( "Image length mismatch, expected %d bytes, content length was %d", image.Size(), content_length );
if ( content_length != (long)image->Size() ) {
Error( "Image length mismatch, expected %d bytes, content length was %d", image->Size(), content_length );
Disconnect();
return -1;
}
image.Assign( width, height, colours, subpixelorder, buffer, imagesize );
image->Assign( width, height, colours, subpixelorder, buffer, imagesize );
break;
}
case X_RGBZ :
{
if ( !image.Unzip( buffer.extract( content_length ), content_length ) ) {
if ( !image->Unzip( buffer.extract( content_length ), content_length ) ) {
Error( "Unable to unzip RGB image" );
Disconnect();
return -1;
}
image.Assign( width, height, colours, subpixelorder, buffer, imagesize );
image->Assign( width, height, colours, subpixelorder, buffer, imagesize );
break;
}
default :
{
Error( "Unexpected image format encountered" );
Disconnect();
return -1;
}
}
return 1;
}
} // end int RemoteCameraHttp::Capture( ZMPacket &packet )
int RemoteCameraHttp::PostCapture() {
return 0;
}
AVStream *RemoteCameraHttp::get_VideoStream() {
if ( !video_stream ) {
AVFormatContext *oc = avformat_alloc_context();
video_stream = avformat_new_stream( oc, NULL );
if ( video_stream ) {
video_stream->codec->width = width;
video_stream->codec->height = height;
video_stream->codec->pix_fmt = GetFFMPEGPixelFormat(colours,subpixelorder);
}
}
return video_stream;
}

View File

@ -44,7 +44,22 @@ protected:
enum { SIMPLE, REGEXP } method;
public:
RemoteCameraHttp( unsigned int p_monitor_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
RemoteCameraHttp(
unsigned int p_monitor_id,
const std::string &method,
const std::string &host,
const std::string &port,
const std::string &path,
int p_width,
int p_height,
int p_colours,
int p_brightness,
int p_contrast,
int p_hue,
int p_colour,
bool p_capture,
bool p_record_audio
);
~RemoteCameraHttp();
void Initialise();
@ -57,9 +72,9 @@ public:
int GetResponse();
int PrimeCapture();
int PreCapture();
int Capture( Image &image );
int Capture( ZMPacket &p );
int PostCapture();
int CaptureAndRecord( Image &image, timeval recording, char* event_directory ) {return 0;};
AVStream* get_VideoStream();
int Close() { Disconnect(); return 0; };
};

View File

@ -68,6 +68,7 @@ RemoteCameraNVSocket::RemoteCameraNVSocket(
timeout.tv_sec = 0;
timeout.tv_usec = 0;
subpixelorder = ZM_SUBPIX_ORDER_BGR;
video_stream = NULL;
if ( capture ) {
Initialise();
@ -137,13 +138,13 @@ int RemoteCameraNVSocket::Disconnect() {
}
int RemoteCameraNVSocket::SendRequest( std::string request ) {
Debug( 4, "Sending request: %s", request.c_str() );
//Debug( 4, "Sending request: %s", request.c_str() );
if ( write( sd, request.data(), request.length() ) < 0 ) {
Error( "Can't write: %s", strerror(errno) );
Disconnect();
return( -1 );
}
Debug( 4, "Request sent" );
//Debug( 4, "Request sent" );
return( 0 );
}
@ -178,11 +179,12 @@ int RemoteCameraNVSocket::PrimeCapture() {
Disconnect();
return -1;
}
mVideoStreamId=0;
return 0;
}
int RemoteCameraNVSocket::Capture( Image &image ) {
int RemoteCameraNVSocket::Capture( ZMPacket &zm_packet ) {
if ( SendRequest("GetNextImage\n") < 0 ) {
Warning( "Unable to capture image, retrying" );
return 0;
@ -202,10 +204,35 @@ int RemoteCameraNVSocket::Capture( Image &image ) {
return 0;
}
image.Assign(width, height, colours, subpixelorder, buffer, imagesize);
zm_packet.image->Assign(width, height, colours, subpixelorder, buffer, imagesize);
zm_packet.keyframe = 1;
return 1;
}
int RemoteCameraNVSocket::PostCapture() {
return( 0 );
}
AVStream *RemoteCameraNVSocket::get_VideoStream() {
if ( ! video_stream ) {
AVFormatContext *oc = avformat_alloc_context();
video_stream = avformat_new_stream( oc, NULL );
if ( video_stream ) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
video_stream->codecpar->width = width;
video_stream->codecpar->height = height;
video_stream->codecpar->format = GetFFMPEGPixelFormat(colours,subpixelorder);
video_stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
#else
video_stream->codec->width = width;
video_stream->codec->height = height;
video_stream->codec->pix_fmt = GetFFMPEGPixelFormat(colours,subpixelorder);
video_stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
#endif
} else {
Error("Can't create video stream");
}
} else {
Debug(5,"Have videostream");
}
return video_stream;
}

View File

@ -61,12 +61,11 @@ public:
int Connect();
int Disconnect();
int SendRequest( std::string );
int ReadData( Buffer &buffer, unsigned int bytes_expected=0 );
int GetResponse();
int PrimeCapture();
int Capture( Image &image );
int Capture( ZMPacket &p );
int PostCapture();
int CaptureAndRecord( Image &image, timeval recording, char* event_directory ) {return(0);};
AVStream* get_VideoStream();
int Close() { return 0; };
};

View File

@ -69,14 +69,8 @@ RemoteCameraRtsp::RemoteCameraRtsp(
mAudioStreamId = -1;
mCodecContext = nullptr;
mCodec = nullptr;
mRawFrame = nullptr;
mFrame = nullptr;
frameCount = 0;
startTime=0;
#if HAVE_LIBSWSCALE
mConvertContext = nullptr;
#endif
/* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
if ( colours == ZM_COLOUR_RGB32 ) {
subpixelorder = ZM_SUBPIX_ORDER_RGBA;
@ -93,15 +87,6 @@ RemoteCameraRtsp::RemoteCameraRtsp(
} // end RemoteCameraRtsp::RemoteCameraRtsp(...)
RemoteCameraRtsp::~RemoteCameraRtsp() {
av_frame_free(&mFrame);
av_frame_free(&mRawFrame);
#if HAVE_LIBSWSCALE
if ( mConvertContext ) {
sws_freeContext(mConvertContext);
mConvertContext = nullptr;
}
#endif
if ( mCodecContext ) {
avcodec_close(mCodecContext);
@ -193,7 +178,12 @@ int RemoteCameraRtsp::PrimeCapture() {
Debug(3, "Unable to locate audio stream");
// Get a pointer to the codec context for the video stream
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
mCodecContext = avcodec_alloc_context3(NULL);
avcodec_parameters_to_context(mCodecContext, mFormatContext->streams[mVideoStreamId]->codecpar);
#else
mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
#endif
// Find the decoder for the video stream
mCodec = avcodec_find_decoder(mCodecContext->codec_id);
@ -208,15 +198,6 @@ int RemoteCameraRtsp::PrimeCapture() {
#endif
Panic("Can't open codec");
// Allocate space for the native video frame
mRawFrame = zm_av_frame_alloc();
// Allocate space for the converted video frame
mFrame = zm_av_frame_alloc();
if ( mRawFrame == nullptr || mFrame == nullptr )
Fatal("Unable to allocate frame(s)");
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
int pSize = av_image_get_buffer_size(imagePixFormat, width, height, 1);
#else
@ -226,23 +207,9 @@ int RemoteCameraRtsp::PrimeCapture() {
if ( (unsigned int)pSize != imagesize ) {
Fatal("Image size mismatch. Required: %d Available: %d", pSize, imagesize);
}
/*
#if HAVE_LIBSWSCALE
if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
}
if(!sws_isSupportedOutput(imagePixFormat)) {
Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
}
#else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
*/
return 0;
}
return 1;
} // end PrimeCapture
int RemoteCameraRtsp::PreCapture() {
if ( !rtspThread->isRunning() )
@ -251,22 +218,14 @@ int RemoteCameraRtsp::PreCapture() {
Error("Cannot precapture, no RTP sources");
return -1;
}
return 0;
return 1;
}
int RemoteCameraRtsp::Capture( Image &image ) {
AVPacket packet;
uint8_t* directbuffer;
int RemoteCameraRtsp::Capture(ZMPacket &zm_packet) {
int frameComplete = false;
AVPacket *packet = &zm_packet.packet;
/* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if ( directbuffer == nullptr ) {
Error("Failed requesting writeable buffer for the captured image.");
return -1;
}
while ( true ) {
while ( !frameComplete ) {
buffer.clear();
if ( !rtspThread->isRunning() )
return -1;
@ -283,7 +242,8 @@ int RemoteCameraRtsp::Capture( Image &image ) {
int nalType = (buffer.head()[3] & 0x1f);
// SPS The SPS NAL unit contains parameters that apply to a series of consecutive coded video pictures
if ( nalType == 7 ) {
if ( nalType == 1 ) {
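// NAL type 1 is a coded slice of a non-IDR picture; fall through and decode it as-is.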
} else if ( nalType == 7 ) {
lastSps = buffer;
continue;
} else if ( nalType == 8 ) {
@ -291,6 +251,8 @@ int RemoteCameraRtsp::Capture( Image &image ) {
lastPps = buffer;
continue;
} else if ( nalType == 5 ) {
packet->flags |= AV_PKT_FLAG_KEY;
zm_packet.keyframe = 1;
// IDR
buffer += lastSps;
buffer += lastPps;
@ -301,92 +263,31 @@ int RemoteCameraRtsp::Capture( Image &image ) {
Debug(3, "Not an h264 packet");
}
av_init_packet(packet);
while ( (!frameComplete) && (buffer.size() > 0) ) {
packet.data = buffer.head();
packet.size = buffer.size();
bytes += packet.size;
packet->data = buffer.head();
packet->size = buffer.size();
bytes += packet->size;
// So I think this is the magic decode step. Result is a raw image?
int len = zm_send_packet_receive_frame(mCodecContext, mRawFrame, packet);
if ( len < 0 ) {
if ( 1 != zm_packet.decode(mCodecContext) ) {
Error("Error while decoding frame %d", frameCount);
Hexdump(Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size());
buffer.clear();
continue;
break;
}
int len = packet->size;
zm_packet.codec_type = mCodecContext->codec_type;
frameComplete = true;
Debug(2, "Frame: %d - %d/%d", frameCount, len, buffer.size());
//if ( buffer.size() < 400 )
//Hexdump( 0, buffer.head(), buffer.size() );
buffer -= len;
}
// At this point, we either have a frame or ran out of buffer. What happens if we run out of buffer?
if ( frameComplete ) {
Debug(3, "Got frame %d", frameCount);
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
// From what I've read, we should align the linesizes to 32bit so that ffmpeg can use SIMD instructions too.
int size = av_image_fill_arrays(
mFrame->data, mFrame->linesize,
directbuffer, imagePixFormat, width, height,
(AV_PIX_FMT_RGBA == imagePixFormat ? 32 : 1)
);
if ( size < 0 ) {
Error("Problem setting up data pointers into image %s",
av_make_error_string(size).c_str());
}
#else
avpicture_fill((AVPicture *)mFrame, directbuffer, imagePixFormat, width, height);
#endif
#if HAVE_LIBSWSCALE
if ( mConvertContext == nullptr ) {
mConvertContext = sws_getContext(
mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt,
width, height, imagePixFormat, SWS_BICUBIC, nullptr, nullptr, nullptr);
if ( mConvertContext == nullptr )
Fatal("Unable to create conversion context");
if (
((unsigned int)mRawFrame->width != width)
||
((unsigned int)mRawFrame->height != height)
) {
Warning("Monitor dimensions are %dx%d but camera is sending %dx%d",
width, height, mRawFrame->width, mRawFrame->height);
}
}
if ( sws_scale(mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize) < 0 )
Fatal("Unable to convert raw format %u to target format %u at frame %d",
mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
Fatal("You must compile ffmpeg with the --enable-swscale option to use RTSP cameras");
#endif // HAVE_LIBSWSCALE
frameCount++;
} /* frame complete */
zm_av_packet_unref(&packet);
} /* getFrame() */
if ( frameComplete )
return 1;
} // end while true
// can never get here.
return 0;
}
//Function to handle capture and store
return 1;
} // end int RemoteCameraRtsp::Capture(ZMPacket &packet)
int RemoteCameraRtsp::PostCapture() {
return 0;
return 1;
}
#endif // HAVE_LIBAVFORMAT

View File

@ -55,22 +55,10 @@ protected:
#if HAVE_LIBAVFORMAT
AVFormatContext *mFormatContext;
int mVideoStreamId;
int mAudioStreamId;
AVCodecContext *mCodecContext;
AVCodec *mCodec;
AVFrame *mRawFrame;
AVFrame *mFrame;
_AVPIXELFORMAT imagePixFormat;
#endif // HAVE_LIBAVFORMAT
bool wasRecording;
VideoStore *videoStore;
char oldDirectory[4096];
int64_t startTime;
#if HAVE_LIBSWSCALE
struct SwsContext *mConvertContext;
#endif
public:
RemoteCameraRtsp( unsigned int p_monitor_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
@ -83,10 +71,21 @@ public:
int PrimeCapture();
int PreCapture();
int Capture( Image &image );
int Capture( ZMPacket &p );
int PostCapture();
int CaptureAndRecord( Image &image, timeval recording, char* event_directory ) {return 0;};
int Close() { return 0; };
AVStream *get_VideoStream() {
if ( mVideoStreamId != -1 )
return mFormatContext->streams[mVideoStreamId];
return nullptr;
}
AVStream *get_AudioStream() {
if ( mAudioStreamId != -1 )
return mFormatContext->streams[mAudioStreamId];
return nullptr;
}
AVCodecContext *get_VideoCodecContext() { return mVideoCodecContext; };
AVCodecContext *get_AudioCodecContext() { return mAudioCodecContext; };
};
#endif // ZM_REMOTE_CAMERA_RTSP_H

View File

@ -318,7 +318,7 @@ bool RtpSource::handlePacket(const unsigned char *packet, size_t packetLen) {
// What is the point of this for loop? Is it just me, or will it call getUpdatedValue once or twice? Could it not be better written as
// if ( ! mFrameProcessed.getUpdatedValue( 1 ) && mFrameProcessed.getUpdatedValue( 1 ) ) return false;
for ( int count = 0; !mFrameProcessed.getUpdatedValue( 1 ); count++ )
for ( int count = 0; !mFrameProcessed.getUpdatedValue(1); count++ )
if ( count > 1 )
return false;
}
@ -352,10 +352,11 @@ bool RtpSource::getFrame(Buffer &buffer) {
if ( !mFrameReady.getValueImmediate() ) {
Debug(3, "Getting frame but not ready");
// Allow for a couple of spurious returns
for ( int count = 0; !mFrameReady.getUpdatedValue(1); count++ )
for ( int count = 0; !mFrameReady.getUpdatedValue(1); count++ ) {
if ( count > 1 )
return false;
}
}
buffer = mFrame;
mFrameReady.setValueImmediate(false);
mFrameProcessed.updateValueSignal(true);

View File

@ -376,8 +376,8 @@ int RtspThread::run() {
try {
mSessDesc = new SessionDescriptor( mUrl, sdp );
mFormatContext = mSessDesc->generateFormatContext();
} catch( const Exception &e ) {
Error( e.getMessage().c_str() );
} catch ( const Exception &e ) {
Error(e.getMessage().c_str());
return -1;
}
@ -411,7 +411,7 @@ int RtspThread::run() {
{
// Check if control Url is absolute or relative
controlUrl = mediaDesc->getControlUrl();
if (std::equal(trackUrl.begin(), trackUrl.end(), controlUrl.begin())) {
if ( std::equal(trackUrl.begin(), trackUrl.end(), controlUrl.begin()) ) {
trackUrl = controlUrl;
} else {
if ( *trackUrl.rbegin() != '/') {
@ -422,46 +422,36 @@ int RtspThread::run() {
}
rtpClock = mediaDesc->getClock();
codecId = mFormatContext->streams[i]->codec->codec_id;
// Hackery pokery
//rtpClock = mFormatContext->streams[i]->codec->sample_rate;
break;
}
}
}
switch( mMethod ) {
switch ( mMethod ) {
case RTP_UNICAST :
{
localPorts[0] = requestPorts();
localPorts[1] = localPorts[0]+1;
message = "SETUP "+trackUrl+" RTSP/1.0\r\nTransport: RTP/AVP;unicast;client_port="+stringtf( "%d", localPorts[0] )+"-"+stringtf( "%d", localPorts[1] )+"\r\n";
break;
}
case RTP_MULTICAST :
{
message = "SETUP "+trackUrl+" RTSP/1.0\r\nTransport: RTP/AVP;multicast\r\n";
break;
}
case RTP_RTSP :
case RTP_RTSP_HTTP :
{
message = "SETUP "+trackUrl+" RTSP/1.0\r\nTransport: RTP/AVP/TCP;unicast\r\n";
break;
}
default:
{
Panic( "Got unexpected method %d", mMethod );
break;
}
}
if ( !sendCommand( message ) )
return( -1 );
if ( !recvResponse( response ) )
return( -1 );
if ( !sendCommand(message) )
return -1;
if ( !recvResponse(response) )
return -1;
lines = split( response, "\r\n" );
lines = split(response, "\r\n");
std::string session;
int timeout = 0;
char transport[256] = "";
@ -473,18 +463,18 @@ int RtspThread::run() {
if ( sessionLine.size() == 2 )
sscanf( trimSpaces( sessionLine[1] ).c_str(), "timeout=%d", &timeout );
}
sscanf( lines[i].c_str(), "Transport: %s", transport );
sscanf(lines[i].c_str(), "Transport: %s", transport);
}
if ( session.empty() )
Fatal( "Unable to get session identifier from response '%s'", response.c_str() );
Fatal("Unable to get session identifier from response '%s'", response.c_str());
Debug( 2, "Got RTSP session %s, timeout %d secs", session.c_str(), timeout );
Debug(2, "Got RTSP session %s, timeout %d secs", session.c_str(), timeout);
if ( !transport[0] )
Fatal( "Unable to get transport details from response '%s'", response.c_str() );
Fatal("Unable to get transport details from response '%s'", response.c_str());
Debug( 2, "Got RTSP transport %s", transport );
Debug(2, "Got RTSP transport %s", transport);
std::string method = "";
int remotePorts[2] = { 0, 0 };
@ -531,23 +521,23 @@ int RtspThread::run() {
Debug( 2, "RTSP Remote Channels are %d/%d", remoteChannels[0], remoteChannels[1] );
message = "PLAY "+mUrl+" RTSP/1.0\r\nSession: "+session+"\r\nRange: npt=0.000-\r\n";
if ( !sendCommand( message ) )
return( -1 );
if ( !recvResponse( response ) )
return( -1 );
if ( !sendCommand(message) )
return -1;
if ( !recvResponse(response) )
return -1;
lines = split( response, "\r\n" );
lines = split(response, "\r\n");
std::string rtpInfo;
for ( size_t i = 0; i < lines.size(); i++ ) {
if ( ( lines[i].size() > 9 ) && ( lines[i].substr( 0, 9 ) == "RTP-Info:" ) )
rtpInfo = trimSpaces( lines[i].substr( 9 ) );
if ( ( lines[i].size() > 9 ) && ( lines[i].substr(0, 9) == "RTP-Info:" ) )
rtpInfo = trimSpaces(lines[i].substr(9));
// Check for a timeout again. Some rtsp devices don't send a timeout until after the PLAY command is sent
if ( ( lines[i].size() > 8 ) && ( lines[i].substr( 0, 8 ) == "Session:" ) && ( timeout == 0 ) ) {
StringVector sessionLine = split( lines[i].substr(9), ";" );
if ( ( lines[i].size() > 8 ) && ( lines[i].substr(0, 8) == "Session:" ) && ( timeout == 0 ) ) {
StringVector sessionLine = split(lines[i].substr(9), ";");
if ( sessionLine.size() == 2 )
sscanf( trimSpaces( sessionLine[1] ).c_str(), "timeout=%d", &timeout );
sscanf(trimSpaces(sessionLine[1]).c_str(), "timeout=%d", &timeout);
if ( timeout > 0 )
Debug( 2, "Got timeout %d secs from PLAY command response", timeout );
Debug(2, "Got timeout %d secs from PLAY command response", timeout);
}
}

View File

@ -0,0 +1,46 @@
/* ---------------------------------------------------------------------------
**
** ADTS_DeviceSource.cpp
**
** ADTS Live555 source
**
** -------------------------------------------------------------------------*/
#include <sstream>
#include <iomanip>
// live555
#include <Base64.hh>
#include "zm_rtsp_server_adts_source.h"
static unsigned const samplingFrequencyTable[16] = {
96000, 88200, 64000, 48000,
44100, 32000, 24000, 22050,
16000, 12000, 11025, 8000,
7350, 0, 0, 0
};
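// The 4-bit sampling_frequency_index field of an ADTS header is an index into this table.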
// ---------------------------------
// ADTS ZoneMinder FramedSource
// ---------------------------------
//
ADTS_ZoneMinderDeviceSource::ADTS_ZoneMinderDeviceSource(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize
)
:
ZoneMinderDeviceSource(env, monitor, stream, queueSize),
samplingFrequencyIndex(0),
channels(stream->codecpar->channels)
{
std::ostringstream os;
os <<
"profile-level-id=1;"
"mode=AAC-hbr;sizelength=13;indexlength=3;"
"indexdeltalength=3"
//<< extradata2psets(nullptr, m_stream)
<< "\r\n";
m_auxLine.assign(os.str());
}

View File

@ -0,0 +1,67 @@
/* ---------------------------------------------------------------------------
** This software is in the public domain, furnished "as is", without technical
** support, and with no warranty, express or implied, as to its usefulness for
** any purpose.
**
** ADTS_ZoneMinderDeviceSource.h
**
** ADTS ZoneMinder live555 source
**
** -------------------------------------------------------------------------*/
#ifndef ADTS_ZoneMinder_DEVICE_SOURCE
#define ADTS_ZoneMinder_DEVICE_SOURCE
// project
#include "zm_rtsp_server_device_source.h"
#include "zm_rtsp_server_frame.h"
// ---------------------------------
// ADTS(AAC) ZoneMinder FramedSource
// ---------------------------------
class ADTS_ZoneMinderDeviceSource : public ZoneMinderDeviceSource {
public:
static ADTS_ZoneMinderDeviceSource* createNew(
UsageEnvironment& env,
Monitor* monitor,
AVStream * stream,
unsigned int queueSize
) {
Debug(1, "m_stream %p codecpar %p channels %d",
stream, stream->codecpar, stream->codecpar->channels);
return new ADTS_ZoneMinderDeviceSource(env, monitor, stream, queueSize);
};
protected:
ADTS_ZoneMinderDeviceSource(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize
);
virtual ~ADTS_ZoneMinderDeviceSource() {}
/*
virtual unsigned char* extractFrame(unsigned char* frame, size_t& size, size_t& outsize);
virtual unsigned char* findMarker(unsigned char *frame, size_t size, size_t &length);
*/
public:
int samplingFrequency() { return m_stream->codecpar->sample_rate; };
const char *configStr() { return config.c_str(); };
int numChannels() {
Debug(1, "this %p m_stream %p channels %d",
this, m_stream, channels);
Debug(1, "m_stream %p codecpar %p channels %d => %d",
m_stream, m_stream->codecpar, m_stream->codecpar->channels, channels);
return channels;
}
protected:
std::string config;
int samplingFrequencyIndex;
int channels;
};
#endif

View File

@ -0,0 +1,207 @@
/* ---------------------------------------------------------------------------
** This software is in the public domain, furnished "as is", without technical
** support, and with no warranty, express or implied, as to its usefulness for
** any purpose.
**
**
** ZoneMinder Live555 source
**
** -------------------------------------------------------------------------*/
#include <utility>
#include "zm_rtsp_server_device_source.h"
#include "zm_rtsp_server_frame.h"
#include "zm_logger.h"
ZoneMinderDeviceSource::ZoneMinderDeviceSource(
UsageEnvironment& env,
Monitor* monitor,
AVStream *stream,
unsigned int queueSize
) :
FramedSource(env),
m_stream(stream),
m_monitor(monitor),
m_packetqueue(nullptr),
m_packetqueue_it(nullptr),
m_queueSize(queueSize)
{
m_eventTriggerId = envir().taskScheduler().createEventTrigger(ZoneMinderDeviceSource::deliverFrameStub);
memset(&m_thid, 0, sizeof(m_thid));
memset(&m_mutex, 0, sizeof(m_mutex));
if ( m_monitor ) {
m_packetqueue = m_monitor->GetPacketQueue();
if ( !m_packetqueue ) {
Fatal("No packetqueue");
}
pthread_mutex_init(&m_mutex, nullptr);
pthread_create(&m_thid, nullptr, threadStub, this);
} else {
Error("No monitor in ZoneMinderDeviceSource");
}
}
ZoneMinderDeviceSource::~ZoneMinderDeviceSource() {
stop = 1;
envir().taskScheduler().deleteEventTrigger(m_eventTriggerId);
pthread_join(m_thid, nullptr);
pthread_mutex_destroy(&m_mutex);
}
// thread mainloop
void* ZoneMinderDeviceSource::thread() {
stop = 0;
while ( !stop ) {
getNextFrame();
}
return nullptr;
}
// getting FrameSource callback
void ZoneMinderDeviceSource::doGetNextFrame() {
deliverFrame();
}
// stopping FrameSource callback
void ZoneMinderDeviceSource::doStopGettingFrames() {
Debug(1, "ZoneMinderDeviceSource::doStopGettingFrames");
FramedSource::doStopGettingFrames();
}
// deliver frame to the sink
void ZoneMinderDeviceSource::deliverFrame() {
if ( !isCurrentlyAwaitingData() ) {
Debug(4, "not awaiting data");
return;
}
pthread_mutex_lock(&m_mutex);
if ( m_captureQueue.empty() ) {
Debug(4, "Queue is empty");
pthread_mutex_unlock(&m_mutex);
return;
}
NAL_Frame *frame = m_captureQueue.front();
m_captureQueue.pop_front();
pthread_mutex_unlock(&m_mutex);
fDurationInMicroseconds = 0;
fFrameSize = 0;
unsigned int nal_size = frame->size();
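// live555 gives us at most fMaxSize bytes in fTo; any excess must be reported via fNumTruncatedBytes.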
if ( nal_size > fMaxSize ) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = nal_size - fMaxSize;
} else {
fFrameSize = nal_size;
}
Debug(2, "deliverFrame stream: %d timestamp: %ld.%06ld size: %d queuesize: %d",
m_stream->index,
frame->m_timestamp.tv_sec, frame->m_timestamp.tv_usec,
fFrameSize,
m_captureQueue.size()
);
fPresentationTime = frame->m_timestamp;
memcpy(fTo, frame->buffer(), fFrameSize);
if ( fFrameSize > 0 ) {
// send Frame to the consumer
FramedSource::afterGetting(this);
}
delete frame;
} // end void ZoneMinderDeviceSource::deliverFrame()
// FrameSource callback on read event
void ZoneMinderDeviceSource::incomingPacketHandler() {
if ( this->getNextFrame() <= 0 ) {
handleClosure(this);
}
}
// read from monitor
int ZoneMinderDeviceSource::getNextFrame() {
if ( zm_terminate )
return -1;
if ( !m_packetqueue_it ) {
m_packetqueue_it = m_packetqueue->get_video_it(true);
}
ZMPacket *zm_packet = m_packetqueue->get_packet(m_packetqueue_it);
while ( zm_packet and (zm_packet->packet.stream_index != m_stream->index) ) {
zm_packet->unlock();
// We want our stream to start at the same point as the video,
// but if this is an audio stream we need to increment past that first packet
Debug(4, "Have audio packet, skipping");
m_packetqueue->increment_it(m_packetqueue_it, m_stream->index);
zm_packet = m_packetqueue->get_packet(m_packetqueue_it);
}
if ( !zm_packet ) {
Debug(1, "null zm_packet %p", zm_packet);
return -1;
}
// packet is locked
AVPacket *pkt = &zm_packet->packet;
m_packetqueue->increment_it(m_packetqueue_it, m_stream->index);
// Convert the packet dts to a timeval (rescaled to microseconds)
int64_t pts = av_rescale_q(pkt->dts, m_stream->time_base, AV_TIME_BASE_Q);
timeval tv = { pts/1000000, pts%1000000 };
dumpPacket(m_stream, pkt, "rtspServer");
Debug(2, "pts %" PRId64 " pkt.pts %" PRId64 " tv %d.%d", pts, pkt->pts, tv.tv_sec, tv.tv_usec);
std::list< std::pair<unsigned char*, size_t> > framesList = this->splitFrames(pkt->data, pkt->size);
zm_packet->unlock();
zm_packet = nullptr;// we no longer have the lock so shouldn't be accessing it
while ( framesList.size() ) {
std::pair<unsigned char*, size_t> nal = framesList.front();
framesList.pop_front();
NAL_Frame *frame = new NAL_Frame(nal.first, nal.second, tv);
pthread_mutex_lock(&m_mutex);
// Prune any queued frames more than 10 seconds older than the new frame.
// Check the size each iteration so we never pop from an empty queue.
while ( m_captureQueue.size() ) {
NAL_Frame *f = m_captureQueue.front();
if ( (tv.tv_sec - f->m_timestamp.tv_sec) <= 10 )
break;
m_captureQueue.pop_front();
delete f;
}
#if 0
while ( m_captureQueue.size() >= m_queueSize ) {
Debug(2, "Queue full dropping frame %d", m_captureQueue.size());
NAL_Frame * f = m_captureQueue.front();
m_captureQueue.pop_front();
delete f;
}
#endif
m_captureQueue.push_back(frame);
pthread_mutex_unlock(&m_mutex);
// post an event to ask to deliver the frame
envir().taskScheduler().triggerEvent(m_eventTriggerId, this);
} // end while we get frame from data
return 1;
}
// split packet in frames
std::list< std::pair<unsigned char*,size_t> > ZoneMinderDeviceSource::splitFrames(unsigned char* frame, unsigned frameSize) {
std::list< std::pair<unsigned char*,size_t> > frameList;
if ( frame != nullptr ) {
frameList.push_back(std::pair<unsigned char*,size_t>(frame, frameSize));
}
return frameList;
}
// extract a frame
unsigned char* ZoneMinderDeviceSource::extractFrame(unsigned char* frame, size_t& size, size_t& outsize) {
outsize = size;
size = 0;
return frame;
}

View File

@ -0,0 +1,78 @@
/* ---------------------------------------------------------------------------
**
** DeviceSource.h
**
** live555 source
**
** -------------------------------------------------------------------------*/
#ifndef DEVICE_SOURCE
#define DEVICE_SOURCE
#include <string>
#include <list>
#include <iostream>
#include <liveMedia.hh>
#include "zm_monitor.h"
#include "zm_rtsp_server_frame.h"
#include "zm_packetqueue.h"
#include <linux/types.h>
class ZoneMinderDeviceSource: public FramedSource {
public:
static ZoneMinderDeviceSource* createNew(
UsageEnvironment& env,
Monitor* monitor,
AVStream * stream,
unsigned int queueSize
) {
return new ZoneMinderDeviceSource(env, monitor, stream, queueSize);
};
std::string getAuxLine() { return m_auxLine; };
int getWidth() { return m_monitor->Width(); };
int getHeight() { return m_monitor->Height(); };
protected:
ZoneMinderDeviceSource(UsageEnvironment& env, Monitor* monitor, AVStream * stream, unsigned int queueSize);
virtual ~ZoneMinderDeviceSource();
protected:
static void* threadStub(void* clientData) { return ((ZoneMinderDeviceSource*) clientData)->thread();};
void* thread();
static void deliverFrameStub(void* clientData) {((ZoneMinderDeviceSource*) clientData)->deliverFrame();};
void deliverFrame();
static void incomingPacketHandlerStub(void* clientData, int mask) { ((ZoneMinderDeviceSource*) clientData)->incomingPacketHandler(); };
void incomingPacketHandler();
int getNextFrame();
void processFrame(char * frame, int frameSize, const timeval &ref);
void queueFrame(char * frame, int frameSize, const timeval &tv);
// split packet in frames
virtual std::list< std::pair<unsigned char*, size_t> > splitFrames(unsigned char* frame, unsigned frameSize);
// overide FramedSource
virtual void doGetNextFrame();
virtual void doStopGettingFrames();
virtual unsigned char *extractFrame(unsigned char *data, size_t& size, size_t& outsize);
protected:
std::list<NAL_Frame*> m_captureQueue;
EventTriggerId m_eventTriggerId;
AVStream *m_stream;
Monitor* m_monitor;
zm_packetqueue *m_packetqueue;
std::list<ZMPacket *>::iterator *m_packetqueue_it;
unsigned int m_queueSize;
pthread_t m_thid;
pthread_mutex_t m_mutex;
std::string m_auxLine;
int stop;
};
#endif

View File

@ -0,0 +1,58 @@
#pragma once
#include "zm_logger.h"
#include <sys/time.h>
// ---------------------------------
// Captured frame
// ---------------------------------
const char H264marker[] = {0,0,0,1};
const char H264shortmarker[] = {0,0,1};
class NAL_Frame {
public:
NAL_Frame(unsigned char * buffer, size_t size, timeval timestamp) :
m_buffer(nullptr),
m_size(size),
m_timestamp(timestamp),
m_ref_count(1) {
m_buffer = new unsigned char[m_size];
memcpy(m_buffer, buffer, m_size);
};
NAL_Frame(unsigned char* buffer, size_t size) : m_buffer(buffer), m_size(size), m_ref_count(1) {
gettimeofday(&m_timestamp, NULL);
};
NAL_Frame& operator=(const NAL_Frame&);
~NAL_Frame() {
delete[] m_buffer;
m_buffer = nullptr;
};
unsigned char *buffer() const { return m_buffer; };
// The buffer has a 32bit nal size value at the front, so if we want the nal, it's
// the address of the buffer plus 4 bytes.
unsigned char *nal() const { return m_buffer+4; };
size_t size() const { return m_size; };
size_t nal_size() const { return m_size-4; };
bool check() const {
// Look for marker at beginning
unsigned char *marker = (unsigned char*)memmem(m_buffer, sizeof(H264marker), H264marker, sizeof(H264marker));
if ( marker ) {
Debug(1, "marker found at beginning");
return true;
} else {
marker = (unsigned char*)memmem(m_buffer, m_size, H264marker, sizeof(H264marker));
if ( marker ) {
Debug(1, "marker not found at beginning");
return false;
}
}
return false;
}
private:
unsigned char* m_buffer;
size_t m_size;
public:
timeval m_timestamp;
private:
int m_ref_count;
};

View File

@ -0,0 +1,224 @@
/* ---------------------------------------------------------------------------
**
** H264_DeviceSource.cpp
**
** H264 Live555 source
**
** -------------------------------------------------------------------------*/
#include <sstream>
#include <iomanip>
// live555
#include <Base64.hh>
#include "zm_rtsp_server_h264_device_source.h"
// ---------------------------------
// H264 ZoneMinder FramedSource
// ---------------------------------
//
H264_ZoneMinderDeviceSource::H264_ZoneMinderDeviceSource(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize,
bool repeatConfig,
bool keepMarker)
: H26X_ZoneMinderDeviceSource(env, monitor, stream, queueSize, repeatConfig, keepMarker)
{
// extradata appears to simply be the SPS and PPS NAL's
this->splitFrames(m_stream->codecpar->extradata, m_stream->codecpar->extradata_size);
}
// split packet into frames
std::list< std::pair<unsigned char*, size_t> > H264_ZoneMinderDeviceSource::splitFrames(unsigned char* frame, unsigned frameSize) {
std::list< std::pair<unsigned char*, size_t> > frameList;
size_t bufSize = frameSize;
size_t size = 0;
unsigned char* buffer = this->extractFrame(frame, bufSize, size);
while ( buffer != nullptr ) {
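// The low 5 bits of the first NAL byte give the H264 NAL unit type (7=SPS, 8=PPS, 5=IDR).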
switch ( m_frameType & 0x1F ) {
case 7:
Debug(1, "SPS_Size: %d bufSize %d", size, bufSize);
m_sps.assign((char*)buffer, size);
break;
case 8:
Debug(1, "PPS_Size: %d bufSize %d", size, bufSize);
m_pps.assign((char*)buffer,size);
break;
case 5:
Debug(1, "IDR_Size: %d bufSize %d", size, bufSize);
if ( m_repeatConfig && !m_sps.empty() && !m_pps.empty() ) {
frameList.push_back(std::pair<unsigned char*,size_t>((unsigned char*)m_sps.c_str(), m_sps.size()));
frameList.push_back(std::pair<unsigned char*,size_t>((unsigned char*)m_pps.c_str(), m_pps.size()));
}
break;
default:
Debug(1, "Unknown frametype!? %d %d", m_frameType, m_frameType & 0x1F);
break;
}
if ( !m_sps.empty() && !m_pps.empty() ) {
u_int32_t profile_level_id = 0;
if ( m_sps.size() >= 4 ) profile_level_id = (m_sps[1]<<16)|(m_sps[2]<<8)|m_sps[3];
char* sps_base64 = base64Encode(m_sps.c_str(), m_sps.size());
char* pps_base64 = base64Encode(m_pps.c_str(), m_pps.size());
std::ostringstream os;
os << "profile-level-id=" << std::hex << std::setw(6) << std::setfill('0') << profile_level_id;
os << ";sprop-parameter-sets=" << sps_base64 << "," << pps_base64;
m_auxLine.assign(os.str());
Debug(1, "auxLine: %s", m_auxLine.c_str());
delete [] sps_base64;
delete [] pps_base64;
}
frameList.push_back(std::pair<unsigned char*,size_t>(buffer, size));
buffer = this->extractFrame(&buffer[size], bufSize, size);
} // end while buffer
return frameList;
}
H265_ZoneMinderDeviceSource::H265_ZoneMinderDeviceSource(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize,
bool repeatConfig,
bool keepMarker)
: H26X_ZoneMinderDeviceSource(env, monitor, stream, queueSize, repeatConfig, keepMarker)
{
// extradata appears to simply be the SPS and PPS NAL's
this->splitFrames(m_stream->codecpar->extradata, m_stream->codecpar->extradata_size);
}
// split packet in frames
std::list< std::pair<unsigned char*,size_t> >
H265_ZoneMinderDeviceSource::splitFrames(unsigned char* frame, unsigned frameSize) {
std::list< std::pair<unsigned char*,size_t> > frameList;
size_t bufSize = frameSize;
size_t size = 0;
unsigned char* buffer = this->extractFrame(frame, bufSize, size);
while ( buffer != nullptr ) {
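// Bits 1-6 of the first NAL byte give the H265 NAL unit type (32=VPS, 33=SPS, 34=PPS, 19/20=IDR).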
switch ((m_frameType&0x7E)>>1) {
case 32:
Debug(1, "VPS_Size: %d bufSize %d", size, bufSize);
m_vps.assign((char*)buffer,size);
break;
case 33:
Debug(1, "SPS_Size: %d bufSize %d", size, bufSize);
m_sps.assign((char*)buffer,size);
break;
case 34:
Debug(1, "PPS_Size: %d bufSize %d", size, bufSize);
m_pps.assign((char*)buffer,size);
break;
case 19:
case 20:
Debug(1, "IDR_Size: %d bufSize %d", size, bufSize);
if ( m_repeatConfig && !m_vps.empty() && !m_sps.empty() && !m_pps.empty() ) {
frameList.push_back(std::pair<unsigned char*,size_t>((unsigned char*)m_vps.c_str(), m_vps.size()));
frameList.push_back(std::pair<unsigned char*,size_t>((unsigned char*)m_sps.c_str(), m_sps.size()));
frameList.push_back(std::pair<unsigned char*,size_t>((unsigned char*)m_pps.c_str(), m_pps.size()));
}
break;
default:
Debug(1, "Unknown frametype!? %d %d", m_frameType, ((m_frameType & 0x7E) >> 1));
break;
}
if ( !m_vps.empty() && !m_sps.empty() && !m_pps.empty() ) {
char* vps_base64 = base64Encode(m_vps.c_str(), m_vps.size());
char* sps_base64 = base64Encode(m_sps.c_str(), m_sps.size());
char* pps_base64 = base64Encode(m_pps.c_str(), m_pps.size());
std::ostringstream os;
os << "sprop-vps=" << vps_base64;
os << ";sprop-sps=" << sps_base64;
os << ";sprop-pps=" << pps_base64;
m_auxLine.assign(os.str());
Debug(1, "Assigned auxLine to %s", m_auxLine.c_str());
delete [] vps_base64;
delete [] sps_base64;
delete [] pps_base64;
}
frameList.push_back(std::pair<unsigned char*,size_t>(buffer, size));
buffer = this->extractFrame(&buffer[size], bufSize, size);
} // end while buffer
if ( bufSize ) {
Debug(1, "%d bytes remaining", bufSize);
}
return frameList;
} // end H265_ZoneMinderDeviceSource::splitFrames(unsigned char* frame, unsigned frameSize)
unsigned char * H26X_ZoneMinderDeviceSource::findMarker(
unsigned char *frame, size_t size, size_t &length
) {
//Debug(1, "findMarker %p %d", frame, size);
unsigned char *start = nullptr;
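// Scan for an Annex-B start code: 00 00 01, or 00 00 00 01 when preceded by a zero byte.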
for ( size_t i = 0; i < size-2; i += 1 ) {
//Debug(1, "%d: %d %d %d", i, frame[i], frame[i+1], frame[i+2]);
if ( (frame[i] == 0) and (frame[i+1] == 0) and (frame[i+2] == 1) ) {
if ( i and (frame[i-1] == 0) ) {
start = frame + i - 1;
length = sizeof(H264marker);
} else {
start = frame + i;
length = sizeof(H264shortmarker);
}
break;
}
}
return start;
}
// extract a frame
unsigned char* H26X_ZoneMinderDeviceSource::extractFrame(unsigned char* frame, size_t& size, size_t& outsize) {
unsigned char *outFrame = nullptr;
Debug(1, "ExtractFrame: %p %d", frame, size);
outsize = 0;
size_t markerLength = 0;
size_t endMarkerLength = 0;
m_frameType = 0;
unsigned char *startFrame = nullptr;
if ( size >= 3 )
startFrame = this->findMarker(frame, size, markerLength);
if ( startFrame != nullptr ) {
Debug(1, "startFrame: %p marker Length %d", startFrame, markerLength);
m_frameType = startFrame[markerLength];
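// The byte immediately after the start code is the NAL unit header, which carries the frame type.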
int remainingSize = size-(startFrame-frame+markerLength);
unsigned char *endFrame = nullptr;
if ( remainingSize > 3 ) {
endFrame = this->findMarker(startFrame+markerLength, remainingSize, endMarkerLength);
}
Debug(1, "endFrame: %p marker Length %d, remaining size %d", endFrame, endMarkerLength, remainingSize);
if ( m_keepMarker ) {
size -= startFrame-frame;
outFrame = startFrame;
} else {
size -= startFrame-frame+markerLength;
outFrame = &startFrame[markerLength];
}
if ( endFrame != nullptr ) {
outsize = endFrame - outFrame;
} else {
outsize = size;
}
size -= outsize;
Debug(1, "Have frame type: %d size %d, keepmarker %d", m_frameType, outsize, m_keepMarker);
} else if ( size >= sizeof(H264shortmarker) ) {
Info("No marker found");
}
return outFrame;
}

View File

@ -0,0 +1,103 @@
/* ---------------------------------------------------------------------------
** This software is in the public domain, furnished "as is", without technical
** support, and with no warranty, express or implied, as to its usefulness for
** any purpose.
**
** H264_ZoneMinderDeviceSource.h
**
** H264 ZoneMinder live555 source
**
** -------------------------------------------------------------------------*/
#ifndef H264_ZoneMinder_DEVICE_SOURCE
#define H264_ZoneMinder_DEVICE_SOURCE
#include "zm_rtsp_server_device_source.h"
#include "zm_rtsp_server_frame.h"
// ---------------------------------
// H264 ZoneMinder FramedSource
// ---------------------------------
class H26X_ZoneMinderDeviceSource : public ZoneMinderDeviceSource {
protected:
H26X_ZoneMinderDeviceSource(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize,
bool repeatConfig,
bool keepMarker)
:
ZoneMinderDeviceSource(env, monitor, stream, queueSize),
m_repeatConfig(repeatConfig),
m_keepMarker(keepMarker),
m_frameType(0) { }
virtual ~H26X_ZoneMinderDeviceSource() {}
virtual unsigned char* extractFrame(unsigned char* frame, size_t& size, size_t& outsize);
virtual unsigned char* findMarker(unsigned char *frame, size_t size, size_t &length);
protected:
std::string m_sps;
std::string m_pps;
bool m_repeatConfig;
bool m_keepMarker;
int m_frameType;
};
class H264_ZoneMinderDeviceSource : public H26X_ZoneMinderDeviceSource {
public:
static H264_ZoneMinderDeviceSource* createNew(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize,
bool repeatConfig,
bool keepMarker) {
return new H264_ZoneMinderDeviceSource(env, monitor, stream, queueSize, repeatConfig, keepMarker);
}
protected:
H264_ZoneMinderDeviceSource(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize,
bool repeatConfig,
bool keepMarker);
// overide ZoneMinderDeviceSource
virtual std::list< std::pair<unsigned char*,size_t> > splitFrames(unsigned char* frame, unsigned frameSize);
};
class H265_ZoneMinderDeviceSource : public H26X_ZoneMinderDeviceSource {
public:
static H265_ZoneMinderDeviceSource* createNew(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize,
bool repeatConfig,
bool keepMarker) {
return new H265_ZoneMinderDeviceSource(env, monitor, stream, queueSize, repeatConfig, keepMarker);
}
protected:
H265_ZoneMinderDeviceSource(
UsageEnvironment& env,
Monitor *monitor,
AVStream *stream,
unsigned int queueSize,
bool repeatConfig,
bool keepMarker);
// overide ZoneMinderDeviceSource
virtual std::list< std::pair<unsigned char*,size_t> > splitFrames(unsigned char* frame, unsigned frameSize);
protected:
std::string m_vps;
};
#endif

View File

@ -0,0 +1,106 @@
/* ---------------------------------------------------------------------------
**
** ServerMediaSubsession.cpp
**
** -------------------------------------------------------------------------*/
#include <sstream>
#include "zm_rtsp_server_server_media_subsession.h"
#include "zm_rtsp_server_device_source.h"
#include "zm_rtsp_server_adts_source.h"
// ---------------------------------
// BaseServerMediaSubsession
// ---------------------------------
FramedSource* BaseServerMediaSubsession::createSource(
UsageEnvironment& env, FramedSource* inputSource, const std::string& format)
{
FramedSource* source = nullptr;
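// Wrap the device source in a framer suited to the format; the discrete framers expect complete NAL units delivered one at a time.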
if ( format == "video/MP2T" ) {
source = MPEG2TransportStreamFramer::createNew(env, inputSource);
} else if ( format == "video/H264" ) {
source = H264VideoStreamDiscreteFramer::createNew(env, inputSource);
}
#if LIVEMEDIA_LIBRARY_VERSION_INT > 1414454400
else if ( format == "video/H265" ) {
source = H265VideoStreamDiscreteFramer::createNew(env, inputSource);
}
#endif
#if 0
else if (format == "video/JPEG") {
source = MJPEGVideoSource::createNew(env, inputSource);
}
#endif
else {
source = inputSource;
}
return source;
}
/* source is generally a replica */
RTPSink* BaseServerMediaSubsession::createSink(
UsageEnvironment& env,
Groupsock* rtpGroupsock,
unsigned char rtpPayloadTypeIfDynamic,
const std::string& format,
FramedSource *source
) {
RTPSink* sink = nullptr;
if ( format == "video/MP2T" ) {
sink = SimpleRTPSink::createNew(env, rtpGroupsock, rtpPayloadTypeIfDynamic, 90000, "video", "MP2T", 1, true, false);
} else if ( format == "video/H264" ) {
sink = H264VideoRTPSink::createNew(env, rtpGroupsock, rtpPayloadTypeIfDynamic);
} else if ( format == "video/VP8" ) {
sink = VP8VideoRTPSink::createNew(env, rtpGroupsock, rtpPayloadTypeIfDynamic);
}
#if LIVEMEDIA_LIBRARY_VERSION_INT > 1414454400
else if ( format == "video/VP9" ) {
sink = VP9VideoRTPSink::createNew(env, rtpGroupsock, rtpPayloadTypeIfDynamic);
} else if ( format == "video/H265" ) {
sink = H265VideoRTPSink::createNew(env, rtpGroupsock, rtpPayloadTypeIfDynamic);
}
#endif
else if ( format == "audio/AAC" ) {
ADTS_ZoneMinderDeviceSource *adts_source = (ADTS_ZoneMinderDeviceSource *)(m_replicator->inputSource());
sink = MPEG4GenericRTPSink::createNew(env, rtpGroupsock,
rtpPayloadTypeIfDynamic,
adts_source->samplingFrequency(),
"audio", "AAC-hbr",
adts_source->configStr(),
adts_source->numChannels()
);
} else {
Error("unknown format");
}
#if 0
else if (format == "video/JPEG") {
sink = JPEGVideoRTPSink::createNew (env, rtpGroupsock);
}
#endif
return sink;
}
char const* BaseServerMediaSubsession::getAuxLine(
ZoneMinderDeviceSource* source,
unsigned char rtpPayloadType
) {
const char* auxLine = nullptr;
if ( source ) {
std::ostringstream os;
os << "a=fmtp:" << int(rtpPayloadType) << " ";
os << source->getAuxLine();
os << "\r\n";
int width = source->getWidth();
int height = source->getHeight();
if ( (width > 0) && (height > 0) ) {
os << "a=x-dimensions:" << width << "," << height << "\r\n";
}
auxLine = strdup(os.str().c_str());
Debug(1, "auxLine: %s", auxLine);
} else {
Error("No source auxLine: ");
}
return auxLine;
}

View File

@ -0,0 +1,46 @@
/* ---------------------------------------------------------------------------
** This software is in the public domain, furnished "as is", without technical
** support, and with no warranty, express or implied, as to its usefulness for
** any purpose.
**
** ServerMediaSubsession.h
**
** -------------------------------------------------------------------------*/
#pragma once
#include <sys/stat.h>
#include <string>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <liveMedia.hh>
class ZoneMinderDeviceSource;
class BaseServerMediaSubsession {
public:
BaseServerMediaSubsession(StreamReplicator* replicator):
m_replicator(replicator) {};
FramedSource* createSource(
UsageEnvironment& env,
FramedSource * videoES,
const std::string& format);
RTPSink * createSink(
UsageEnvironment& env,
Groupsock * rtpGroupsock,
unsigned char rtpPayloadTypeIfDynamic,
const std::string& format,
FramedSource *source);
char const* getAuxLine(
ZoneMinderDeviceSource* source,
unsigned char rtpPayloadType);
protected:
StreamReplicator* m_replicator;
};

View File

@ -0,0 +1,157 @@
#include "zm_rtsp_server_thread.h"
#include "zm_rtsp_server_device_source.h"
#include "zm_rtsp_server_h264_device_source.h"
#include "zm_rtsp_server_adts_source.h"
#include "zm_rtsp_server_unicast_server_media_subsession.h"
#include <StreamReplicator.hh>
#include "zm.h"
#if HAVE_RTSP_SERVER
RTSPServerThread::RTSPServerThread(Monitor *p_monitor) :
monitor(p_monitor),
terminate(0)
{
//unsigned short rtsp_over_http_port = 0;
//const char *realm = "ZoneMinder";
//unsigned int timeout = 65;
OutPacketBuffer::maxSize = 2000000;
scheduler = BasicTaskScheduler::createNew();
env = BasicUsageEnvironment::createNew(*scheduler);
authDB = nullptr;
//authDB = new UserAuthenticationDatabase("ZoneMinder");
//authDB->addUserRecord("username1", "password1"); // replace these with real strings
portNumBits rtspServerPortNum = config.min_rtsp_port + monitor->Id();
rtspServer = RTSPServer::createNew(*env, rtspServerPortNum, authDB);
if ( rtspServer == nullptr ) {
Fatal("Failed to create rtspServer at port %d", rtspServerPortNum);
return;
}
const char *prefix = rtspServer->rtspURLPrefix();
Debug(1, "RTSP prefix is %s", prefix);
delete[] prefix;
} // end RTSPServerThread::RTSPServerThread
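// Port numbering sketch (values hypothetical): with config.min_rtsp_port set
// to 8554, the monitor with Id 3 gets its own RTSPServer on port 8557, so its
// stream ends up at rtsp://<host>:8557/streamname once addStream() has run.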
RTSPServerThread::~RTSPServerThread() {
if ( rtspServer ) {
Medium::close(rtspServer);
} // end if rtsp_server
while ( sources.size() ) {
FramedSource *source = sources.front();
sources.pop_front();
Medium::close(source);
}
env->reclaim();
delete scheduler;
}
int RTSPServerThread::run() {
Debug(1, "RTSPServerThread::run()");
if ( rtspServer )
env->taskScheduler().doEventLoop(&terminate); // does not return
Debug(1, "RTSPServerThread::done()");
return 0;
} // end in RTSPServerThread::run()
void RTSPServerThread::stop() {
Debug(1, "RTSPServerThread::stop()");
terminate = 1;
for ( std::list<FramedSource *>::iterator it = sources.begin(); it != sources.end(); ++it ) {
(*it)->stopGettingFrames();
}
} // end RTSPServerThread::stop()
bool RTSPServerThread::stopped() const {
return terminate ? true : false;
} // end RTSPServerThread::stopped()
void RTSPServerThread::addStream(AVStream *stream, AVStream *audio_stream) {
if ( !rtspServer )
return;
AVCodecID codec_id = stream->codecpar->codec_id;
std::string rtpFormat = getRtpFormat(codec_id, false);
Debug(1, "RTSP: format %s", rtpFormat.c_str());
if ( rtpFormat.empty() ) {
Error("No streaming format");
return;
}
int queueSize = 30;
bool repeatConfig = true;
bool muxTS = false;
ServerMediaSession *sms = nullptr;
if ( stream ) {
StreamReplicator* videoReplicator = nullptr;
FramedSource *source = nullptr;
if ( rtpFormat == "video/H264" ) {
source = H264_ZoneMinderDeviceSource::createNew(*env, monitor, stream, queueSize, repeatConfig, muxTS);
} else if ( rtpFormat == "video/H265" ) {
source = H265_ZoneMinderDeviceSource::createNew(*env, monitor, stream, queueSize, repeatConfig, muxTS);
}
if ( source == nullptr ) {
Error("Unable to create source");
} else {
videoReplicator = StreamReplicator::createNew(*env, source, false);
// only track sources we actually created; pushing a nullptr would make the
// destructor call Medium::close(nullptr)
sources.push_back(source);
}
// Create Unicast Session
if ( videoReplicator ) {
if ( !sms )
sms = ServerMediaSession::createNew(*env, "streamname");
sms->addSubsession(UnicastServerMediaSubsession::createNew(*env, videoReplicator, rtpFormat));
}
}
if ( audio_stream ) {
StreamReplicator* replicator = nullptr;
FramedSource *source = nullptr;
rtpFormat = getRtpFormat(audio_stream->codecpar->codec_id, false);
if ( rtpFormat == "audio/AAC" ) {
source = ADTS_ZoneMinderDeviceSource::createNew(*env, monitor, audio_stream, queueSize);
Debug(1, "ADTS source %p", source);
}
if ( source ) {
replicator = StreamReplicator::createNew(*env, source, false /* deleteWhenLastReplicaDies */);
sources.push_back(source);
}
if ( replicator ) {
if ( !sms )
sms = ServerMediaSession::createNew(*env, "streamname");
sms->addSubsession(UnicastServerMediaSubsession::createNew(*env, replicator, rtpFormat));
}
} else {
Debug(1, "Not Adding auto stream");
}
if ( sms ) {
rtspServer->addServerMediaSession(sms);
char *url = rtspServer->rtspURL(sms);
Debug(1, "url is %s", url);
delete[] url;
}
} // end void addStream
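// Note that the video and audio subsessions are attached to the same
// "streamname" ServerMediaSession, so a single RTSP URL carries both tracks
// whenever an audio stream is present.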
// -----------------------------------------
// convert V4L2 pix format to RTP mime
// -----------------------------------------
const std::string RTSPServerThread::getRtpFormat(AVCodecID codec_id, bool muxTS) {
if ( muxTS ) {
return "video/MP2T";
} else {
switch ( codec_id ) {
case AV_CODEC_ID_H265 : return "video/H265";
case AV_CODEC_ID_H264 : return "video/H264";
//case PIX_FMT_MJPEG: rtpFormat = "video/JPEG"; break;
//case PIX_FMT_JPEG : rtpFormat = "video/JPEG"; break;
//case AV_PIX_FMT_VP8 : rtpFormat = "video/VP8" ; break;
//case AV_PIX_FMT_VP9 : rtpFormat = "video/VP9" ; break;
case AV_CODEC_ID_AAC : return "audio/AAC";
default: break;
}
}
return "";
}
#endif

View File

@ -0,0 +1,45 @@
#include "zm.h"
#if HAVE_RTSP_SERVER
#ifndef ZM_RTSP_SERVER_THREAD_H
#define ZM_RTSP_SERVER_THREAD_H
#include "zm_thread.h"
#include <signal.h>
#include "zm_monitor.h"
#include <BasicUsageEnvironment.hh>
#include <RTSPServer.hh>
#include <libavcodec/codec_id.h>
#include <libavformat/avformat.h>
class RTSPServerThread : public Thread {
private:
Monitor *monitor;
char terminate;
TaskScheduler* scheduler;
UsageEnvironment* env;
UserAuthenticationDatabase* authDB;
RTSPServer* rtspServer;
std::list<FramedSource *> sources;
public:
explicit RTSPServerThread(Monitor *);
~RTSPServerThread();
void addStream(AVStream *, AVStream *);
int run();
void stop();
bool stopped() const;
private:
const std::string getRtpFormat(AVCodecID codec, bool muxTS);
int addSession(
const std::string & sessionName,
const std::list<ServerMediaSubsession*> & subSession
);
};
#endif
#endif
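// A minimal lifecycle sketch (hypothetical caller code; zmc.cpp below does
// essentially this per monitor):
//   RTSPServerThread *rtsp = new RTSPServerThread(monitor);
//   Camera *camera = monitor->getCamera();
//   rtsp->addStream(camera->get_VideoStream(), camera->get_AudioStream());
//   rtsp->start();  // run() enters the live555 event loop
//   ...
//   rtsp->stop();   // sets the watch variable so the event loop returns
//   rtsp->join();
//   delete rtsp;    // closes the RTSPServer and any remaining sources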

View File

@ -0,0 +1,47 @@
/* ---------------------------------------------------------------------------
** This software is in the public domain, furnished "as is", without technical
** support, and with no warranty, express or implied, as to its usefulness for
** any purpose.
**
** ServerMediaSubsession.cpp
**
** -------------------------------------------------------------------------*/
#include "zm_rtsp_server_unicast_server_media_subsession.h"
#include "zm_rtsp_server_device_source.h"
// -----------------------------------------
// ServerMediaSubsession for Unicast
// -----------------------------------------
UnicastServerMediaSubsession* UnicastServerMediaSubsession::createNew(
UsageEnvironment& env,
StreamReplicator* replicator,
//FramedSource *source,
const std::string& format
) {
return new UnicastServerMediaSubsession(env, replicator, format);
//return new UnicastServerMediaSubsession(env, replicator, source, format);
}
FramedSource* UnicastServerMediaSubsession::createNewStreamSource(
unsigned clientSessionId,
unsigned& estBitrate
) {
FramedSource* replica = m_replicator->createStreamReplica();
return createSource(envir(), replica, m_format);
}
RTPSink* UnicastServerMediaSubsession::createNewRTPSink(
Groupsock* rtpGroupsock,
unsigned char rtpPayloadTypeIfDynamic,
FramedSource* inputSource
) {
return createSink(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, m_format, inputSource);
}
char const* UnicastServerMediaSubsession::getAuxSDPLine(
RTPSink* rtpSink, FramedSource* inputSource
) {
return this->getAuxLine(dynamic_cast<ZoneMinderDeviceSource*>(m_replicator->inputSource()), rtpSink->rtpPayloadType());
}
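// Request flow, for orientation: live555's OnDemandServerMediaSubsession
// invokes getAuxSDPLine() while answering a DESCRIBE, then calls
// createNewStreamSource() and createNewRTPSink() for each client SETUP, so
// every client gets its own replica of the shared stream.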

View File

@ -0,0 +1,45 @@
/* ---------------------------------------------------------------------------
** This software is in the public domain, furnished "as is", without technical
** support, and with no warranty, express or implied, as to its usefulness for
** any purpose.
**
** ServerMediaSubsession.h
**
** -------------------------------------------------------------------------*/
#pragma once
#include "zm_rtsp_server_server_media_subsession.h"
// -----------------------------------------
// ServerMediaSubsession for Unicast
// -----------------------------------------
class UnicastServerMediaSubsession :
public OnDemandServerMediaSubsession,
public BaseServerMediaSubsession
{
public:
static UnicastServerMediaSubsession* createNew(
UsageEnvironment& env,
StreamReplicator* replicator,
const std::string& format);
protected:
UnicastServerMediaSubsession(
UsageEnvironment& env,
StreamReplicator* replicator,
const std::string& format)
:
OnDemandServerMediaSubsession(env, true
/* Boolean reuseFirstSource, portNumBits initialPortNum=6970, Boolean multiplexRTCPWithRTP=False */
),
BaseServerMediaSubsession(replicator),
m_format(format) {};
virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource);
virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource);
protected:
const std::string m_format;
};

View File

@ -350,7 +350,7 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
AVCodecContext *codec_context = avcodec_alloc_context3(nullptr);
avcodec_parameters_to_context(codec_context, stream->codecpar);
//avcodec_parameters_to_context(codec_context, stream->codecpar);
stream->codec = codec_context;
#else
AVCodecContext *codec_context = stream->codec;
@ -376,9 +376,6 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const {
#endif
else
Warning("Unknown media_type %s", type.c_str());
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
stream->codecpar->codec_type = codec_context->codec_type;
#endif
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
std::string codec_name;
@ -417,6 +414,7 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const {
}
} /// end if static or dynamic
#if LIBAVCODEC_VERSION_CHECK(55, 50, 3, 60, 103)
if ( codec_name.empty() )
#else
@ -425,7 +423,6 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const {
{
Warning( "Can't find payload details for %s payload type %d, name %s",
mediaDesc->getType().c_str(), mediaDesc->getPayloadType(), mediaDesc->getPayloadDesc().c_str() );
//return( 0 );
}
if ( mediaDesc->getWidth() )
codec_context->width = mediaDesc->getWidth();
@ -439,7 +436,7 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const {
strcpy(pvalue, mediaDesc->getSprops().c_str());
while (*value) {
while ( *value ) {
char base64packet[1024];
uint8_t decoded_packet[1024];
uint32_t packet_size;
@ -454,9 +451,9 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const {
if ( *value == ',' )
value++;
packet_size= av_base64_decode(decoded_packet, (const char *)base64packet, (int)sizeof(decoded_packet));
packet_size = av_base64_decode(decoded_packet, (const char *)base64packet, (int)sizeof(decoded_packet));
Hexdump(4, (char *)decoded_packet, packet_size);
if (packet_size) {
if ( packet_size ) {
uint8_t *dest =
(uint8_t *)av_malloc(packet_size + sizeof(start_sequence) +
codec_context->extradata_size +
@ -493,7 +490,10 @@ AVFormatContext *SessionDescriptor::generateFormatContext() const {
}
}
}
}
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_parameters_from_context(stream->codecpar, codec_context);
#endif
} // end foreach mediaList
return formatContext;
}

View File

@ -33,7 +33,13 @@ class Monitor;
class StreamBase {
public:
typedef enum { STREAM_JPEG, STREAM_RAW, STREAM_ZIP, STREAM_SINGLE, STREAM_MPEG } StreamType;
typedef enum {
STREAM_JPEG,
STREAM_RAW,
STREAM_ZIP,
STREAM_SINGLE,
STREAM_MPEG
} StreamType;
protected:
static const int MAX_STREAM_DELAY = 5; // Seconds
@ -57,8 +63,33 @@ protected:
char msg_data[256];
} DataMsg;
typedef enum { MSG_CMD=1, MSG_DATA_WATCH, MSG_DATA_EVENT } MsgType;
typedef enum { CMD_NONE=0, CMD_PAUSE, CMD_PLAY, CMD_STOP, CMD_FASTFWD, CMD_SLOWFWD, CMD_SLOWREV, CMD_FASTREV, CMD_ZOOMIN, CMD_ZOOMOUT, CMD_PAN, CMD_SCALE, CMD_PREV, CMD_NEXT, CMD_SEEK, CMD_VARPLAY, CMD_GET_IMAGE, CMD_QUIT, CMD_QUERY=99 } MsgCommand;
typedef enum {
MSG_CMD=1,
MSG_DATA_WATCH,
MSG_DATA_EVENT
} MsgType;
typedef enum {
CMD_NONE=0,
CMD_PAUSE,
CMD_PLAY,
CMD_STOP,
CMD_FASTFWD,
CMD_SLOWFWD,
CMD_SLOWREV,
CMD_FASTREV,
CMD_ZOOMIN,
CMD_ZOOMOUT,
CMD_PAN,
CMD_SCALE,
CMD_PREV,
CMD_NEXT,
CMD_SEEK,
CMD_VARPLAY,
CMD_GET_IMAGE,
CMD_QUIT,
CMD_QUERY=99
} MsgCommand;
protected:
int monitor_id;
@ -109,7 +140,6 @@ protected:
bool checkInitialised();
void updateFrameRate(double fps);
Image *prepareImage(Image *image);
bool sendTextFrame(const char *text);
bool checkCommandQueue();
virtual void processCommand(const CmdMsg *msg)=0;
@ -188,6 +218,7 @@ public:
void setStreamQueue(int p_connkey) {
connkey = p_connkey;
}
bool sendTextFrame(const char *text);
virtual void openComms();
virtual void closeComms();
virtual void runStream()=0;

View File

@ -25,8 +25,7 @@
#if HAVE_LIBSWSCALE && HAVE_LIBAVUTIL
SWScale::SWScale() : gotdefaults(false), swscale_ctx(nullptr), input_avframe(nullptr), output_avframe(nullptr) {
Debug(4,"SWScale object created");
Debug(4, "SWScale object created");
}
bool SWScale::init() {
@ -68,10 +67,14 @@ SWScale::~SWScale() {
swscale_ctx = nullptr;
}
Debug(4,"SWScale object destroyed");
Debug(4, "SWScale object destroyed");
}
int SWScale::SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
int SWScale::SetDefaults(
enum _AVPIXELFORMAT in_pf,
enum _AVPIXELFORMAT out_pf,
unsigned int width,
unsigned int height) {
/* Assign the defaults */
default_input_pf = in_pf;
@ -84,6 +87,48 @@ int SWScale::SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf,
return 0;
}
int SWScale::Convert(
AVFrame *in_frame,
AVFrame *out_frame
) {
// The J formats are deprecated, so we need to convert
AVPixelFormat format;
switch ( in_frame->format ) {
case AV_PIX_FMT_YUVJ420P :
format = AV_PIX_FMT_YUV420P;
break;
case AV_PIX_FMT_YUVJ422P :
format = AV_PIX_FMT_YUV422P;
break;
case AV_PIX_FMT_YUVJ444P :
format = AV_PIX_FMT_YUV444P;
break;
case AV_PIX_FMT_YUVJ440P :
format = AV_PIX_FMT_YUV440P;
break;
default:
format = (AVPixelFormat)in_frame->format;
break;
}
/* Get the context */
swscale_ctx = sws_getCachedContext(swscale_ctx,
in_frame->width, in_frame->height, format,
out_frame->width, out_frame->height, (AVPixelFormat)out_frame->format,
SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
if ( swscale_ctx == nullptr ) {
Error("Failed getting swscale context");
return -6;
}
/* Do the conversion */
if ( !sws_scale(swscale_ctx, in_frame->data, in_frame->linesize, 0, in_frame->height, out_frame->data, out_frame->linesize ) ) {
Error("swscale conversion failed");
return -10;
}
return 0;
}
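// Usage sketch for the new frame-based overload (hypothetical caller code,
// assuming the caller allocates the output frame with FFmpeg's AVFrame API):
//   AVFrame *out = av_frame_alloc();
//   out->format = AV_PIX_FMT_RGBA;
//   out->width = in_frame->width;
//   out->height = in_frame->height;
//   av_frame_get_buffer(out, 32);
//   if ( swscale.Convert(in_frame, out) == 0 ) {
//     // out->data[0] now holds the converted image
//   }
//   av_frame_free(&out);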
int SWScale::Convert(
const uint8_t* in_buffer,
const size_t in_buffer_size,
@ -96,6 +141,8 @@ int SWScale::Convert(
unsigned int new_width,
unsigned int new_height
) {
Debug(1, "Convert: in_buffer %p in_buffer_size %d out_buffer %p size %d width %d height %d width %d height %d",
in_buffer, in_buffer_size, out_buffer, out_buffer_size, width, height, new_width, new_height);
/* Parameter checking */
if ( in_buffer == nullptr ) {
Error("NULL Input buffer");
@ -109,11 +156,29 @@ int SWScale::Convert(
// Error("Invalid input or output pixel formats");
// return -2;
// }
if (!width || !height || !new_height || !new_width) {
if ( !width || !height || !new_height || !new_width ) {
Error("Invalid width or height");
return -3;
}
// The J formats are deprecated, so we need to convert
switch ( in_pf ) {
case AV_PIX_FMT_YUVJ420P :
in_pf = AV_PIX_FMT_YUV420P;
break;
case AV_PIX_FMT_YUVJ422P :
in_pf = AV_PIX_FMT_YUV422P;
break;
case AV_PIX_FMT_YUVJ444P :
in_pf = AV_PIX_FMT_YUV444P;
break;
case AV_PIX_FMT_YUVJ440P :
in_pf = AV_PIX_FMT_YUV440P;
break;
default:
break;
}
#if LIBSWSCALE_VERSION_CHECK(0, 8, 0, 8, 0)
/* Warn if the input or output pixelformat is not supported */
if ( !sws_isSupportedInput(in_pf) ) {
@ -128,7 +193,7 @@ int SWScale::Convert(
/* Check the buffer sizes */
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
size_t insize = av_image_get_buffer_size(in_pf, width, height, 1);
size_t insize = av_image_get_buffer_size(in_pf, width, height, 32);
#else
size_t insize = avpicture_get_size(in_pf, width, height);
#endif
@ -137,7 +202,7 @@ int SWScale::Convert(
return -4;
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
size_t outsize = av_image_get_buffer_size(out_pf, new_width, new_height, 1);
size_t outsize = av_image_get_buffer_size(out_pf, new_width, new_height, 32);
#else
size_t outsize = avpicture_get_size(out_pf, new_width, new_height);
#endif
@ -148,7 +213,9 @@ int SWScale::Convert(
}
/* Get the context */
swscale_ctx = sws_getCachedContext( swscale_ctx, width, height, in_pf, new_width, new_height, out_pf, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr );
swscale_ctx = sws_getCachedContext(
swscale_ctx, width, height, in_pf, new_width, new_height,
out_pf, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
if ( swscale_ctx == nullptr ) {
Error("Failed getting swscale context");
return -6;
@ -156,7 +223,7 @@ int SWScale::Convert(
/* Fill in the buffers */
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
if (av_image_fill_arrays(input_avframe->data, input_avframe->linesize,
if ( av_image_fill_arrays(input_avframe->data, input_avframe->linesize,
(uint8_t*) in_buffer, in_pf, width, height, 1) <= 0) {
#else
if (avpicture_fill((AVPicture*) input_avframe, (uint8_t*) in_buffer,
@ -166,10 +233,10 @@ int SWScale::Convert(
return -7;
}
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
if (av_image_fill_arrays(output_avframe->data, output_avframe->linesize,
if ( av_image_fill_arrays(output_avframe->data, output_avframe->linesize,
out_buffer, out_pf, new_width, new_height, 1) <= 0) {
#else
if (avpicture_fill((AVPicture*) output_avframe, out_buffer, out_pf, new_width,
if ( avpicture_fill((AVPicture*) output_avframe, out_buffer, out_pf, new_width,
new_height) <= 0) {
#endif
Error("Failed filling output frame with output buffer");
@ -177,7 +244,9 @@ int SWScale::Convert(
}
/* Do the conversion */
if(!sws_scale(swscale_ctx, input_avframe->data, input_avframe->linesize, 0, height, output_avframe->data, output_avframe->linesize ) ) {
if ( !sws_scale(swscale_ctx,
input_avframe->data, input_avframe->linesize,
0, height, output_avframe->data, output_avframe->linesize ) ) {
Error("swscale conversion failed");
return -10;
}
@ -185,27 +254,42 @@ int SWScale::Convert(
return 0;
}
int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
int SWScale::Convert(
const uint8_t* in_buffer,
const size_t in_buffer_size,
uint8_t* out_buffer,
const size_t out_buffer_size,
enum _AVPIXELFORMAT in_pf,
enum _AVPIXELFORMAT out_pf,
unsigned int width,
unsigned int height) {
return Convert(in_buffer, in_buffer_size, out_buffer, out_buffer_size, in_pf, out_pf, width, height, width, height);
}
int SWScale::Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
if(img->Width() != width) {
Error("Source image width differs. Source: %d Output: %d",img->Width(), width);
int SWScale::Convert(
const Image* img,
uint8_t* out_buffer,
const size_t out_buffer_size,
enum _AVPIXELFORMAT in_pf,
enum _AVPIXELFORMAT out_pf,
unsigned int width,
unsigned int height) {
if ( img->Width() != width ) {
Error("Source image width differs. Source: %d Output: %d", img->Width(), width);
return -12;
}
if(img->Height() != height) {
Error("Source image height differs. Source: %d Output: %d",img->Height(), height);
if ( img->Height() != height ) {
Error("Source image height differs. Source: %d Output: %d", img->Height(), height);
return -13;
}
return Convert(img->Buffer(),img->Size(),out_buffer,out_buffer_size,in_pf,out_pf,width,height);
return Convert(img->Buffer(), img->Size(), out_buffer, out_buffer_size, in_pf, out_pf, width, height);
}
int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size) {
if(!gotdefaults) {
if ( !gotdefaults ) {
Error("Defaults are not set");
return -24;
}
@ -215,7 +299,7 @@ int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t
int SWScale::ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size) {
if(!gotdefaults) {
if ( !gotdefaults ) {
Error("Defaults are not set");
return -24;
}

View File

@ -14,6 +14,7 @@ class SWScale {
int SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height);
int ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size);
int ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size);
int Convert( AVFrame *in_frame, AVFrame *out_frame );
int Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height);
int Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height);
int Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height, unsigned int new_width, unsigned int new_height);

View File

@ -66,33 +66,35 @@ int Mutex::trylock() {
}
void Mutex::lock() {
if ( pthread_mutex_lock(&mMutex) < 0 )
throw ThreadException( stringtf( "Unable to lock pthread mutex: %s", strerror(errno) ) );
throw ThreadException(stringtf("Unable to lock pthread mutex: %s", strerror(errno)));
//Debug(3, "Lock");
}
void Mutex::lock( int secs ) {
struct timespec timeout = getTimeout( secs );
if ( pthread_mutex_timedlock( &mMutex, &timeout ) < 0 )
throw ThreadException( stringtf( "Unable to timedlock pthread mutex: %s", strerror(errno) ) );
struct timespec timeout = getTimeout(secs);
if ( pthread_mutex_timedlock(&mMutex, &timeout) < 0 )
throw ThreadException(stringtf("Unable to timedlock pthread mutex: %s", strerror(errno)));
}
void Mutex::lock( double secs ) {
struct timespec timeout = getTimeout( secs );
if ( pthread_mutex_timedlock( &mMutex, &timeout ) < 0 )
throw ThreadException( stringtf( "Unable to timedlock pthread mutex: %s", strerror(errno) ) );
struct timespec timeout = getTimeout(secs);
if ( pthread_mutex_timedlock(&mMutex, &timeout) < 0 )
throw ThreadException(stringtf("Unable to timedlock pthread mutex: %s", strerror(errno)));
}
void Mutex::unlock() {
if ( pthread_mutex_unlock( &mMutex ) < 0 )
throw ThreadException( stringtf( "Unable to unlock pthread mutex: %s", strerror(errno) ) );
if ( pthread_mutex_unlock(&mMutex) < 0 )
throw ThreadException(stringtf("Unable to unlock pthread mutex: %s", strerror(errno)));
//Debug(3, "unLock");
}
bool Mutex::locked() {
int state = pthread_mutex_trylock( &mMutex );
if ( state != 0 && state != EBUSY )
throw ThreadException( stringtf( "Unable to trylock pthread mutex: %s", strerror(errno) ) );
int state = pthread_mutex_trylock(&mMutex);
if ( (state != 0) && (state != EBUSY) )
throw ThreadException(stringtf("Unable to trylock pthread mutex: %s", strerror(errno)));
if ( state != EBUSY )
unlock();
return( state == EBUSY );
return (state == EBUSY);
}
RecursiveMutex::RecursiveMutex() {
@ -105,8 +107,8 @@ RecursiveMutex::RecursiveMutex() {
}
Condition::Condition( Mutex &mutex ) : mMutex( mutex ) {
if ( pthread_cond_init( &mCondition, nullptr ) < 0 )
throw ThreadException( stringtf( "Unable to create pthread condition: %s", strerror(errno) ) );
if ( pthread_cond_init(&mCondition, nullptr) < 0 )
throw ThreadException(stringtf("Unable to create pthread condition: %s", strerror(errno)));
}
Condition::~Condition() {
@ -236,8 +238,10 @@ Thread::Thread() :
Thread::~Thread() {
Debug( 1, "Destroying thread %d", mPid );
if ( mStarted )
if ( mStarted ) {
Warning("You should really join the thread before destroying it");
join();
}
}
void *Thread::mThreadFunc( void *arg ) {
@ -251,6 +255,7 @@ void *Thread::mThreadFunc( void *arg ) {
thisPtr->mThreadCondition.signal();
thisPtr->mThreadMutex.unlock();
thisPtr->mRunning = true;
Debug(2,"Runnning");
thisPtr->status = thisPtr->run();
thisPtr->mRunning = false;
Debug( 2, "Exiting thread, status %p", (void *)&(thisPtr->status) );
@ -264,9 +269,9 @@ void *Thread::mThreadFunc( void *arg ) {
}
void Thread::start() {
Debug( 1, "Starting thread" );
Debug(4, "Starting thread" );
if ( isThread() )
throw ThreadException( "Can't self start thread" );
throw ThreadException("Can't self start thread");
mThreadMutex.lock();
if ( !mStarted ) {
pthread_attr_t threadAttrs;
@ -282,11 +287,11 @@ void Thread::start() {
}
mThreadCondition.wait();
mThreadMutex.unlock();
Debug( 1, "Started thread %d", mPid );
Debug(4, "Started thread %d", mPid);
}
void Thread::join() {
Debug( 1, "Joining thread %d", mPid );
Debug(1, "Joining thread %d", mPid);
if ( isThread() )
throw ThreadException( "Can't self join thread" );
mThreadMutex.lock();

View File

@ -233,7 +233,7 @@ User *zmLoadAuthUser(const char *auth, bool use_remote_addr) {
}
}
Debug(1, "Attempting to authenticate user from auth string '%s'", auth);
Debug(1, "Attempting to authenticate user from auth string '%s', remote addr(%s)", auth, remote_addr);
char sql[ZM_SQL_SML_BUFSIZ] = "";
snprintf(sql, sizeof(sql),
"SELECT `Id`, `Username`, `Password`, `Enabled`,"
@ -257,13 +257,14 @@ User *zmLoadAuthUser(const char *auth, bool use_remote_addr) {
return nullptr;
}
// getting the time is expensive, so only do it once.
time_t now = time(nullptr);
unsigned int hours = config.auth_hash_ttl;
if ( ! hours ) {
if ( !hours ) {
Warning("No value set for ZM_AUTH_HASH_TTL. Defaulting to 2.");
hours = 2;
} else {
Debug(1, "AUTH_HASH_TTL is %d", hours);
Debug(1, "AUTH_HASH_TTL is %d, time is %d", hours, now);
}
char auth_key[512] = "";
char auth_md5[32+1] = "";

View File

@ -96,6 +96,16 @@ bool startsWith(const std::string &haystack, const std::string &needle) {
return ( haystack.substr(0, needle.length()) == needle );
}
std::vector<std::string> split(const std::string &s, char delim) {
std::vector<std::string> elems;
std::stringstream ss(s);
std::string item;
while ( std::getline(ss, item, delim) ) {
elems.push_back(trimSpaces(item));
}
return elems;
}
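// e.g. split("one, two , three", ',') returns {"one", "two", "three"},
// since each field is passed through trimSpaces() before being stored.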
StringVector split(const std::string &string, const std::string &chars, int limit) {
StringVector stringVector;
std::string tempString = string;

View File

@ -553,8 +553,8 @@ int ParseEncoderParameters(
}
valueoffset = line.find('=');
if ( valueoffset == std::string::npos || valueoffset+1 >= line.length() || valueoffset == 0 ) {
Warning("Failed parsing encoder parameters line %d: Invalid pair", lineno);
if ( valueoffset == std::string::npos || (valueoffset+1 >= line.length()) || (valueoffset == 0) ) {
Warning("Failed parsing encoder parameters line %d %s: Invalid pair", lineno, line.c_str());
continue;
}

File diff suppressed because it is too large

View File

@ -15,36 +15,55 @@ extern "C" {
#if HAVE_LIBAVCODEC
class VideoStore;
#include "zm_monitor.h"
#include "zm_packet.h"
#include "zm_packetqueue.h"
#include "zm_swscale.h"
class VideoStore {
private:
private:
struct CodecData {
const AVCodecID codec_id;
const char *codec_codec;
const char *codec_name;
const enum AVPixelFormat pix_fmt;
};
static struct CodecData codec_data[];
Monitor *monitor;
AVOutputFormat *out_format;
AVFormatContext *oc;
AVStream *video_out_stream;
AVStream *audio_out_stream;
int video_in_stream_index;
int audio_in_stream_index;
AVCodec *video_out_codec;
AVCodecContext *video_in_ctx;
AVCodecContext *video_out_ctx;
AVStream *video_out_stream;
AVStream *video_in_stream;
AVStream *audio_in_stream;
Monitor *monitor;
const AVCodec *audio_in_codec;
AVCodecContext *audio_in_ctx;
// The following are used when encoding the audio stream to AAC
AVCodec *audio_out_codec;
AVCodecContext *audio_out_ctx;
// Move this into the object so that we aren't constantly allocating/deallocating it on the stack
AVPacket opkt;
// we are transcoding
AVFrame *video_in_frame;
AVFrame *in_frame;
AVFrame *out_frame;
AVCodecContext *video_in_ctx;
const AVCodec *audio_in_codec;
AVCodecContext *audio_in_ctx;
SWScale swscale;
unsigned int packets_written;
unsigned int frame_count;
// The following are used when encoding the audio stream to AAC
AVStream *audio_out_stream;
AVCodec *audio_out_codec;
AVCodecContext *audio_out_ctx;
#ifdef HAVE_LIBSWRESAMPLE
SwrContext *resample_ctx;
#else
@ -59,6 +78,8 @@ private:
const char *format;
// These are for in
int64_t video_start_pts;
int64_t video_last_pts;
int64_t video_last_dts;
int64_t audio_last_pts;
@ -83,13 +104,20 @@ public:
const char *filename_in,
const char *format_in,
AVStream *video_in_stream,
AVCodecContext *video_in_ctx,
AVStream *audio_in_stream,
AVCodecContext *audio_in_ctx,
Monitor * p_monitor);
bool open();
~VideoStore();
bool open();
int writeVideoFramePacket( AVPacket *pkt );
int writeAudioFramePacket( AVPacket *pkt );
void write_video_packet( AVPacket &pkt );
void write_audio_packet( AVPacket &pkt );
int writeVideoFramePacket( ZMPacket *pkt );
int writeAudioFramePacket( ZMPacket *pkt );
int writePacket( ZMPacket *pkt );
int write_packets( zm_packetqueue &queue );
void flush_codecs();
};
#endif //havelibav

View File

@ -844,12 +844,11 @@ int Zone::Load(Monitor *monitor, Zone **&zones) {
}
MYSQL_RES *result = mysql_store_result(&dbconn);
db_mutex.unlock();
if ( !result ) {
Error("Can't use query result: %s", mysql_error(&dbconn));
db_mutex.unlock();
return 0;
}
db_mutex.unlock();
int n_zones = mysql_num_rows(result);
Debug(1, "Got %d zones for monitor %s", n_zones, monitor->Name());
delete[] zones;

View File

@ -70,6 +70,8 @@ possible, this should run at more or less constant speed.
#include "zm_time.h"
#include "zm_signal.h"
#include "zm_monitor.h"
#include "zm_analysis_thread.h"
#include "zm_rtsp_server_thread.h"
void Usage() {
fprintf(stderr, "zmc -d <device_path> or -r <proto> -H <host> -P <port> -p <path> or -f <file_path> or -m <monitor_id>\n");
@ -240,11 +242,15 @@ int main(int argc, char *argv[]) {
result = 0;
static char sql[ZM_SQL_SML_BUFSIZ];
for ( int i = 0; i < n_monitors; i++ ) {
if ( !monitors[i]->getCamera() ) {
// failure is currently ignored here
}
if ( !monitors[i]->connect() ) {
// failure is currently ignored here
}
time_t now = (time_t)time(nullptr);
monitors[i]->setStartupTime(now);
snprintf(sql, sizeof(sql),
"INSERT INTO Monitor_Status (MonitorId,Status) VALUES (%d, 'Running') ON DUPLICATE KEY UPDATE Status='Running'",
"INSERT INTO Monitor_Status (MonitorId,Status,CaptureFPS,AnalysisFPS) VALUES (%d, 'Running',0,0) ON DUPLICATE KEY UPDATE Status='Running',CaptureFPS=0,AnalysisFPS=0",
monitors[i]->Id());
if ( mysql_query(&dbconn, sql) ) {
Error("Can't run query: %s", mysql_error(&dbconn));
@ -252,95 +258,129 @@ int main(int argc, char *argv[]) {
} // end foreach monitor
// Outer primary loop, handles connection to camera
if ( monitors[0]->PrimeCapture() < 0 ) {
if ( monitors[0]->PrimeCapture() <= 0 ) {
if ( prime_capture_log_count % 60 ) {
Debug(1, "Failed to prime capture of initial monitor");
} else {
// rate-limit the Error to the first failure and every 60th thereafter
Error("Failed to prime capture of initial monitor");
}
prime_capture_log_count++;
if ( !zm_terminate )
sleep(10);
monitors[0]->disconnect();
if ( !zm_terminate ) {
Debug(1, "Sleeping");
sleep(5);
}
continue;
}
int *capture_delays = new int[n_monitors];
int *alarm_capture_delays = new int[n_monitors];
int *next_delays = new int[n_monitors];
struct timeval * last_capture_times = new struct timeval[n_monitors];
for ( int i = 0; i < n_monitors; i++ ) {
last_capture_times[i].tv_sec = last_capture_times[i].tv_usec = 0;
capture_delays[i] = monitors[i]->GetCaptureDelay();
alarm_capture_delays[i] = monitors[i]->GetAlarmCaptureDelay();
snprintf(sql, sizeof(sql),
"INSERT INTO Monitor_Status (MonitorId,Status) VALUES (%d, 'Connected') ON DUPLICATE KEY UPDATE Status='Connected'",
monitors[i]->Id());
if ( mysql_query(&dbconn, sql) ) {
Error("Can't run query: %s", mysql_error(&dbconn));
}
}
#if HAVE_RTSP_SERVER
RTSPServerThread ** rtsp_server_threads = nullptr;
if ( config.min_rtsp_port ) {
rtsp_server_threads = new RTSPServerThread *[n_monitors];
Debug(1, "Starting RTSP server because min_rtsp_port is set");
} else {
Debug(1, "Not starting RTSP server because min_rtsp_port not set");
}
#endif
AnalysisThread **analysis_threads = new AnalysisThread *[n_monitors];
int *capture_delays = new int[n_monitors];
int *alarm_capture_delays = new int[n_monitors];
struct timeval * last_capture_times = new struct timeval[n_monitors];
for ( int i = 0; i < n_monitors; i++ ) {
last_capture_times[i].tv_sec = last_capture_times[i].tv_usec = 0;
capture_delays[i] = monitors[i]->GetCaptureDelay();
alarm_capture_delays[i] = monitors[i]->GetAlarmCaptureDelay();
Debug(2, "capture delay(%u mSecs 1000/capture_fps) alarm delay(%u)",
capture_delays[i], alarm_capture_delays[i]);
Monitor::Function function = monitors[i]->GetFunction();
if ( function != Monitor::MONITOR ) {
Debug(1, "Starting an analysis thread for monitor (%d)", monitors[i]->Id());
analysis_threads[i] = new AnalysisThread(monitors[i]);
analysis_threads[i]->start();
} else {
analysis_threads[i] = nullptr;
}
#if HAVE_RTSP_SERVER
if ( rtsp_server_threads ) {
// already inside the per-monitor loop, so use its index rather than
// starting every RTSP thread n_monitors times
rtsp_server_threads[i] = new RTSPServerThread(monitors[i]);
Camera *camera = monitors[i]->getCamera();
rtsp_server_threads[i]->addStream(camera->get_VideoStream(), camera->get_AudioStream());
rtsp_server_threads[i]->start();
}
#endif
} // end foreach monitor
struct timeval now;
struct DeltaTimeval delta_time;
while ( !zm_terminate ) {
//sigprocmask(SIG_BLOCK, &block_set, 0);
for ( int i = 0; i < n_monitors; i++ ) {
long min_delay = MAXINT;
gettimeofday(&now, nullptr);
for ( int j = 0; j < n_monitors; j++ ) {
if ( last_capture_times[j].tv_sec ) {
DELTA_TIMEVAL(delta_time, now, last_capture_times[j], DT_PREC_3);
if ( monitors[i]->GetState() == Monitor::ALARM )
next_delays[j] = alarm_capture_delays[j]-delta_time.delta;
else
next_delays[j] = capture_delays[j]-delta_time.delta;
if ( next_delays[j] < 0 )
next_delays[j] = 0;
} else {
next_delays[j] = 0;
}
if ( next_delays[j] <= min_delay ) {
min_delay = next_delays[j];
}
} // end foreach monitor
monitors[i]->CheckAction();
if ( next_delays[i] <= min_delay || next_delays[i] <= 0 ) {
if ( monitors[i]->PreCapture() < 0 ) {
Error("Failed to pre-capture monitor %d %s (%d/%d)",
Error("Failed to pre-capture monitor %d %d (%d/%d)",
monitors[i]->Id(), monitors[i]->Name(), i+1, n_monitors);
monitors[i]->Close();
result = -1;
break;
}
if ( monitors[i]->Capture() < 0 ) {
Info("Failed to capture image from monitor %d %s (%d/%d)",
Error("Failed to capture image from monitor %d %s (%d/%d)",
monitors[i]->Id(), monitors[i]->Name(), i+1, n_monitors);
monitors[i]->Close();
result = -1;
break;
}
if ( monitors[i]->PostCapture() < 0 ) {
Error("Failed to post-capture monitor %d %s (%d/%d)",
monitors[i]->Id(), monitors[i]->Name(), i+1, n_monitors);
monitors[i]->Close();
result = -1;
break;
}
if ( next_delays[i] > 0 ) {
gettimeofday(&now, nullptr);
// capture_delay is the amount of time we should sleep to achieve the desired framerate.
int delay = monitors[i]->GetState() == Monitor::ALARM ? alarm_capture_delays[i] : capture_delays[i];
if ( delay && last_capture_times[i].tv_sec ) {
int sleep_time;
DELTA_TIMEVAL(delta_time, now, last_capture_times[i], DT_PREC_3);
long sleep_time = next_delays[i]-delta_time.delta;
sleep_time = delay - delta_time.delta;
Debug(3, "Sleep time is %d from now:%d.%d last:%d.%d delay: %d",
sleep_time,
now.tv_sec, now.tv_usec,
last_capture_times[i].tv_sec, last_capture_times[i].tv_usec,
delay
);
if ( sleep_time < 0 )
sleep_time = 0;
if ( sleep_time > 0 ) {
Debug(2,"usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
usleep(sleep_time*(DT_MAXGRAN/DT_PREC_3));
}
}
gettimeofday(&(last_capture_times[i]), nullptr);
} // end if next_delay <= min_delay || next_delays[i] <= 0 )
} // end if has a last_capture time
last_capture_times[i] = now;
} // end foreach n_monitors
//sigprocmask(SIG_UNBLOCK, &block_set, 0);
if ( result < 0 ) {
// Failure, try reconnecting
break;
}
if ( zm_reload ) {
for ( int i = 0; i < n_monitors; i++ ) {
monitors[i]->Reload();
@ -348,19 +388,57 @@ int main(int argc, char *argv[]) {
logTerm();
logInit(log_id_string);
zm_reload = false;
} // end if zm_reload
} // end while ! zm_terminate and connected
// Killoff the analysis threads. Don't need them spinning while we try to reconnect
for ( int i = 0; i < n_monitors; i++ ) {
if ( analysis_threads[i] ) {
analysis_threads[i]->stop();
}
if ( result < 0 ) {
// Failure, try reconnecting
sleep(5);
break;
} // end while ! zm_terminate
#if HAVE_RTSP_SERVER
if ( rtsp_server_threads ) {
rtsp_server_threads[i]->stop();
}
#endif
}
for ( int i = 0; i < n_monitors; i++ ) {
monitors[i]->Close();
}
// Killoff the analysis threads. Don't need them spinning while we try to reconnect
for ( int i = 0; i < n_monitors; i++ ) {
if ( analysis_threads[i] ) {
analysis_threads[i]->join();
delete analysis_threads[i];
analysis_threads[i] = nullptr;
}
} // end foreach monitor
delete [] analysis_threads;
#if HAVE_RTSP_SERVER
if ( rtsp_server_threads ) {
for ( int i = 0; i < n_monitors; i++ ) {
rtsp_server_threads[i]->join();
delete rtsp_server_threads[i];
rtsp_server_threads[i] = nullptr;
}
delete[] rtsp_server_threads;
rtsp_server_threads = nullptr;
}
#endif
delete [] alarm_capture_delays;
delete [] capture_delays;
delete [] next_delays;
delete [] last_capture_times;
if ( result < 0 ) {
// Failure, try reconnecting
Debug(1, "Sleeping for 5");
sleep(5);
}
} // end while ! zm_terminate outer connection loop
Debug(1,"Updating Monitor status");
for ( int i = 0; i < n_monitors; i++ ) {
static char sql[ZM_SQL_SML_BUFSIZ];
snprintf(sql, sizeof(sql),
@ -374,6 +452,7 @@ int main(int argc, char *argv[]) {
delete [] monitors;
Image::Deinitialise();
Debug(1,"terminating");
logTerm();
zmDbClose();

View File

@ -262,7 +262,13 @@ int main(int argc, const char *argv[], char **envp) {
stream.setStreamTTL(ttl);
stream.setStreamQueue(connkey);
stream.setStreamBuffer(playback_buffer);
stream.setStreamStart(monitor_id);
if ( !stream.setStreamStart(monitor_id) ) {
Error("Unable set start stream for monitor %d", monitor_id);
stream.sendTextFrame("Unable to connect to monitor");
logTerm();
zmDbClose();
return -1;
}
if ( mode == ZMS_JPEG ) {
stream.setStreamType(MonitorStream::STREAM_JPEG);

View File

@ -548,10 +548,11 @@ int main(int argc, char *argv[]) {
}
if ( function & ZMU_FPS ) {
if ( verbose ) {
printf("Current capture rate: %.2f frames per second\n", monitor->GetFPS());
printf("Current capture rate: %.2f frames per second, analysis rate: %.2f frames per second\n",
monitor->get_capture_fps(), monitor->get_analysis_fps());
} else {
if ( have_output ) fputc(separator, stdout);
printf("%.2f", monitor->GetFPS());
printf("capture: %.2f, analysis: %.2f", monitor->get_capture_fps(), monitor->get_analysis_fps());
have_output = true;
}
}

View File

@ -48,18 +48,18 @@ while (my $line = <F>) {
$in_head-- if $line =~ /^$/ and $in_head;
next while $in_head;
unless ($line =~ /^\s+(0x..), \/\* (........)/) {
$line =~ s/static unsigned char fontdata/static unsigned int bigfontdata/;
#$line =~ s/static unsigned char fontdata/static unsigned int bigfontdata/;
print $line;
next;
}
my $code = $1;
my $bincode = $2;
$bincode = "$1$1$2$2$3$3$4$4$5$5$6$6$7$7$8$8" if $bincode =~ /(.)(.)(.)(.)(.)(.)(.)(.)$/;
$bincode =~ s/ /1/g;
my $intcode = unpack("N", pack("B32", substr("0" x 32 . $bincode, -32)));
my $hexcode = sprintf("%#x", $intcode);
$hexcode =~ s/^0$/0x0/;
$bincode =~ s/1/ /g;
#$bincode =~ s/ /1/g;
#my $intcode = unpack("N", pack("B32", substr("0" x 32 . $bincode, -32)));
#my $hexcode = sprintf("%#x", $intcode);
#$hexcode =~ s/^0$/0x0/;
#$bincode =~ s/1/ /g;
print sprintf("\t%10s, /* %s */\n", $hexcode, $bincode);
print sprintf("\t%10s, /* %s */\n", $hexcode, $bincode);
}

View File

@ -1 +1 @@
1.35.16
1.35.17

View File

@ -1,14 +1,22 @@
<?php
error_reporting(0);
$defaultMonitor = new ZM\Monitor();
$defaultMonitor->set(array(
'StorageId' => 1,
'ServerId' => 'auto',
'Function' => 'Record',
'Function' => 'Mocord',
'Type' => 'Ffmpeg',
'Enabled' => '1',
'Colour' => '4', // 32bit
'PreEventCount' => 0,
'ImageBufferCount' => '20',
'WarmupCount' => '0',
'PreEventCount' => '0',
'StreamReplayBuffer' => '0',
'SaveJPEGs' => '4',
'VideoWriter' => '1',
'MaxFPS' => '20',
'AlarmMaxFPS' => '20',
) );
function probe( &$url_bits ) {

View File

@ -117,8 +117,10 @@ if ( sem_acquire($semaphore,1) !== false ) {
$data = unpack('ltype', $msg);
switch ( $data['type'] ) {
case MSG_DATA_WATCH :
$data = unpack('ltype/imonitor/istate/dfps/ilevel/irate/ddelay/izoom/Cdelayed/Cpaused/Cenabled/Cforced', $msg);
$data = unpack('ltype/imonitor/istate/dfps/dcapturefps/danalysisfps/ilevel/irate/ddelay/izoom/Cdelayed/Cpaused/Cenabled/Cforced', $msg);
$data['fps'] = round( $data['fps'], 2 );
$data['capturefps'] = round( $data['capturefps'], 2 );
$data['analysisfps'] = round( $data['analysisfps'], 2 );
$data['rate'] /= RATE_BASE;
$data['delay'] = round( $data['delay'], 2 );
$data['zoom'] = round( $data['zoom']/SCALE_BASE, 1 );

View File

@ -63,7 +63,7 @@ class Event extends ZM_Object {
$this->{'Storage'} = Storage::find_one(array('Id'=>$this->{'StorageId'}));
if ( ! ( property_exists($this, 'Storage') and $this->{'Storage'} ) ) {
$this->{'Storage'} = new Storage(NULL);
$this->{'Storage'}->Scheme($this->{'Scheme'});
$this->{'Storage'}->Scheme($this->Scheme());
}
}
return $this->{'Storage'};

View File

@ -68,6 +68,7 @@ class Monitor extends ZM_Object {
'SaveJPEGs' => 3,
'VideoWriter' => '0',
'OutputCodec' => null,
'Encoder' => 'auto',
'OutputContainer' => null,
'EncoderParameters' => "# Lines beginning with # are a comment \n# For changing quality, use the crf option\n# 1 is best, 51 is worst quality\n#crf=23\n",
'RecordAudio' => array('type'=>'boolean', 'default'=>0),

web/includes/actions.php (new file, 969 lines)
View File

@ -0,0 +1,969 @@
<?php
//
// ZoneMinder web action file, $Date$, $Revision$
// Copyright (C) 2001-2008 Philip Coombes
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
//
function getAffectedIds( $name ) {
$names = $name.'s';
$ids = array();
if ( isset($_REQUEST[$names]) ) {
if ( is_array($_REQUEST[$names]) ) {
$ids = $_REQUEST[$names];
} else {
$ids = array($_REQUEST[$names]);
}
} else if ( isset($_REQUEST[$name]) ) {
if ( is_array($_REQUEST[$name]) ) {
$ids = $_REQUEST[$name];
} else {
$ids = array($_REQUEST[$name]);
}
}
return $ids;
}
if ( empty($action) ) {
return;
}
if ( $action == 'login' && isset($_REQUEST['username']) && ( ZM_AUTH_TYPE == 'remote' || isset($_REQUEST['password']) ) ) {
$refreshParent = true;
// User login is automatically performed in includes/auth.php So we don't need to perform a login here,
// just handle redirects. This is the action that comes from the login view, so the logical thing to
// do on successful auth is redirect to console, otherwise loop back to login.
if ( !$user ) {
$view = 'login';
} else {
$view = 'console';
$redirect = ZM_BASE_URL.$_SERVER['PHP_SELF'].'?view=console';
}
} else if ( $action == 'logout' ) {
userLogout();
$refreshParent = true;
$view = 'none';
} else if ( $action == 'bandwidth' && isset($_REQUEST['newBandwidth']) ) {
$_COOKIE['zmBandwidth'] = validStr($_REQUEST['newBandwidth']);
setcookie('zmBandwidth', validStr($_REQUEST['newBandwidth']), time()+3600*24*30*12*10);
$refreshParent = true;
}
// Event scope actions, view permissions only required
if ( canView('Events') ) {
if ( isset($_REQUEST['object']) and ( $_REQUEST['object'] == 'filter' ) ) {
if ( $action == 'addterm' ) {
$_REQUEST['filter'] = addFilterTerm($_REQUEST['filter'], $_REQUEST['line']);
} elseif ( $action == 'delterm' ) {
$_REQUEST['filter'] = delFilterTerm($_REQUEST['filter'], $_REQUEST['line']);
} else if ( canEdit('Events') ) {
if ( $action == 'delete' ) {
if ( ! empty($_REQUEST['Id']) ) {
dbQuery('DELETE FROM Filters WHERE Id=?', array($_REQUEST['Id']));
}
} else if ( ( $action == 'Save' ) or ( $action == 'SaveAs' ) or ( $action == 'execute' ) ) {
# or ( $action == 'submit' ) ) {
$sql = '';
$_REQUEST['filter']['Query']['sort_field'] = validStr($_REQUEST['filter']['Query']['sort_field']);
$_REQUEST['filter']['Query']['sort_asc'] = validStr($_REQUEST['filter']['Query']['sort_asc']);
$_REQUEST['filter']['Query']['limit'] = validInt($_REQUEST['filter']['Query']['limit']);
if ( $action == 'execute' ) {
$tempFilterName = '_TempFilter'.time();
$sql .= ' Name = \''.$tempFilterName.'\'';
} else {
$sql .= ' Name = '.dbEscape($_REQUEST['filter']['Name']);
}
$sql .= ', Query = '.dbEscape(jsonEncode($_REQUEST['filter']['Query']));
$sql .= ', AutoArchive = '.(!empty($_REQUEST['filter']['AutoArchive']) ? 1 : 0);
$sql .= ', AutoVideo = '. ( !empty($_REQUEST['filter']['AutoVideo']) ? 1 : 0);
$sql .= ', AutoUpload = '. ( !empty($_REQUEST['filter']['AutoUpload']) ? 1 : 0);
$sql .= ', AutoEmail = '. ( !empty($_REQUEST['filter']['AutoEmail']) ? 1 : 0);
$sql .= ', AutoMessage = '. ( !empty($_REQUEST['filter']['AutoMessage']) ? 1 : 0);
$sql .= ', AutoExecute = '. ( !empty($_REQUEST['filter']['AutoExecute']) ? 1 : 0);
$sql .= ', AutoExecuteCmd = '.dbEscape($_REQUEST['filter']['AutoExecuteCmd']);
$sql .= ', AutoDelete = '. ( !empty($_REQUEST['filter']['AutoDelete']) ? 1 : 0);
if ( !empty($_REQUEST['filter']['AutoMove']) ) {
$sql .= ', AutoMove = 1, AutoMoveTo='. validInt($_REQUEST['filter']['AutoMoveTo']);
} else {
$sql .= ', AutoMove = 0';
}
$sql .= ', UpdateDiskSpace = '. ( !empty($_REQUEST['filter']['UpdateDiskSpace']) ? 1 : 0);
$sql .= ', Background = '. ( !empty($_REQUEST['filter']['Background']) ? 1 : 0);
$sql .= ', Concurrent = '. ( !empty($_REQUEST['filter']['Concurrent']) ? 1 : 0);
if ( $_REQUEST['Id'] and ( $action == 'Save' ) ) {
dbQuery('UPDATE Filters SET ' . $sql. ' WHERE Id=?', array($_REQUEST['Id']));
} else {
dbQuery('INSERT INTO Filters SET' . $sql);
$_REQUEST['Id'] = dbInsertId();
}
if ( $action == 'execute' ) {
executeFilter( $tempFilterName );
}
} // end if save or execute
} // end if canEdit(Events)
return;
} // end if object == filter
else {
// Event scope actions, edit permissions required
if ( canEdit('Events') ) {
if ( ($action == 'rename') && isset($_REQUEST['eventName']) && !empty($_REQUEST['eid']) ) {
dbQuery('UPDATE Events SET Name=? WHERE Id=?', array($_REQUEST['eventName'], $_REQUEST['eid']));
} else if ( $action == 'eventdetail' ) {
if ( !empty($_REQUEST['eid']) ) {
dbQuery('UPDATE Events SET Cause=?, Notes=? WHERE Id=?',
array($_REQUEST['newEvent']['Cause'], $_REQUEST['newEvent']['Notes'], $_REQUEST['eid']) );
} else {
$dbConn->beginTransaction();
foreach( getAffectedIds('markEid') as $markEid ) {
dbQuery('UPDATE Events SET Cause=?, Notes=? WHERE Id=?',
array($_REQUEST['newEvent']['Cause'], $_REQUEST['newEvent']['Notes'], $markEid) );
}
$dbConn->commit();
}
$refreshParent = true;
$closePopup = true;
} elseif ( $action == 'archive' || $action == 'unarchive' ) {
$archiveVal = ($action == 'archive')?1:0;
if ( !empty($_REQUEST['eid']) ) {
dbQuery('UPDATE Events SET Archived=? WHERE Id=?', array($archiveVal, $_REQUEST['eid']));
} else {
$dbConn->beginTransaction();
foreach( getAffectedIds('markEid') as $markEid ) {
dbQuery('UPDATE Events SET Archived=? WHERE Id=?', array($archiveVal, $markEid));
}
$dbConn->commit();
$refreshParent = true;
}
} elseif ( $action == 'delete' ) {
$dbConn->beginTransaction();
foreach( getAffectedIds('eids') as $markEid ) {
deleteEvent($markEid);
}
$dbConn->commit();
$refreshParent = true;
}
} // end if canEdit(Events)
} // end if filter or something else
} // end canView(Events)
// Monitor control actions, require a monitor id and control view permissions for that monitor
if ( !empty($_REQUEST['mid']) && canView('Control', $_REQUEST['mid']) ) {
require_once('control_functions.php');
require_once('Monitor.php');
$mid = validInt($_REQUEST['mid']);
if ( $action == 'control' ) {
$monitor = new Monitor($mid);
$ctrlCommand = buildControlCommand($monitor);
sendControlCommand($monitor->Id(), $ctrlCommand);
} else if ( $action == 'settings' ) {
$args = ' -m ' . escapeshellarg($mid);
$args .= ' -B' . escapeshellarg($_REQUEST['newBrightness']);
$args .= ' -C' . escapeshellarg($_REQUEST['newContrast']);
$args .= ' -H' . escapeshellarg($_REQUEST['newHue']);
$args .= ' -O' . escapeshellarg($_REQUEST['newColour']);
$zmuCommand = getZmuCommand($args);
$zmuOutput = exec($zmuCommand);
list($brightness, $contrast, $hue, $colour) = explode(' ', $zmuOutput);
dbQuery(
'UPDATE Monitors SET Brightness = ?, Contrast = ?, Hue = ?, Colour = ? WHERE Id = ?',
array($brightness, $contrast, $hue, $colour, $mid));
}
}
// Control capability actions, require control edit permissions
if ( canEdit('Control') ) {
if ( $action == 'controlcap' ) {
require_once('Control.php');
$Control = new Control( !empty($_REQUEST['cid']) ? $_REQUEST['cid'] : null );
//$changes = getFormChanges( $control, $_REQUEST['newControl'], $types, $columns );
$Control->save($_REQUEST['newControl']);
$refreshParent = true;
$view = 'none';
} elseif ( $action == 'delete' ) {
if ( isset($_REQUEST['markCids']) ) {
foreach( $_REQUEST['markCids'] as $markCid ) {
dbQuery('DELETE FROM Controls WHERE Id = ?', array($markCid));
dbQuery('UPDATE Monitors SET Controllable = 0, ControlId = 0 WHERE ControlId = ?', array($markCid));
$refreshParent = true;
}
}
} // end if action
} // end if canEdit Controls
if ( isset($_REQUEST['object']) and $_REQUEST['object'] == 'Monitor' ) {
if ( $action == 'save' ) {
foreach ( $_REQUEST['mids'] as $mid ) {
$mid = ValidInt($mid);
if ( ! canEdit('Monitors', $mid) ) {
Warning("Cannot edit monitor $mid");
continue;
}
$Monitor = new Monitor($mid);
if ( $Monitor->Type() != 'WebSite' ) {
$Monitor->zmaControl('stop');
$Monitor->zmcControl('stop');
}
$Monitor->save($_REQUEST['newMonitor']);
if ( $Monitor->Function() != 'None' && $Monitor->Type() != 'WebSite' ) {
$Monitor->zmcControl('start');
if ( $Monitor->Enabled() ) {
$Monitor->zmaControl('start');
}
}
} // end foreach mid
$refreshParent = true;
} // end if action == save
} // end if object is Monitor
// Monitor edit actions, require a monitor id and edit permissions for that monitor
if ( !empty($_REQUEST['mid']) && canEdit('Monitors', $_REQUEST['mid']) ) {
$mid = validInt($_REQUEST['mid']);
if ( $action == 'function' ) {
$monitor = dbFetchOne('SELECT * FROM Monitors WHERE Id=?', NULL, array($mid));
$newFunction = validStr($_REQUEST['newFunction']);
# Because we use a checkbox, it won't get passed in the request. So not being in _REQUEST means 0
$newEnabled = ( !isset($_REQUEST['newEnabled']) or $_REQUEST['newEnabled'] != '1' ) ? '0' : '1';
$oldFunction = $monitor['Function'];
$oldEnabled = $monitor['Enabled'];
if ( $newFunction != $oldFunction || $newEnabled != $oldEnabled ) {
dbQuery('UPDATE Monitors SET Function=?, Enabled=? WHERE Id=?',
array($newFunction, $newEnabled, $mid));
$monitor['Function'] = $newFunction;
$monitor['Enabled'] = $newEnabled;
if ( daemonCheck() && ($monitor['Type'] != 'WebSite') ) {
$restart = ($oldFunction == 'None') || ($newFunction == 'None') || ($newEnabled != $oldEnabled);
zmaControl($monitor, 'stop');
zmcControl($monitor, $restart?'restart':'');
zmaControl($monitor, 'start');
}
$refreshParent = true;
}
} else if ( $action == 'zone' && isset($_REQUEST['zid']) ) {
$zid = validInt($_REQUEST['zid']);
$monitor = dbFetchOne('SELECT * FROM Monitors WHERE Id=?', NULL, array($mid));
if ( !empty($zid) ) {
$zone = dbFetchOne('SELECT * FROM Zones WHERE MonitorId=? AND Id=?', NULL, array($mid, $zid));
} else {
$zone = array();
}
if ( $_REQUEST['newZone']['Units'] == 'Percent' ) {
$_REQUEST['newZone']['MinAlarmPixels'] = intval(($_REQUEST['newZone']['MinAlarmPixels']*$_REQUEST['newZone']['Area'])/100);
$_REQUEST['newZone']['MaxAlarmPixels'] = intval(($_REQUEST['newZone']['MaxAlarmPixels']*$_REQUEST['newZone']['Area'])/100);
if ( isset($_REQUEST['newZone']['MinFilterPixels']) )
$_REQUEST['newZone']['MinFilterPixels'] = intval(($_REQUEST['newZone']['MinFilterPixels']*$_REQUEST['newZone']['Area'])/100);
if ( isset($_REQUEST['newZone']['MaxFilterPixels']) )
$_REQUEST['newZone']['MaxFilterPixels'] = intval(($_REQUEST['newZone']['MaxFilterPixels']*$_REQUEST['newZone']['Area'])/100);
if ( isset($_REQUEST['newZone']['MinBlobPixels']) )
$_REQUEST['newZone']['MinBlobPixels'] = intval(($_REQUEST['newZone']['MinBlobPixels']*$_REQUEST['newZone']['Area'])/100);
if ( isset($_REQUEST['newZone']['MaxBlobPixels']) )
$_REQUEST['newZone']['MaxBlobPixels'] = intval(($_REQUEST['newZone']['MaxBlobPixels']*$_REQUEST['newZone']['Area'])/100);
}
unset( $_REQUEST['newZone']['Points'] );
$types = array();
$changes = getFormChanges($zone, $_REQUEST['newZone'], $types);
if ( count($changes) ) {
if ( $zid > 0 ) {
dbQuery('UPDATE Zones SET '.implode(', ', $changes).' WHERE MonitorId=? AND Id=?', array($mid, $zid));
} else {
dbQuery('INSERT INTO Zones SET MonitorId=?, '.implode(', ', $changes), array($mid));
}
if ( daemonCheck() && ($monitor['Type'] != 'WebSite') ) {
if ( $_REQUEST['newZone']['Type'] == 'Privacy' ) {
zmaControl($monitor, 'stop');
zmcControl($monitor, 'restart');
zmaControl($monitor, 'start');
} else {
zmaControl($monitor, 'restart');
}
}
if ( ($_REQUEST['newZone']['Type'] == 'Privacy') && $monitor['Controllable'] ) {
require_once('control_functions.php');
sendControlCommand($mid, 'quit');
}
$refreshParent = true;
}
$view = 'none';
} elseif ( $action == 'plugin' && isset($_REQUEST['pl']) ) {
$sql = 'SELECT * FROM PluginsConfig WHERE MonitorId=? AND ZoneId=? AND pluginName=?';
$pconfs=dbFetchAll($sql, NULL, array($mid, $_REQUEST['zid'], $_REQUEST['pl']));
$changes = 0;
foreach ( $pconfs as $pconf ) {
$value = $_REQUEST['pluginOpt'][$pconf['Name']];
if ( array_key_exists($pconf['Name'], $_REQUEST['pluginOpt']) && ($pconf['Value'] != $value) ) {
dbQuery('UPDATE PluginsConfig SET Value=? WHERE id=?', array($value, $pconf['Id']));
$changes++;
}
}
if ( $changes > 0 ) {
if ( daemonCheck() && ($monitor['Type'] != 'WebSite') ) {
zmaControl($mid, 'restart');
}
$refreshParent = true;
}
$view = 'none';
} elseif ( ($action == 'sequence') && isset($_REQUEST['smid']) ) {
$smid = validInt($_REQUEST['smid']);
$monitor = dbFetchOne('SELECT * FROM Monitors WHERE Id = ?', NULL, array($mid));
$smonitor = dbFetchOne('SELECT * FROM Monitors WHERE Id = ?', NULL, array($smid));
dbQuery('UPDATE Monitors SET Sequence=? WHERE Id=?', array($smonitor['Sequence'], $monitor['Id']));
dbQuery('UPDATE Monitors SET Sequence=? WHERE Id=?', array($monitor['Sequence'], $smonitor['Id']));
$refreshParent = true;
fixSequences();
} elseif ( $action == 'delete' ) {
if ( isset($_REQUEST['markZids']) ) {
$deletedZid = 0;
foreach ( $_REQUEST['markZids'] as $markZid ) {
$zone = dbFetchOne('SELECT * FROM Zones WHERE Id=?', NULL, array($markZid));
dbQuery('DELETE FROM Zones WHERE MonitorId=? AND Id=?', array($mid, $markZid));
$deletedZid = 1;
}
if ( $deletedZid ) {
if ( daemonCheck() && $monitor['Type'] != 'WebSite' ) {
if ( $zone['Type'] == 'Privacy' ) {
zmaControl($mid, 'stop');
zmcControl($mid, 'restart');
zmaControl($mid, 'start');
} else {
zmaControl($mid, 'restart');
}
} // end if daemonCheck()
$refreshParent = true;
} // end if deletedzid
} // end if isset($_REQUEST['markZids'])
} // end if action
} // end if $mid and canEdit($mid)
// Monitor edit actions, monitor id derived, require edit permissions for that monitor
if ( canEdit('Monitors') ) {
if ( $action == 'monitor' ) {
$mid = 0;
if ( !empty($_REQUEST['mid']) ) {
$mid = validInt($_REQUEST['mid']);
$monitor = dbFetchOne('SELECT * FROM Monitors WHERE Id=?', NULL, array($mid));
if ( ZM_OPT_X10 ) {
$x10Monitor = dbFetchOne('SELECT * FROM TriggersX10 WHERE MonitorId=?', NULL, array($mid));
if ( !$x10Monitor )
$x10Monitor = array();
}
} else {
$monitor = array();
if ( ZM_OPT_X10 ) {
$x10Monitor = array();
}
}
$Monitor = new Monitor($monitor);
// Define a field type for anything that's not simple text equivalent
$types = array(
'Triggers' => 'set',
'Controllable' => 'toggle',
'TrackMotion' => 'toggle',
'Enabled' => 'toggle',
'DoNativeMotDet' => 'toggle',
'Exif' => 'toggle',
'RTSPDescribe' => 'toggle',
'RecordAudio' => 'toggle',
'Method' => 'raw',
);
if ( $_REQUEST['newMonitor']['ServerId'] == 'auto' ) {
$_REQUEST['newMonitor']['ServerId'] = dbFetchOne(
'SELECT Id FROM Servers WHERE Status=\'Running\' ORDER BY FreeMem DESC, CpuLoad ASC LIMIT 1', 'Id');
Logger::Debug('Auto selecting server: Got ' . $_REQUEST['newMonitor']['ServerId'] );
if ( !$_REQUEST['newMonitor']['ServerId'] and defined('ZM_SERVER_ID') ) {
$_REQUEST['newMonitor']['ServerId'] = ZM_SERVER_ID;
Logger::Debug('Auto selecting server to ' . ZM_SERVER_ID);
}
}
$columns = getTableColumns('Monitors');
$changes = getFormChanges($monitor, $_REQUEST['newMonitor'], $types, $columns);
if ( count($changes) ) {
if ( $mid ) {
# If we change anything that changes the shared mem size, zma can complain. So let's stop first.
if ( $monitor['Type'] != 'WebSite' ) {
zmaControl($monitor, 'stop');
zmcControl($monitor, 'stop');
}
dbQuery('UPDATE Monitors SET '.implode(', ', $changes).' WHERE Id=?', array($mid));
// Groups will be added below
if ( isset($changes['Name']) or isset($changes['StorageId']) ) {
$OldStorage = new Storage($monitor['StorageId']);
$saferOldName = basename($monitor['Name']);
if ( file_exists($OldStorage->Path().'/'.$saferOldName) )
unlink($OldStorage->Path().'/'.$saferOldName);
$NewStorage = new Storage($_REQUEST['newMonitor']['StorageId']);
if ( ! file_exists($NewStorage->Path().'/'.$mid) )
mkdir($NewStorage->Path().'/'.$mid, 0755);
$saferNewName = basename($_REQUEST['newMonitor']['Name']);
symlink($mid, $NewStorage->Path().'/'.$saferNewName);
}
if ( isset($changes['Width']) || isset($changes['Height']) ) {
$newW = $_REQUEST['newMonitor']['Width'];
$newH = $_REQUEST['newMonitor']['Height'];
$newA = $newW * $newH;
$oldW = $monitor['Width'];
$oldH = $monitor['Height'];
$oldA = $oldW * $oldH;
$zones = dbFetchAll('SELECT * FROM Zones WHERE MonitorId=?', NULL, array($mid));
foreach ( $zones as $zone ) {
$newZone = $zone;
$points = coordsToPoints($zone['Coords']);
for ( $i = 0; $i < count($points); $i++ ) {
$points[$i]['x'] = intval(($points[$i]['x']*($newW-1))/($oldW-1));
$points[$i]['y'] = intval(($points[$i]['y']*($newH-1))/($oldH-1));
}
$newZone['Coords'] = pointsToCoords($points);
$newZone['Area'] = intval(round(($zone['Area']*$newA)/$oldA));
$newZone['MinAlarmPixels'] = intval(round(($newZone['MinAlarmPixels']*$newA)/$oldA));
$newZone['MaxAlarmPixels'] = intval(round(($newZone['MaxAlarmPixels']*$newA)/$oldA));
$newZone['MinFilterPixels'] = intval(round(($newZone['MinFilterPixels']*$newA)/$oldA));
$newZone['MaxFilterPixels'] = intval(round(($newZone['MaxFilterPixels']*$newA)/$oldA));
$newZone['MinBlobPixels'] = intval(round(($newZone['MinBlobPixels']*$newA)/$oldA));
$newZone['MaxBlobPixels'] = intval(round(($newZone['MaxBlobPixels']*$newA)/$oldA));
$changes = getFormChanges($zone, $newZone, $types);
if ( count($changes) ) {
dbQuery('UPDATE Zones SET '.implode(', ', $changes).' WHERE MonitorId=? AND Id=?',
array($mid, $zone['Id']));
}
} // end foreach zone
} // end if width and height
$restart = true;
} else if ( ! $user['MonitorIds'] ) {
// Can only create new monitors if we are not restricted to specific monitors
# FIXME This is actually a race condition. Should lock the table.
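# A sketch of one fix (untested): serialize the read-then-insert with a table
# lock so concurrent saves cannot claim the same Sequence. Assumes the db user
# is allowed to LOCK TABLES:
#   dbQuery('LOCK TABLES Monitors WRITE');
#   ... SELECT MAX(Sequence) and INSERT as below ...
#   dbQuery('UNLOCK TABLES');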
$maxSeq = dbFetchOne('SELECT MAX(Sequence) AS MaxSequence FROM Monitors', 'MaxSequence');
$changes[] = 'Sequence = '.($maxSeq+1);
$sql = 'INSERT INTO Monitors SET '.implode(', ', $changes);
if ( dbQuery($sql) ) {
$mid = dbInsertId();
$zoneArea = $_REQUEST['newMonitor']['Width'] * $_REQUEST['newMonitor']['Height'];
dbQuery("INSERT INTO Zones SET MonitorId = ?, Name = 'All', Type = 'Active', Units = 'Percent', NumCoords = 4, Coords = ?, Area=?, AlarmRGB = 0xff0000, CheckMethod = 'Blobs', MinPixelThreshold = 25, MinAlarmPixels=?, MaxAlarmPixels=?, FilterX = 3, FilterY = 3, MinFilterPixels=?, MaxFilterPixels=?, MinBlobPixels=?, MinBlobs = 1", array( $mid, sprintf( "%d,%d %d,%d %d,%d %d,%d", 0, 0, $_REQUEST['newMonitor']['Width']-1, 0, $_REQUEST['newMonitor']['Width']-1, $_REQUEST['newMonitor']['Height']-1, 0, $_REQUEST['newMonitor']['Height']-1 ), $zoneArea, intval(($zoneArea*3)/100), intval(($zoneArea*75)/100), intval(($zoneArea*3)/100), intval(($zoneArea*75)/100), intval(($zoneArea*2)/100) ) );
//$view = 'none';
$Storage = new Storage($_REQUEST['newMonitor']['StorageId']);
mkdir($Storage->Path().'/'.$mid, 0755);
$saferName = basename($_REQUEST['newMonitor']['Name']);
symlink($mid, $Storage->Path().'/'.$saferName);
} else {
Error('Error saving new Monitor.');
$error_message = dbError($sql);
return;
}
} else {
Error('Users with Monitors restrictions cannot create new monitors.');
return;
}
$restart = true;
} else {
Logger::Debug('No action due to no changes to Monitor');
} # end if count(changes)
if (
( !isset($_POST['newMonitor']['GroupIds']) )
or
( count($_POST['newMonitor']['GroupIds']) != count($Monitor->GroupIds()) )
or
array_diff($_POST['newMonitor']['GroupIds'], $Monitor->GroupIds())
) {
if ( $Monitor->Id() )
dbQuery('DELETE FROM Groups_Monitors WHERE MonitorId=?', array($mid));
if ( isset($_POST['newMonitor']['GroupIds']) ) {
foreach ( $_POST['newMonitor']['GroupIds'] as $group_id ) {
dbQuery('INSERT INTO Groups_Monitors (GroupId,MonitorId) VALUES (?,?)', array($group_id, $mid));
}
}
} // end if there has been a change of groups
if ( ZM_OPT_X10 ) {
$x10Changes = getFormChanges($x10Monitor, $_REQUEST['newX10Monitor']);
if ( count($x10Changes) ) {
if ( $x10Monitor && isset($_REQUEST['newX10Monitor']) ) {
dbQuery('UPDATE TriggersX10 SET '.implode(', ', $x10Changes).' WHERE MonitorId=?', array($mid));
} elseif ( !$user['MonitorIds'] ) {
if ( !$x10Monitor ) {
dbQuery('INSERT INTO TriggersX10 SET MonitorId = ?, '.implode(', ', $x10Changes), array($mid));
} else {
dbQuery('DELETE FROM TriggersX10 WHERE MonitorId = ?', array($mid));
}
}
$restart = true;
} # end if has x10Changes
} # end if ZM_OPT_X10
if ( $restart ) {
$new_monitor = new Monitor($mid);
//fixDevices();
if ( $new_monitor->Type() != 'WebSite' ) {
$new_monitor->zmcControl('start');
$new_monitor->zmaControl('start');
}
if ( $new_monitor->Controllable() ) {
require_once('control_functions.php');
sendControlCommand($mid, 'quit');
}
// really should thump zmwatch and maybe zmtrigger too.
//daemonControl( 'restart', 'zmwatch.pl' );
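//daemonControl( 'restart', 'zmtrigger.pl' ); // hypothetical sketch: same helper as the line above, for zmtrigger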
$refreshParent = true;
} // end if restart
$view = 'none';
} elseif ( $action == 'delete' ) {
if ( isset($_REQUEST['markMids']) && !$user['MonitorIds'] ) {
require_once('Monitor.php');
foreach ( $_REQUEST['markMids'] as $markMid ) {
if ( canEdit('Monitors', $markMid) ) {
// This could be faster as a select all
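// A sketch of the select-all idea (untested): fetch every marked monitor in one
// query while keeping the per-monitor permission check:
//   $ids = array_map('intval', $_REQUEST['markMids']);
//   foreach ( dbFetchAll('SELECT * FROM Monitors WHERE Id IN ('.implode(',', $ids).')') as $row ) {
//     if ( canEdit('Monitors', $row['Id']) ) (new Monitor($row))->delete();
//   }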
if ( $monitor = dbFetchOne('SELECT * FROM Monitors WHERE Id = ?', NULL, array($markMid)) ) {
$Monitor = new Monitor($monitor);
$Monitor->delete();
} // end if monitor found in db
} // end if canedit this monitor
} // end foreach monitor in MarkMid
} // markMids is set and we aren't limited to specific monitors
} // end if action == Delete
}
// Device view actions
if ( canEdit('Devices') ) {
if ( $action == 'device' ) {
if ( !empty($_REQUEST['command']) ) {
setDeviceStatusX10($_REQUEST['key'], $_REQUEST['command']);
} else if ( isset($_REQUEST['newDevice']) ) {
if ( isset($_REQUEST['did']) ) {
dbQuery('UPDATE Devices SET Name=?, KeyString=? WHERE Id=?',
array($_REQUEST['newDevice']['Name'], $_REQUEST['newDevice']['KeyString'], $_REQUEST['did']) );
} else {
dbQuery('INSERT INTO Devices SET Name=?, KeyString=?',
array($_REQUEST['newDevice']['Name'], $_REQUEST['newDevice']['KeyString']) );
}
$refreshParent = true;
$view = 'none';
}
} elseif ( $action == 'delete' ) {
if ( isset($_REQUEST['markDids']) ) {
foreach( $_REQUEST['markDids'] as $markDid ) {
dbQuery('DELETE FROM Devices WHERE Id=?', array($markDid));
$refreshParent = true;
}
}
} // end if action
} // end if canedit devices
// Group view actions
if ( canView('Groups') && ($action == 'setgroup') ) {
if ( !empty($_REQUEST['gid']) ) {
setcookie('zmGroup', validInt($_REQUEST['gid']), time()+3600*24*30*12*10);
} else {
setcookie('zmGroup', '', time()-3600*24*2);
}
$refreshParent = true;
}
// Group edit actions
# Should probably verify that each monitor id is a valid monitor, that we have access to.
# However at the moment, you have to have System permissions to do this
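# A sketch of such a check (unverified; assumes canView accepts a monitor id the
# way canEdit('Monitors', $markMid) does elsewhere in this file):
#   $_POST['newGroup']['MonitorIds'] = array_values(array_filter(
#     $_POST['newGroup']['MonitorIds'],
#     function($id) { return canView('Monitors', $id); }
#   ));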
if ( canEdit('Groups') ) {
if ( $action == 'group' ) {
$monitors = empty($_POST['newGroup']['MonitorIds']) ? '' : implode(',', $_POST['newGroup']['MonitorIds']);
$group_id = null;
if ( !empty($_POST['gid']) ) {
$group_id = $_POST['gid'];
dbQuery(
'UPDATE Groups SET Name=?, ParentId=? WHERE Id=?',
array(
$_POST['newGroup']['Name'],
( $_POST['newGroup']['ParentId'] == '' ? null : $_POST['newGroup']['ParentId'] ),
$group_id,
)
);
dbQuery('DELETE FROM Groups_Monitors WHERE GroupId=?', array($group_id));
} else {
dbQuery(
'INSERT INTO Groups (Name,ParentId) VALUES (?,?)',
array(
$_POST['newGroup']['Name'],
( $_POST['newGroup']['ParentId'] == '' ? null : $_POST['newGroup']['ParentId'] ),
)
);
$group_id = dbInsertId();
}
if ( $group_id && !empty($_POST['newGroup']['MonitorIds']) ) {
  // Use a distinct variable name: $mid still holds the monitor id from the actions above
  foreach ( $_POST['newGroup']['MonitorIds'] as $monitor_id ) {
    dbQuery('INSERT INTO Groups_Monitors (GroupId,MonitorId) VALUES (?,?)', array($group_id, $monitor_id));
  }
}
$view = 'none';
$refreshParent = true;
} else if ( $action == 'delete' ) {
if ( !empty($_REQUEST['gid']) ) {
foreach ( Group::find(array('Id'=>$_REQUEST['gid'])) as $Group ) {
$Group->delete();
}
}
$redirect = ZM_BASE_URL.$_SERVER['PHP_SELF'].'?view=groups';
$refreshParent = true;
} # end if action
} // end if can edit groups
// System edit actions
if ( canEdit('System') ) {
if ( isset($_REQUEST['object']) ) {
if ( $_REQUEST['object'] == 'MontageLayout' ) {
require_once('MontageLayout.php');
if ( $action == 'Save' ) {
$Layout = null;
if ( $_REQUEST['Name'] != '' ) {
$Layout = new MontageLayout();
$Layout->Name($_REQUEST['Name']);
} else {
$Layout = new MontageLayout($_REQUEST['zmMontageLayout']);
}
$Layout->Positions($_REQUEST['Positions']);
$Layout->save();
session_start();
$_SESSION['zmMontageLayout'] = $Layout->Id();
setcookie('zmMontageLayout', $Layout->Id(), 1);
session_write_close();
$redirect = ZM_BASE_URL.$_SERVER['PHP_SELF'].'?view=montage';
} // end if save
} else if ( $_REQUEST['object'] == 'server' ) {
if ( $action == 'Save' ) {
if ( !empty($_REQUEST['id']) ) {
$dbServer = dbFetchOne(
'SELECT * FROM Servers WHERE Id=?',
NULL,
array($_REQUEST['id']) );
} else {
$dbServer = array();
}
$types = array();
$changes = getFormChanges($dbServer, $_REQUEST['newServer'], $types);
if ( count($changes) ) {
if ( !empty($_REQUEST['id']) ) {
dbQuery('UPDATE Servers SET '.implode(', ', $changes).' WHERE Id = ?',
array($_REQUEST['id']) );
} else {
dbQuery('INSERT INTO Servers SET '.implode(', ', $changes));
}
$refreshParent = true;
}
$view = 'none';
} else if ( $action == 'delete' ) {
if ( !empty($_REQUEST['markIds']) ) {
foreach( $_REQUEST['markIds'] as $Id )
dbQuery('DELETE FROM Servers WHERE Id=?', array($Id));
}
$refreshParent = true;
} else {
Error("Unknown action $action in saving Server");
}
} else if ( $_REQUEST['object'] == 'storage' ) {
if ( $action == 'Save' ) {
if ( !empty($_REQUEST['id']) )
$dbStorage = dbFetchOne('SELECT * FROM Storage WHERE Id=?', NULL, array($_REQUEST['id']));
else
$dbStorage = array();
$types = array();
$changes = getFormChanges($dbStorage, $_REQUEST['newStorage'], $types);
if ( count($changes) ) {
if ( !empty($_REQUEST['id']) ) {
dbQuery('UPDATE Storage SET '.implode(', ', $changes).' WHERE Id = ?', array($_REQUEST['id']));
} else {
dbQuery('INSERT INTO Storage set '.implode(', ', $changes));
}
$refreshParent = true;
}
$view = 'none';
} else if ( $action == 'delete' ) {
if ( !empty($_REQUEST['markIds']) ) {
foreach( $_REQUEST['markIds'] as $Id )
dbQuery('DELETE FROM Storage WHERE Id=?', array($Id));
}
$refreshParent = true;
} else {
Error("Unknown action $action in saving Storage");
}
} # end if isset($_REQUEST['object'] )
} else if ( $action == 'version' && isset($_REQUEST['option']) ) {
$option = $_REQUEST['option'];
switch( $option ) {
case 'go' :
{
// Ignore this, the caller will open the page itself
break;
}
case 'ignore' :
{
dbQuery("UPDATE Config SET Value = '".ZM_DYN_LAST_VERSION."' WHERE Name = 'ZM_DYN_CURR_VERSION'");
break;
}
case 'hour' :
case 'day' :
case 'week' :
{
$nextReminder = time();
if ( $option == 'hour' ) {
$nextReminder += 60*60;
} elseif ( $option == 'day' ) {
$nextReminder += 24*60*60;
} elseif ( $option == 'week' ) {
$nextReminder += 7*24*60*60;
}
dbQuery("UPDATE Config SET Value = '".$nextReminder."' WHERE Name = 'ZM_DYN_NEXT_REMINDER'");
break;
}
case 'never' :
{
dbQuery("UPDATE Config SET Value = '0' WHERE Name = 'ZM_CHECK_FOR_UPDATES'");
break;
}
}
}
if ( $action == 'donate' && isset($_REQUEST['option']) ) {
$option = $_REQUEST['option'];
switch( $option ) {
case 'go' :
{
// Ignore this, the caller will open the page itself
break;
}
case 'hour' :
case 'day' :
case 'week' :
case 'month' :
{
$nextReminder = time();
if ( $option == 'hour' ) {
$nextReminder += 60*60;
} elseif ( $option == 'day' ) {
$nextReminder += 24*60*60;
} elseif ( $option == 'week' ) {
$nextReminder += 7*24*60*60;
} elseif ( $option == 'month' ) {
$nextReminder += 30*24*60*60;
}
dbQuery("UPDATE Config SET Value = '".$nextReminder."' WHERE Name = 'ZM_DYN_DONATE_REMINDER_TIME'");
break;
}
case 'never' :
case 'already' :
{
dbQuery("UPDATE Config SET Value = '0' WHERE Name = 'ZM_DYN_SHOW_DONATE_REMINDER'");
break;
}
} // end switch option
}
if ( ($action == 'privacy') && isset($_REQUEST['option']) ) {
switch( $_REQUEST['option'] ) {
case 'decline' :
{
dbQuery("UPDATE Config SET Value = '0' WHERE Name = 'ZM_SHOW_PRIVACY'");
dbQuery("UPDATE Config SET Value = '0' WHERE Name = 'ZM_TELEMETRY_DATA'");
$redirect = ZM_BASE_URL.$_SERVER['PHP_SELF'].'?view=console';
break;
}
case 'accept' :
{
dbQuery("UPDATE Config SET Value = '0' WHERE Name = 'ZM_SHOW_PRIVACY'");
dbQuery("UPDATE Config SET Value = '1' WHERE Name = 'ZM_TELEMETRY_DATA'");
$redirect = ZM_BASE_URL.$_SERVER['PHP_SELF'].'?view=console';
break;
}
default: # Enable the privacy statement if we somehow submit something other than accept or decline
dbQuery("UPDATE Config SET Value = '1' WHERE Name = 'ZM_SHOW_PRIVACY'");
} // end switch option
return;
}
if ( $action == 'options' && isset($_REQUEST['tab']) ) {
$config = array();
$configCat = array();
$configCats = array();
$sql = 'SELECT * FROM Config ORDER BY Id ASC';
$result = $dbConn->query($sql);
if ( !$result )
  echo dbError($sql); // PDO is in use here; the legacy mysql_error() would always be empty
while( $row = dbFetchNext($result) ) {
$config[$row['Name']] = $row;
if ( !($configCat = &$configCats[$row['Category']]) ) {
$configCats[$row['Category']] = array();
$configCat = &$configCats[$row['Category']];
}
$configCat[$row['Name']] = $row;
}
unset($configCat); // break the reference left over from the loop above before reusing the variable
$configCat = $configCats[$_REQUEST['tab']];
$changed = false;
foreach ( $configCat as $name=>$value ) {
unset($newValue);
if ( $value['Type'] == 'boolean' && empty($_REQUEST['newConfig'][$name]) ) {
$newValue = 0;
} else if ( isset($_REQUEST['newConfig'][$name]) ) {
$newValue = preg_replace("/\r\n/", "\n", stripslashes($_REQUEST['newConfig'][$name]));
}
if ( isset($newValue) && ($newValue != $value['Value']) ) {
dbQuery('UPDATE Config SET Value=? WHERE Name=?', array($newValue, $name));
$changed = true;
}
} // end foreach config entry
if ( $changed ) {
switch( $_REQUEST['tab'] ) {
case 'system' :
case 'config' :
$restartWarning = true;
break;
case 'web' :
case 'tools' :
break;
case 'logging' :
case 'network' :
case 'mail' :
case 'upload' :
$restartWarning = true;
break;
case 'highband' :
case 'medband' :
case 'lowband' :
break;
}
$redirect = ZM_BASE_URL.$_SERVER['PHP_SELF'].'?view=options&tab='.$_REQUEST['tab'];
}
loadConfig(false);
return;
} elseif ( $action == 'user' ) {
if ( !empty($_REQUEST['uid']) )
$dbUser = dbFetchOne('SELECT * FROM Users WHERE Id=?', NULL, array($_REQUEST['uid']));
else
$dbUser = array();
$types = array();
$changes = getFormChanges($dbUser, $_REQUEST['newUser'], $types);
if ( !empty($_REQUEST['newUser']['Password']) )
$changes['Password'] = 'Password = password('.dbEscape($_REQUEST['newUser']['Password']).')';
else
unset($changes['Password']);
if ( count($changes) ) {
if ( !empty($_REQUEST['uid']) ) {
dbQuery('UPDATE Users SET '.implode(', ', $changes).' WHERE Id = ?', array($_REQUEST['uid']));
# If we are updating the logged in user, then update our session user data.
if ( $user and ( $dbUser['Username'] == $user['Username'] ) )
userLogin($dbUser['Username'], $dbUser['Password']);
} else {
dbQuery('INSERT INTO Users SET '.implode(', ', $changes));
}
$refreshParent = true;
}
$view = 'none';
} elseif ( $action == 'state' ) {
if ( !empty($_REQUEST['runState']) ) {
//if ( $cookies ) session_write_close();
packageControl($_REQUEST['runState']);
$refreshParent = true;
}
} elseif ( $action == 'save' ) {
if ( !empty($_REQUEST['runState']) || !empty($_REQUEST['newState']) ) {
$sql = 'SELECT Id,Function,Enabled FROM Monitors ORDER BY Id';
$definitions = array();
foreach( dbFetchAll($sql) as $monitor ) {
$definitions[] = $monitor['Id'].':'.$monitor['Function'].':'.$monitor['Enabled'];
}
$definition = join(',', $definitions);
if ( $_REQUEST['newState'] )
$_REQUEST['runState'] = $_REQUEST['newState'];
dbQuery('REPLACE INTO States SET Name=?, Definition=?', array($_REQUEST['runState'],$definition));
}
} elseif ( $action == 'delete' ) {
if ( isset($_REQUEST['runState']) )
dbQuery('DELETE FROM States WHERE Name=?', array($_REQUEST['runState']));
if ( isset($_REQUEST['markUids']) ) {
  foreach ( $_REQUEST['markUids'] as $markUid ) {
    dbQuery('DELETE FROM Users WHERE Id = ?', array($markUid));
    // Log out immediately if the deleted account is the one we are logged in as
    if ( $markUid == $user['Id'] )
      userLogout();
  }
}
}
} else {
if ( ZM_USER_SELF_EDIT && $action == 'user' ) {
$uid = $user['Id'];
$dbUser = dbFetchOne('SELECT Id, Password, Language FROM Users WHERE Id = ?', NULL, array($uid));
$types = array();
$changes = getFormChanges($dbUser, $_REQUEST['newUser'], $types);
if ( !empty($_REQUEST['newUser']['Password']) )
$changes['Password'] = 'Password = password('.dbEscape($_REQUEST['newUser']['Password']).')';
else
unset($changes['Password']);
if ( count($changes) ) {
dbQuery('UPDATE Users SET '.implode(', ', $changes).' WHERE Id=?', array($uid));
$refreshParent = true;
}
$view = 'none';
}
}
if ( $action == 'reset' ) {
session_start();
$_SESSION['zmEventResetTime'] = strftime(STRF_FMT_DATETIME_DB);
setcookie('zmEventResetTime', $_SESSION['zmEventResetTime'], time()+3600*24*30*12*10);
session_write_close();
}
?>

View File

@ -202,6 +202,9 @@ if ( $action == 'save' ) {
} // end foreach zone
} // end if rotation or just size change
} // end if changes in width or height
} else {
global $error_message;
$error_message = dbError();
} // end if successful save
$restart = true;
} else { // new monitor

View File

@ -112,10 +112,10 @@ function dbLog($sql, $update=false) {
function dbError($sql) {
global $dbConn;
$error = $dbConn->errorInfo();
if ( ! $error[0] )
if ( !$error[0] )
return '';
$message = "SQL-ERR '".implode("\n",$dbConn->errorInfo())."', statement was '".$sql."'";
$message = "SQL-ERR '".implode("\n", $dbConn->errorInfo())."', statement was '".$sql."'";
ZM\Error($message);
return $message;
}

View File

@ -230,6 +230,7 @@ if (
( $request != 'control' ) &&
//( $view != 'frames' ) && // big html can overflow ob
( $view != 'archive' ) // returns data
&& ( (!isset($_SERVER['CONTENT_TYPE']) or ($_SERVER['CONTENT_TYPE'] != 'application/csp-report')) )
) {
require_once('includes/csrf/csrf-magic.php');
#ZM\Debug("Calling csrf_check with the following values: \$request = \"$request\", \$view = \"$view\", \$action = \"$action\"");

View File

@ -913,3 +913,7 @@ function initThumbAnimation() {
});
}
}
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
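// Usage sketch: inside an async function, `await sleep(500);` pauses roughly 500ms.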

View File

@ -134,8 +134,6 @@ for ( $i = 0; $i < count($displayMonitors); $i++ ) {
if ( $maxWidth < $scaleWidth ) $maxWidth = $scaleWidth;
if ( $maxHeight < $scaleHeight ) $maxHeight = $scaleHeight;
}
#$monitor['zmc'] = zmcStatus( $monitor );
#$monitor['zma'] = zmaStatus( $monitor );
$zoneCount += $monitor['ZoneCount'];
$counts = array();

Some files were not shown because too many files have changed in this diff.