Merge branch 'zma_to_thread' of http://github.com/connortechnology/ZoneMinder into zma_to_thread

This commit is contained in:
APHW2 MFGENG 2017-12-01 11:50:05 -08:00
commit c26d7fec7b
29 changed files with 473 additions and 709 deletions

View File

@ -3,28 +3,26 @@
# Create files from the .in files
configure_file(zm_config.h.in "${CMAKE_CURRENT_BINARY_DIR}/zm_config.h" @ONLY)
# Group together all the source files that are used by all the binaries (zmc, zma, zmu, zms etc)
# Group together all the source files that are used by all the binaries (zmc, zmu, zms etc)
set(ZM_BIN_SRC_FILES zm_analysis_thread.cpp zm_box.cpp zm_buffer.cpp zm_camera.cpp zm_comms.cpp zm_config.cpp zm_coord.cpp zm_curl_camera.cpp zm.cpp zm_db.cpp zm_logger.cpp zm_event.cpp zm_eventstream.cpp zm_exception.cpp zm_file_camera.cpp zm_ffmpeg_camera.cpp zm_image.cpp zm_jpeg.cpp zm_libvlc_camera.cpp zm_local_camera.cpp zm_monitor.cpp zm_monitorstream.cpp zm_ffmpeg.cpp zm_ffmpeg_input.cpp zm_mpeg.cpp zm_packet.cpp zm_packetqueue.cpp zm_poly.cpp zm_regexp.cpp zm_remote_camera.cpp zm_remote_camera_http.cpp zm_remote_camera_nvsocket.cpp zm_remote_camera_rtsp.cpp zm_rtp.cpp zm_rtp_ctrl.cpp zm_rtp_data.cpp zm_rtp_source.cpp zm_rtsp.cpp zm_rtsp_auth.cpp zm_sdp.cpp zm_signal.cpp zm_stream.cpp zm_swscale.cpp zm_thread.cpp zm_time.cpp zm_timer.cpp zm_user.cpp zm_utils.cpp zm_video.cpp zm_videostore.cpp zm_zone.cpp zm_storage.cpp)
# A fix for cmake recompiling the source files for every target.
add_library(zm STATIC ${ZM_BIN_SRC_FILES})
add_executable(zmc zmc.cpp)
add_executable(zma zma.cpp)
add_executable(zmu zmu.cpp)
add_executable(zms zms.cpp)
target_link_libraries(zmc zm ${ZM_EXTRA_LIBS} ${ZM_BIN_LIBS})
target_link_libraries(zma zm ${ZM_EXTRA_LIBS} ${ZM_BIN_LIBS})
target_link_libraries(zmu zm ${ZM_EXTRA_LIBS} ${ZM_BIN_LIBS})
target_link_libraries(zms zm ${ZM_EXTRA_LIBS} ${ZM_BIN_LIBS})
# Generate man files for the binaries destined for the bin folder
FOREACH(CBINARY zma zmc zmu)
FOREACH(CBINARY zmc zmu)
POD2MAN(${CMAKE_CURRENT_SOURCE_DIR}/${CBINARY}.cpp zoneminder-${CBINARY} 8)
ENDFOREACH(CBINARY zma zmc zmu)
ENDFOREACH(CBINARY zmc zmu)
install(TARGETS zmc zma zmu RUNTIME DESTINATION "${CMAKE_INSTALL_FULL_BINDIR}" PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(TARGETS zmc zmu RUNTIME DESTINATION "${CMAKE_INSTALL_FULL_BINDIR}" PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(TARGETS zms RUNTIME DESTINATION "${ZM_CGIDIR}" PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(CODE "execute_process(COMMAND ln -sf zms nph-zms WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})" )
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/nph-zms DESTINATION "${ZM_CGIDIR}")

View File

@ -11,11 +11,15 @@ AnalysisThread::~AnalysisThread() {
}
int AnalysisThread::run() {
Debug(2, "In run");
useconds_t analysis_rate = monitor->GetAnalysisRate();
Debug(2, "after getanalysisrate");
unsigned int analysis_update_delay = monitor->GetAnalysisUpdateDelay();
Debug(2, "after getanalysisUpdateDelay");
time_t last_analysis_update_time, cur_time;
monitor->UpdateAdaptiveSkip();
Debug(2, "after UpdateAdaptiveSkip");
last_analysis_update_time = time(0);
Debug(2, "THREAD: Getting ref image");
@ -37,7 +41,7 @@ int AnalysisThread::run() {
if ( !monitor->Analyse() ) {
Debug(2, "Sleeping for %d", monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE);
usleep(100*(monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE));
usleep(10*(monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE));
} else if ( analysis_rate ) {
Debug(2, "Sleeping for %d", analysis_rate);
usleep(analysis_rate);

View File

@ -428,15 +428,17 @@ void Event::AddFramesInternal( int n_frames, int start_frame, Image **images, st
}
void Event::AddPacket( ZMPacket *packet, int score, Image *alarm_image ) {
frames++;
have_video_keyframe = have_video_keyframe || ( ( packet->codec_type == AVMEDIA_TYPE_VIDEO ) && packet->keyframe );
if ( videoStore ) {
have_video_keyframe = have_video_keyframe || ( packet->codec_type == AVMEDIA_TYPE_VIDEO && ( packet->packet.flags & AV_PKT_FLAG_KEY ) );
if ( have_video_keyframe )
if ( have_video_keyframe ) {
videoStore->writePacket( packet );
} else {
Debug(2, "No video keyframe yet, not writing");
}
//FIXME if it fails, we should write a jpeg
}
if ( packet->codec_type == AVMEDIA_TYPE_VIDEO ) {
if ( have_video_keyframe && ( packet->codec_type == AVMEDIA_TYPE_VIDEO ) ) {
AddFrame( packet->image, *packet->timestamp, score, alarm_image );
} // end if is video
return;

View File

@ -106,26 +106,25 @@ static int parse_key_value_pair(AVDictionary **pm, const char **buf,
return ret;
}
int av_dict_parse_string(AVDictionary **pm, const char *str,
const char *key_val_sep, const char *pairs_sep,
int flags)
{
if (!str)
return 0;
/* ignore STRDUP flags */
flags &= ~(AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
while (*str) {
int ret;
if ( (ret = parse_key_value_pair(pm, &str, key_val_sep, pairs_sep, flags)) < 0)
return ret;
if (*str)
str++;
}
return 0;
const char *key_val_sep, const char *pairs_sep,
int flags) {
if (!str)
return 0;
/* ignore STRDUP flags */
flags &= ~(AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
while (*str) {
int ret;
if ( (ret = parse_key_value_pair(pm, &str, key_val_sep, pairs_sep, flags)) < 0)
return ret;
if (*str)
str++;
}
return 0;
}
#endif
#endif // HAVE_LIBAVUTIL

View File

@ -148,7 +148,7 @@ int FfmpegCamera::PrimeCapture() {
}
return 0;
#else
return OpenFfmpeg();
return ! OpenFfmpeg();
#endif
}
@ -206,7 +206,6 @@ int FfmpegCamera::PostCapture() {
int FfmpegCamera::OpenFfmpeg() {
Debug ( 2, "OpenFfmpeg called." );
int ret;
mOpenStart = time(NULL);
@ -218,10 +217,12 @@ int FfmpegCamera::OpenFfmpeg() {
if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) != 0 )
#else
// Handle options
AVDictionary *opts = 0;
AVDictionary *opts = NULL;
ret = av_dict_parse_string(&opts, Options().c_str(), "=", ",", 0);
if ( ret < 0 ) {
Warning("Could not parse ffmpeg input options list '%s'\n", Options().c_str());
Warning("Could not parse ffmpeg input options list '%s'", Options().c_str());
} else {
Debug(2,"Could not parse ffmpeg input options list '%s'", Options().c_str());
}
// Set transport method as specified by method field, rtpUni is default
@ -240,15 +241,12 @@ int FfmpegCamera::OpenFfmpeg() {
Warning("Could not set rtsp_transport method '%s'\n", method.c_str());
}
Debug ( 1, "Calling avformat_open_input for %s", mPath.c_str() );
Debug ( 1, "Calling avformat_alloc_context for %s", mPath.c_str() );
mFormatContext = avformat_alloc_context( );
//mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
//mFormatContext->interrupt_callback.opaque = this;
// Speed up find_stream_info
//FIXME can speed up initial analysis but need sensible parameters...
//mFormatContext->probesize = 32;
//mFormatContext->max_analyze_duration = 32;
monitor->GetLastEventId() ;
Debug(2, "before avformat_open_input" );
if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) != 0 )
#endif
@ -257,12 +255,16 @@ int FfmpegCamera::OpenFfmpeg() {
Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
return -1;
}
Debug(2, "afte avformat_open_input" );
monitor->GetLastEventId() ;
AVDictionaryEntry *e = NULL;
while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
Warning( "Option %s not recognized by ffmpeg", e->key);
}
monitor->GetLastEventId() ;
mIsOpening = false;
Debug ( 1, "Opened input" );
@ -380,12 +382,13 @@ int FfmpegCamera::OpenFfmpeg() {
} else {
Debug(1, "Video Found decoder");
zm_dump_stream_format(mFormatContext, mVideoStreamId, 0, 0);
// Open the codec
// Open the codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
Debug ( 1, "Calling avcodec_open" );
if ( avcodec_open(mVideoCodecContext, mVideoCodec) < 0 ){
Debug ( 1, "Calling avcodec_open" );
if ( avcodec_open(mVideoCodecContext, mVideoCodec) < 0 ){
#else
Debug ( 1, "Calling avcodec_open2" );
Debug ( 1, "Calling video avcodec_open2" );
if ( avcodec_open2(mVideoCodecContext, mVideoCodec, &opts) < 0 ) {
#endif
AVDictionaryEntry *e = NULL;
@ -430,7 +433,7 @@ int FfmpegCamera::OpenFfmpeg() {
}
} // end if have audio stream
Debug ( 1, "Opened codec" );
Debug ( 1, "Opened audio codec" );
if ( (unsigned int)mVideoCodecContext->width != width || (unsigned int)mVideoCodecContext->height != height ) {
Warning( "Monitor dimensions are %dx%d but camera is sending %dx%d", width, height, mVideoCodecContext->width, mVideoCodecContext->height );
@ -478,12 +481,16 @@ int FfmpegCamera::CloseFfmpeg() {
if ( mVideoCodecContext ) {
avcodec_close(mVideoCodecContext);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_free_context(&mVideoCodecContext);
#endif
mVideoCodecContext = NULL; // Freed by av_close_input_file
}
if ( mAudioCodecContext ) {
avcodec_close(mAudioCodecContext);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_free_context(&mAudioCodecContext);
#endif
mAudioCodecContext = NULL; // Freed by av_close_input_file
}
@ -535,6 +542,5 @@ void *FfmpegCamera::ReopenFfmpegThreadCallback(void *ctx) {
return NULL;
}
}
} // end void *FfmpegCamera::ReopenFfmpegThreadCallback(void *ctx)
}
#endif // HAVE_LIBAVFORMAT

View File

@ -1944,7 +1944,6 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
//vid_buf.memory = V4L2_MEMORY_MMAP;
vid_buf.memory = v4l2_data.reqbufs.memory;
Debug( 3, "Capturing %d frames", captures_per_frame );
while ( captures_per_frame ) {
Debug( 3, "Capturing %d frames", captures_per_frame );
if ( vidioctl( vid_fd, VIDIOC_DQBUF, &vid_buf ) < 0 ) {
@ -1955,7 +1954,7 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
}
return -1;
}
Debug( 3, "Capturing %d frames", captures_per_frame );
Debug(3, "Captured a frame");
v4l2_data.bufptr = &vid_buf;
capture_frame = v4l2_data.bufptr->index;

View File

@ -159,7 +159,7 @@ public:
int Capture(ZMPacket &p);
int PostCapture();
static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose );
AVStream* get_VideoStream();
AVStream* get_VideoStream();
};
#endif // ZM_HAS_V4L

View File

@ -63,7 +63,12 @@
#define MAP_LOCKED 0
#endif
Monitor::MonitorLink::MonitorLink( int p_id, const char *p_name ) : id( p_id ) {
Monitor::MonitorLink::MonitorLink( int p_id, const char *p_name ) :
id( p_id ),
shared_data(NULL),
trigger_data(NULL),
video_store_data(NULL)
{
strncpy( name, p_name, sizeof(name) );
#if ZM_MEM_MAPPED
@ -446,6 +451,7 @@ Monitor::Monitor(
start_time = last_fps_time = time( 0 );
event = 0;
last_section_mod =0;
Debug( 1, "Monitor %s\
function: %d\
@ -536,8 +542,8 @@ bool Monitor::connect() {
shared_timestamps = (struct timeval *)((char *)video_store_data + sizeof(VideoStoreData));
shared_images = (unsigned char *)((char *)shared_timestamps + (image_buffer_count*sizeof(struct timeval)));
analysis_it = packetqueue.pktQueue.begin();
packetqueue = NULL;
if ( ((unsigned long)shared_images % 64) != 0 ) {
/* Align images buffer to nearest 64 byte boundary */
Debug(3,"Aligning shared memory images to the next 64 byte boundary");
@ -568,7 +574,8 @@ Monitor::~Monitor() {
delete videoStore;
videoStore = NULL;
}
packetqueue.clearQueue();
delete packetqueue;
packetqueue = NULL;
if ( timestamps ) {
delete[] timestamps;
@ -724,11 +731,6 @@ ZMPacket *Monitor::getSnapshot( int index ) {
}
return &image_buffer[index];
for ( std::list<ZMPacket *>::iterator it = packetqueue.pktQueue.begin(); it != packetqueue.pktQueue.end(); ++it ) {
ZMPacket *zm_packet = *it;
if ( zm_packet->image_index == index )
return zm_packet;
}
return NULL;
}
@ -749,17 +751,25 @@ unsigned int Monitor::GetLastWriteIndex() const {
return( shared_data->last_write_index!=(unsigned int)image_buffer_count?shared_data->last_write_index:-1 );
}
unsigned int Monitor::GetLastEvent() const {
uint32_t Monitor::GetLastEventId() const {
Debug(2, "mem_ptr(%x), State(%d) last_read_index(%d) last_read_time(%d) last_event(%d)",
mem_ptr,
shared_data->state,
shared_data->last_read_index,
shared_data->last_read_time,
shared_data->last_event_id
);
return( shared_data->last_event_id );
}
// This function is crap.
double Monitor::GetFPS() const {
int index1 = shared_data->last_write_index;
if ( index1 == image_buffer_count ) {
if ( index1 >= image_buffer_count ) {
// last_write_index only has this value on startup before capturing anything.
return 0.0;
}
Debug(2, "index1(%d)", index1);
ZMPacket *snap1 = &image_buffer[index1];
if ( !snap1->timestamp->tv_sec ) {
// This should be impossible
@ -770,6 +780,7 @@ double Monitor::GetFPS() const {
int image_count = image_buffer_count;
int index2 = (index1+1)%image_buffer_count;
Debug(2, "index2(%d)", index2);
ZMPacket *snap2 = &image_buffer[index2];
// the timestamp pointers are initialized on connection, so that's redundant
// tv_sec is probably only zero during the first loop of capturing, so this basically just counts the unused images.
@ -1226,63 +1237,30 @@ bool Monitor::Analyse() {
return false;
}
// if have event, sent frames until we find a video packet, at which point do analysis. Adaptive skip should only affect which frames we do analysis on.
// Keeps minimum video frames. This should be ok, because we SHOULD be staying close to the head of the queue in analysis
if ( ! event ) {
if ( packetqueue.video_packet_count > pre_event_count ) {
mutex.lock();
packetqueue.clearQueue( pre_event_count, video_stream_id );
mutex.unlock();
}
}
// If do have an event, then analysis_it should point to the head of the queue, because we would have emptied it on event creation.
unsigned int index = ( shared_data->last_read_index + 1 ) % image_buffer_count;
if ( ! packetqueue.size() ) {
Debug(2, "PacketQueue is empty" );
return false;
}
Debug(2, "PacketQueue size (%d)", packetqueue.size() );
// The idea is that iterator_id never gets to the end.
// analysis_it is == end when queue is empty?
Debug(2, "Analysis index (%d), last_Write(%d)", index, shared_data->last_write_index );
if ( analysis_it == packetqueue.pktQueue.end() ) {
Debug(2, "Analysis index (%d), last_Write(%d), at end of queue", index, shared_data->last_write_index );
analysis_it = packetqueue.pktQueue.begin();
return false;
}
std::list<ZMPacket *>::iterator next_it = analysis_it;
Debug(2, "next_it (%x)", next_it );
++next_it;
struct timeval now;
gettimeofday( &now, NULL );
int packets_processed = 0;
// Move to next packet.
while ( ( index != shared_data->last_write_index ) && ( next_it != packetqueue.pktQueue.end() ) ) {
analysis_it = next_it;
++next_it;
Debug(2, "Analysis index (%d), last_Write(%d), *it: (%x) (%d)", index, shared_data->last_write_index, *analysis_it, (*analysis_it)->image_index );
ZMPacket *snap;
while ( ( snap = packetqueue->get_analysis_packet() ) && ( snap->score == -1 ) ) {
Debug(2, "Analysis index (%d), last_Write(%d)", index, shared_data->last_write_index);
packets_processed += 1;
ZMPacket *snap = *analysis_it;
struct timeval *timestamp = snap->timestamp;
Image *snap_image = snap->image;
Debug(2, "Analysing image (%d)", snap->image_index );
if ( snap->image_index == -1 ) {
Debug(2, "skipping because audio");
if ( ! packetqueue->increment_analysis_it() ) {
Debug(2, "No more packets to analyse");
return false;
}
continue;
}
int last_section_mod = 0;
// signal is set by capture
bool signal = shared_data->signal;
bool signal_change = (signal != last_signal);
@ -1393,7 +1371,7 @@ bool Monitor::Analyse() {
if ( section_length ) {
// TODO: Wouldn't this be clearer if we just did something like if now - event->start > section_length ?
int section_mod = timestamp->tv_sec % section_length;
Debug( 3, "Section length (%d) Last Section Mod(%d), new section mod(%d)", section_length, last_section_mod, section_mod );
Debug( 3, "Section length (%d) Last Section Mod(%d), tv_sec(%d) new section mod(%d)", section_length, last_section_mod, timestamp->tv_sec, section_mod );
// This is not clear, but basically due to pauses, etc we might not get section_mod == 0
if ( section_mod < last_section_mod ) {
Info( "%s: %03d - Closing event %d, section end forced ", name, image_count, event->Id() );
@ -1427,18 +1405,6 @@ bool Monitor::Analyse() {
if ( (function != MOCORD && state != ALERT) ) {
event = new Event( this, *timestamp, cause, noteSetMap );
shared_data->last_event_id = event->Id();
mutex.lock();
std::list<ZMPacket *>::iterator it = packetqueue.pktQueue.begin();
while( packetqueue.size() && ( it != analysis_it ) ) {
ZMPacket *queued_packet = packetqueue.popPacket();
event->AddPacket( queued_packet );
if ( queued_packet->image_index == -1 ) {
delete queued_packet;
}
it = packetqueue.pktQueue.begin();
}
mutex.unlock();
// The analysis packet will be added below.
}
} else if ( state != PREALARM ) {
Info( "%s: %03d - Gone into prealarm state", name, image_count );
@ -1473,6 +1439,9 @@ bool Monitor::Analyse() {
Event::EmptyPreAlarmFrames();
} // end if score or not
// Flag the packet so we don't analyse it again
snap->score = score;
if ( state == PREALARM || state == ALARM ) {
if ( config.create_analysis_images ) {
bool got_anal_image = false;
@ -1490,7 +1459,7 @@ bool Monitor::Analyse() {
}
} // end foreach zone
if ( got_anal_image ) {
(*analysis_it)->analysis_image = anal_image;
snap->analysis_image = anal_image;
} else {
delete anal_image;
}
@ -1502,45 +1471,14 @@ bool Monitor::Analyse() {
} // end foreach zone
} // analsys_images or record stats
mutex.lock();
ZMPacket *pack = packetqueue.popPacket();
mutex.unlock();
event->AddPacket( pack, score );
if ( pack->image_index == -1 ) {
delete pack;
pack = NULL;
}
if ( noteSetMap.size() > 0 )
event->updateNotes( noteSetMap );
} else if ( state == ALERT ) {
// Alert means this frame has no motion, but we were alarmed and are still recording.
mutex.lock();
ZMPacket *pack = packetqueue.popPacket();
mutex.unlock();
event->AddPacket( pack, score );
if ( pack->image_index == -1 ) {
delete pack;
pack = NULL;
}
if ( noteSetMap.size() > 0 )
event->updateNotes( noteSetMap );
} else if ( state == TAPE ) {
if ( !(image_count%(frame_skip+1)) ) {
mutex.lock();
ZMPacket *pack = packetqueue.popPacket();
Debug(2,"adding packet (%x)", pack );
Debug(2,"adding packet (%x) (%d)", pack, pack->image_index );
mutex.unlock();
if ( config.bulk_frame_interval > 1 ) {
event->AddPacket( pack, (event->Frames()<pre_event_count?0:-1) );
} else {
event->AddPacket( pack );
}
if ( pack->image_index == -1 ) {
delete pack;
pack = NULL;
}
}
}
if ( function == MODECT || function == MOCORD ) {
@ -1562,15 +1500,31 @@ Debug(2,"adding packet (%x) (%d)", pack, pack->image_index );
last_section_mod = 0;
} // end if ( trigger_data->trigger_state != TRIGGER_OFF )
if ( event ) {
ZMPacket *queued_packet;
//popPacket will increment analysis_it if neccessary, so this will write out all packets in queue
while ( ( queued_packet = packetqueue->popPacket() ) ) {
Debug(2,"adding packet (%x) (%d)", queued_packet, queued_packet->image_index );
event->AddPacket( queued_packet );
if ( queued_packet->image_index == -1 ) {
delete queued_packet;
queued_packet = NULL;
}
} // end while write out queued_packets
} else {
packetqueue->increment_analysis_it();
}
shared_data->last_read_index = snap->image_index;
struct timeval now;
gettimeofday(&now, NULL);
shared_data->last_read_time = now.tv_sec;
analysis_image_count++;
} // end while not at end of packetqueue
//mutex.unlock();
if ( packets_processed > 0 )
return true;
return false;
}
if ( packets_processed > 0 )
return true;
return false;
} // end Monitor::Analyze
void Monitor::Reload() {
Debug( 1, "Reloading monitor %s", name );
@ -2780,8 +2734,6 @@ Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
* Returns -1 on failure.
*/
int Monitor::Capture() {
Debug(2,"Capture::Lock");
mutex.lock();
static int FirstCapture = 1; // Used in de-interlacing to indicate whether this is the even or odd image
unsigned int index = image_count % image_buffer_count;
@ -2815,7 +2767,6 @@ int Monitor::Capture() {
if ( FirstCapture ) {
FirstCapture = 0;
mutex.unlock();
return 0;
}
} else {
@ -2830,7 +2781,6 @@ int Monitor::Capture() {
signalcolor = rgb_convert(signal_check_colour, ZM_SUBPIX_ORDER_BGR);
capture_image->Fill(signalcolor);
shared_data->signal = false;
mutex.unlock();
return -1;
} else if ( captureResult > 0 ) {
@ -2838,14 +2788,15 @@ int Monitor::Capture() {
if ( packet->packet.stream_index != video_stream_id ) {
Debug(2, "Have audio packet (%d) != videostream_id:(%d) q.vpktcount(%d) event?(%d) ",
packet->packet.stream_index, video_stream_id, packetqueue.video_packet_count, ( event ? 1 : 0 ) );
packet->packet.stream_index, video_stream_id, packetqueue->video_packet_count, ( event ? 1 : 0 ) );
// Only queue if we have some video packets in there.
if ( packetqueue.video_packet_count || event ) {
mutex.lock();
if ( packetqueue->video_packet_count || event ) {
// Need to copy it into another ZMPacket.
ZMPacket *audio_packet = new ZMPacket( *packet );
audio_packet->codec_type = camera->get_AudioStream()->codecpar->codec_type;
Debug(2, "Queueing packet");
packetqueue.queuePacket( audio_packet );
packetqueue->queuePacket( audio_packet );
}
// Don't update last_write_index because that is used for live streaming
//shared_data->last_write_time = image_buffer[index].timestamp->tv_sec;
@ -2857,20 +2808,24 @@ int Monitor::Capture() {
packet->codec_type = camera->get_VideoStream()->codecpar->codec_type;
if ( packet->packet.size && ! packet->in_frame ) {
Debug(2,"About to decode");
//Debug(2,"About to decode");
if ( packet->decode( camera->get_VideoCodecContext() ) ) {
Debug(2,"Getimage");
//Debug(2,"Getimage");
packet->get_image();
}
// Have an av_packet,
if ( packetqueue.video_packet_count || ( packet->packet.flags & AV_PKT_FLAG_KEY ) || event ) {
Debug(2, "Queueing packet");
packetqueue.queuePacket( packet );
mutex.lock();
if ( packetqueue->video_packet_count || packet->keyframe || event ) {
//Debug(2, "Queueing video packet");
packetqueue->queuePacket( packet );
}
mutex.unlock();
} else {
mutex.lock();
// Non-avpackets are all keyframes.
Debug(2, "Queueing packet");
packetqueue.queuePacket( packet );
Debug(2, "Queueing decoded video packet");
packetqueue->queuePacket( packet );
mutex.unlock();
}
/* Deinterlacing */
@ -2940,9 +2895,6 @@ int Monitor::Capture() {
} // end if result
} // end if deinterlacing
Debug(2,"Capture unlock");
mutex.unlock();
// Icon: I'm not sure these should be here. They have nothing to do with capturing
if ( shared_data->action & GET_SETTINGS ) {
shared_data->brightness = camera->Brightness();
@ -3238,7 +3190,10 @@ unsigned int Monitor::SubpixelOrder() const { return( camera->SubpixelOrder() );
int Monitor::PrimeCapture() {
int ret = camera->PrimeCapture();
video_stream_id = ret ? -1 : camera->get_VideoStreamId();
if ( ret == 0 ) {
video_stream_id = camera->get_VideoStreamId();
packetqueue = new zm_packetqueue( pre_event_buffer_count, video_stream_id );
}
Debug(2, "Video stream id is (%d)", video_stream_id );
return ret;
}
@ -3256,6 +3211,7 @@ void Monitor::get_ref_image() {
( shared_data->last_write_index == (unsigned int)image_buffer_count )
&&
( shared_data->last_write_time == 0 )
&& ! zm_terminate
) {
Warning( "Waiting for capture daemon" );
usleep( 100000 );

View File

@ -164,9 +164,8 @@ protected:
} VideoStoreData;
VideoStore *videoStore;
zm_packetqueue packetqueue;
zm_packetqueue *packetqueue;
Mutex mutex;
std::list<ZMPacket *>::iterator analysis_it; // Iterator into the packetqueue. Theoretically points to the most recently analyzed packet
class MonitorLink {
protected:
@ -292,6 +291,7 @@ protected:
int first_alarm_count;
int last_alarm_count;
bool last_signal;
int last_section_mod;
int buffer_count;
int prealarm_count;
State state;
@ -454,7 +454,6 @@ public:
const std::string &OutputCodec() const { return output_codec; }
const std::string &OutputContainer() const { return output_container; }
uint32_t GetLastEventId() const { return shared_data->last_event_id; }
uint32_t GetVideoWriterEventId() const { return video_store_data->current_event; }
void SetVideoWriterEventId( uint32_t p_event_id ) { video_store_data->current_event = p_event_id; }
@ -470,7 +469,7 @@ public:
int GetAlarmCaptureDelay() const { return( alarm_capture_delay ); }
unsigned int GetLastReadIndex() const;
unsigned int GetLastWriteIndex() const;
unsigned int GetLastEvent() const;
uint32_t GetLastEventId() const;
double GetFPS() const;
void UpdateAnalysisFPS();
void ForceAlarmOn( int force_score, const char *force_case, const char *force_text="" );

View File

@ -36,6 +36,7 @@ ZMPacket::ZMPacket( ) {
timestamp = NULL;
analysis_image = NULL;
image_index = -1;
score = -1;
}
ZMPacket::ZMPacket( ZMPacket &p ) {
@ -53,6 +54,7 @@ ZMPacket::ZMPacket( ZMPacket &p ) {
*timestamp = *p.timestamp;
analysis_image = NULL;
image_index = -1;
score = -1;
}
ZMPacket::~ZMPacket() {
@ -103,6 +105,8 @@ void ZMPacket::reset() {
timestamp = NULL;
}
#endif
score = -1;
keyframe = 0;
}
int ZMPacket::decode( AVCodecContext *ctx ) {

View File

@ -44,6 +44,7 @@ class ZMPacket {
struct timeval *timestamp;
AVMediaType codec_type;
int image_index;
public:
AVPacket *av_packet() { return &packet; }
AVPacket *set_packet( AVPacket *p ) ;

View File

@ -24,8 +24,11 @@
#define VIDEO_QUEUESIZE 200
#define AUDIO_QUEUESIZE 50
zm_packetqueue::zm_packetqueue() {
zm_packetqueue::zm_packetqueue( unsigned int video_image_count, int p_video_stream_id ) {
video_stream_id = p_video_stream_id;
max_video_packet_count = video_image_count;
video_packet_count = 0;
analysis_it = pktQueue.begin();
}
zm_packetqueue::~zm_packetqueue() {
@ -34,8 +37,17 @@ zm_packetqueue::~zm_packetqueue() {
bool zm_packetqueue::queuePacket( ZMPacket* zm_packet ) {
pktQueue.push_back( zm_packet );
if ( zm_packet->codec_type == AVMEDIA_TYPE_VIDEO )
if ( zm_packet->codec_type == AVMEDIA_TYPE_VIDEO ) {
video_packet_count += 1;
if ( video_packet_count > max_video_packet_count )
clearQueue( max_video_packet_count, video_stream_id );
}
if ( analysis_it == pktQueue.end() ) {
// ANalsys_it should only point to end when queue it empty
Debug(2,"pointing analysis_it to begining");
analysis_it = pktQueue.begin();
}
return true;
}
@ -46,6 +58,9 @@ ZMPacket* zm_packetqueue::popPacket( ) {
}
ZMPacket *packet = pktQueue.front();
if ( *analysis_it == packet )
analysis_it ++;
pktQueue.pop_front();
if ( packet->codec_type == AVMEDIA_TYPE_VIDEO )
video_packet_count -= 1;
@ -71,7 +86,7 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
AVPacket *av_packet = &(zm_packet->packet);
Debug(4, "Looking at packet with stream index (%d) with keyframe(%d), frames_to_keep is (%d)",
av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), frames_to_keep );
av_packet->stream_index, zm_packet->keyframe, frames_to_keep );
// Want frames_to_keep video frames. Otherwise, we may not have enough
if ( av_packet->stream_index == stream_id ) {
@ -84,8 +99,6 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
Debug(4, "Hit end of queue, still need (%d) video keyframes", frames_to_keep );
} else {
if ( it != pktQueue.rend() ) {
Debug(4, "Not rend");
ZMPacket *zm_packet = *it;
Debug(4, "packet %x %d", zm_packet, zm_packet->image_index);
@ -93,10 +106,10 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
while (
( it != pktQueue.rend() )
&&
(( av_packet->stream_index != stream_id ) || ! ( av_packet->flags & AV_PKT_FLAG_KEY ))
(( av_packet->stream_index != stream_id ) || !zm_packet->keyframe )
) {
zm_packet = *it;
Debug(4, "packet %x %d", zm_packet, zm_packet->image_index);
//Debug(4, "packet %x %d", zm_packet, zm_packet->image_index);
++it;
av_packet = &( (*it)->packet );
}
@ -108,6 +121,8 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
Debug(4, "Deleting a packet from the front, count is (%d)", delete_count );
packet = pktQueue.front();
if ( *analysis_it == packet )
analysis_it ++;
if ( packet->codec_type == AVMEDIA_TYPE_VIDEO )
video_packet_count -= 1;
pktQueue.pop_front();
@ -115,7 +130,7 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
delete packet;
delete_count += 1;
}
} // while our iterator is not the first packet
Debug(3, "Deleted (%d) packets", delete_count );
return delete_count;
} // end unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream_id )
@ -129,6 +144,7 @@ void zm_packetqueue::clearQueue() {
delete packet;
}
video_packet_count = 0;
analysis_it = pktQueue.begin();
}
unsigned int zm_packetqueue::size() {
@ -139,60 +155,29 @@ unsigned int zm_packetqueue::get_video_packet_count() {
return video_packet_count;
}
void zm_packetqueue::clear_unwanted_packets( timeval *recording_started, int mVideoStreamId ) {
// Need to find the keyframe <= recording_started. Can get rid of audio packets.
if ( pktQueue.empty() ) {
return;
}
// Returns a packet to analyse or NULL
ZMPacket *zm_packetqueue::get_analysis_packet() {
// Step 1 - find keyframe < recording_started.
// Step 2 - pop packets until we get to the packet in step 2
std::list<ZMPacket *>::reverse_iterator it;
if ( ! pktQueue.size() )
return NULL;
if ( analysis_it == pktQueue.end() )
return NULL;
Debug(3, "Looking for keyframe after start recording stream id (%d)", mVideoStreamId );
for ( it = pktQueue.rbegin(); it != pktQueue.rend(); ++ it ) {
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
if (
( av_packet->flags & AV_PKT_FLAG_KEY )
&&
( av_packet->stream_index == mVideoStreamId )
&&
timercmp( zm_packet->timestamp, recording_started, < )
) {
Debug(3, "Found keyframe before start with stream index (%d) with keyframe (%d)", av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ) );
break;
}
Debug(2, "Distance from head: (%d)", std::distance( pktQueue.begin(), analysis_it ) );
Debug(2, "Distance from end: (%d)", std::distance( analysis_it, pktQueue.end() ) );
return *analysis_it;
} // end ZMPacket *zm_packetqueue::get_analysis_packet()
// The idea is that analsys_it will only be == end() if the queue is empty
// probvlem here is that we don't want to analyse a packet twice. Maybe we can flag the packet analysed
bool zm_packetqueue::increment_analysis_it( ) {
// We do this instead of distance becuase distance will traverse the entire list in the worst case
std::list<ZMPacket *>::iterator next_it = analysis_it;
next_it ++;
if ( next_it == pktQueue.end() ) {
return false;
}
if ( it == pktQueue.rend() ) {
Debug(1, "Didn't find a keyframe packet keeping all" );
return;
}
ZMPacket *zm_packet = *it;
AVPacket *av_packet = &(zm_packet->packet);
Debug(3, "Found packet before start with stream index (%d) with keyframe (%d), distance(%d), size(%d)",
av_packet->stream_index,
( av_packet->flags & AV_PKT_FLAG_KEY ),
distance( it, pktQueue.rend() ),
pktQueue.size() );
unsigned int deleted_frames = 0;
ZMPacket *packet = NULL;
while ( distance( it, pktQueue.rend() ) > 1 ) {
//while ( pktQueue.rend() != it ) {
packet = pktQueue.front();
pktQueue.pop_front();
if ( packet->image_index == -1 )
delete packet;
deleted_frames += 1;
}
zm_packet = pktQueue.front();
av_packet = &(zm_packet->packet);
if ( ( ! ( av_packet->flags & AV_PKT_FLAG_KEY ) ) || ( av_packet->stream_index != mVideoStreamId ) ) {
Error( "Done looking for keyframe. Deleted %d frames. Remaining frames in queue: %d stream of head packet is (%d), keyframe (%d), distance(%d), packets(%d)", deleted_frames, pktQueue.size(), av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), distance( it, pktQueue.rend() ), pktQueue.size() );
} else {
Debug(1, "Done looking for keyframe. Deleted %d frames. Remaining frames in queue: %d stream of head packet is (%d), keyframe (%d), distance(%d), packets(%d)", deleted_frames, pktQueue.size(), av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), distance( it, pktQueue.rend() ), pktQueue.size() );
}
}
analysis_it = next_it;
return true;
} // end bool zm_packetqueue::increment_analysis_it( )

View File

@ -31,8 +31,16 @@ extern "C" {
}
class zm_packetqueue {
public: // For now just to ease development
std::list<ZMPacket *> pktQueue;
std::list<ZMPacket *>::iterator analysis_it;
int video_stream_id;
int video_packet_count; // keep track of how many video packets we have, because we shouldn't have more than image_buffer_count
unsigned int max_video_packet_count;
public:
zm_packetqueue();
zm_packetqueue( unsigned int p_max_video_packet_count, int p_video_stream_id );
virtual ~zm_packetqueue();
bool queuePacket( ZMPacket* packet );
ZMPacket * popPacket( );
@ -41,8 +49,10 @@ public:
unsigned int size();
unsigned int get_video_packet_count();
void clear_unwanted_packets( timeval *recording, int mVideoStreamId );
std::list<ZMPacket *> pktQueue;
int video_packet_count; // keep track of how many video packets we have, because we shouldn't have more than image_buffer_count
// Functions to manage the analysis frame logic
bool increment_analysis_it();
ZMPacket *get_analysis_packet();
};
#endif /* ZM_PACKETQUEUE_H */

View File

@ -69,8 +69,8 @@ void StreamBase::updateFrameRate( double fps ) {
while( effective_fps > maxfps ) {
effective_fps /= 2.0;
frame_mod *= 2;
}
Debug( 3, "aEFPS:%.2f, aFM:%d", effective_fps, frame_mod );
}
}
bool StreamBase::checkCommandQueue() {

View File

@ -27,8 +27,7 @@
#include <errno.h>
#include <sys/time.h>
struct timespec getTimeout( int secs )
{
struct timespec getTimeout( int secs ) {
struct timespec timeout;
struct timeval temp_timeout;
gettimeofday( &temp_timeout, 0 );
@ -37,65 +36,56 @@ struct timespec getTimeout( int secs )
return( timeout );
}
struct timespec getTimeout( double secs )
{
struct timespec getTimeout( double secs ) {
struct timespec timeout;
struct timeval temp_timeout;
gettimeofday( &temp_timeout, 0 );
timeout.tv_sec = temp_timeout.tv_sec + int(secs);
timeout.tv_nsec = temp_timeout.tv_usec += (long int)(1000000000.0*(secs-int(secs)));
if ( timeout.tv_nsec > 1000000000 )
{
if ( timeout.tv_nsec > 1000000000 ) {
timeout.tv_sec += 1;
timeout.tv_nsec -= 1000000000;
}
return( timeout );
}
Mutex::Mutex()
{
Mutex::Mutex() {
if ( pthread_mutex_init( &mMutex, NULL ) < 0 )
throw ThreadException( stringtf( "Unable to create pthread mutex: %s", strerror(errno) ) );
Fatal( "Unable to create pthread mutex: %s", strerror(errno) );
}
Mutex::~Mutex()
{
Mutex::~Mutex() {
if ( locked() )
Warning( "Destroying mutex when locked" );
if ( pthread_mutex_destroy( &mMutex ) < 0 )
throw ThreadException( stringtf( "Unable to destroy pthread mutex: %s", strerror(errno) ) );
Fatal( "Unable to destroy pthread mutex: %s", strerror(errno) );
}
void Mutex::lock()
{
void Mutex::lock() {
if ( pthread_mutex_lock( &mMutex ) < 0 )
throw ThreadException( stringtf( "Unable to lock pthread mutex: %s", strerror(errno) ) );
Debug(3, "Lock");
//Debug(3, "Lock");
}
void Mutex::lock( int secs )
{
void Mutex::lock( int secs ) {
struct timespec timeout = getTimeout( secs );
if ( pthread_mutex_timedlock( &mMutex, &timeout ) < 0 )
throw ThreadException( stringtf( "Unable to timedlock pthread mutex: %s", strerror(errno) ) );
}
void Mutex::lock( double secs )
{
void Mutex::lock( double secs ) {
struct timespec timeout = getTimeout( secs );
if ( pthread_mutex_timedlock( &mMutex, &timeout ) < 0 )
throw ThreadException( stringtf( "Unable to timedlock pthread mutex: %s", strerror(errno) ) );
}
void Mutex::unlock()
{
void Mutex::unlock() {
if ( pthread_mutex_unlock( &mMutex ) < 0 )
throw ThreadException( stringtf( "Unable to unlock pthread mutex: %s", strerror(errno) ) );
Debug(3, "unLock");
//Debug(3, "unLock");
}
bool Mutex::locked()
{
bool Mutex::locked() {
int state = pthread_mutex_trylock( &mMutex );
if ( state != 0 && state != EBUSY )
throw ThreadException( stringtf( "Unable to trylock pthread mutex: %s", strerror(errno) ) );
@ -104,27 +94,23 @@ bool Mutex::locked()
return( state == EBUSY );
}
Condition::Condition( Mutex &mutex ) : mMutex( mutex )
{
Condition::Condition( Mutex &mutex ) : mMutex( mutex ) {
if ( pthread_cond_init( &mCondition, NULL ) < 0 )
throw ThreadException( stringtf( "Unable to create pthread condition: %s", strerror(errno) ) );
Fatal( "Unable to create pthread condition: %s", strerror(errno) );
}
Condition::~Condition()
{
Condition::~Condition() {
if ( pthread_cond_destroy( &mCondition ) < 0 )
throw ThreadException( stringtf( "Unable to destroy pthread condition: %s", strerror(errno) ) );
Fatal( "Unable to destroy pthread condition: %s", strerror(errno) );
}
void Condition::wait()
{
void Condition::wait() {
// Locking done outside of this function
if ( pthread_cond_wait( &mCondition, mMutex.getMutex() ) < 0 )
throw ThreadException( stringtf( "Unable to wait pthread condition: %s", strerror(errno) ) );
}
bool Condition::wait( int secs )
{
bool Condition::wait( int secs ) {
// Locking done outside of this function
Debug( 8, "Waiting for %d seconds", secs );
struct timespec timeout = getTimeout( secs );
@ -133,8 +119,7 @@ bool Condition::wait( int secs )
return( errno != ETIMEDOUT );
}
bool Condition::wait( double secs )
{
bool Condition::wait( double secs ) {
// Locking done outside of this function
struct timespec timeout = getTimeout( secs );
if ( pthread_cond_timedwait( &mCondition, mMutex.getMutex(), &timeout ) < 0 && errno != ETIMEDOUT )
@ -142,36 +127,31 @@ bool Condition::wait( double secs )
return( errno != ETIMEDOUT );
}
void Condition::signal()
{
void Condition::signal() {
if ( pthread_cond_signal( &mCondition ) < 0 )
throw ThreadException( stringtf( "Unable to signal pthread condition: %s", strerror(errno) ) );
}
void Condition::broadcast()
{
void Condition::broadcast() {
if ( pthread_cond_broadcast( &mCondition ) < 0 )
throw ThreadException( stringtf( "Unable to broadcast pthread condition: %s", strerror(errno) ) );
}
template <class T> const T ThreadData<T>::getValue() const
{
template <class T> const T ThreadData<T>::getValue() const {
mMutex.lock();
const T valueCopy = mValue;
mMutex.unlock();
return( valueCopy );
}
template <class T> T ThreadData<T>::setValue( const T value )
{
template <class T> T ThreadData<T>::setValue( const T value ) {
mMutex.lock();
const T valueCopy = mValue = value;
mMutex.unlock();
return( valueCopy );
}
template <class T> const T ThreadData<T>::getUpdatedValue() const
{
template <class T> const T ThreadData<T>::getUpdatedValue() const {
Debug( 8, "Waiting for value update, %p", this );
mMutex.lock();
mChanged = false;
@ -184,8 +164,7 @@ template <class T> const T ThreadData<T>::getUpdatedValue() const
return( valueCopy );
}
template <class T> const T ThreadData<T>::getUpdatedValue( double secs ) const
{
template <class T> const T ThreadData<T>::getUpdatedValue( double secs ) const {
Debug( 8, "Waiting for value update, %.2f secs, %p", secs, this );
mMutex.lock();
mChanged = false;
@ -198,8 +177,7 @@ template <class T> const T ThreadData<T>::getUpdatedValue( double secs ) const
return( valueCopy );
}
template <class T> const T ThreadData<T>::getUpdatedValue( int secs ) const
{
template <class T> const T ThreadData<T>::getUpdatedValue( int secs ) const {
Debug( 8, "Waiting for value update, %d secs, %p", secs, this );
mMutex.lock();
mChanged = false;
@ -212,8 +190,7 @@ template <class T> const T ThreadData<T>::getUpdatedValue( int secs ) const
return( valueCopy );
}
template <class T> void ThreadData<T>::updateValueSignal( const T value )
{
template <class T> void ThreadData<T>::updateValueSignal( const T value ) {
Debug( 8, "Updating value with signal, %p", this );
mMutex.lock();
mValue = value;
@ -223,8 +200,7 @@ template <class T> void ThreadData<T>::updateValueSignal( const T value )
Debug( 9, "Updated value, %p", this );
}
template <class T> void ThreadData<T>::updateValueBroadcast( const T value )
{
template <class T> void ThreadData<T>::updateValueBroadcast( const T value ) {
Debug( 8, "Updating value with broadcast, %p", this );
mMutex.lock();
mValue = value;
@ -243,33 +219,29 @@ Thread::Thread() :
Debug( 1, "Creating thread" );
}
Thread::~Thread()
{
Thread::~Thread() {
Debug( 1, "Destroying thread %d", mPid );
if ( mStarted )
join();
}
void *Thread::mThreadFunc( void *arg )
{
void *Thread::mThreadFunc( void *arg ) {
Debug( 2, "Invoking thread" );
Thread *thisPtr = (Thread *)arg;
thisPtr->status = 0;
try
{
try {
thisPtr->mThreadMutex.lock();
thisPtr->mPid = thisPtr->id();
thisPtr->mThreadCondition.signal();
thisPtr->mThreadMutex.unlock();
thisPtr->mRunning = true;
Debug(2,"Runnning");
thisPtr->status = thisPtr->run();
thisPtr->mRunning = false;
Debug( 2, "Exiting thread, status %p", (void *)&(thisPtr->status) );
return (void *)&(thisPtr->status);
}
catch ( const ThreadException &e )
{
} catch ( const ThreadException &e ) {
Error( "%s", e.getMessage().c_str() );
thisPtr->mRunning = false;
Debug( 2, "Exiting thread after exception, status %p", (void *)-1 );
@ -277,14 +249,12 @@ void *Thread::mThreadFunc( void *arg )
}
}
void Thread::start()
{
void Thread::start() {
Debug( 1, "Starting thread" );
if ( isThread() )
throw ThreadException( "Can't self start thread" );
mThreadMutex.lock();
if ( !mStarted )
{
if ( !mStarted ) {
pthread_attr_t threadAttrs;
pthread_attr_init( &threadAttrs );
pthread_attr_setscope( &threadAttrs, PTHREAD_SCOPE_SYSTEM );
@ -293,9 +263,7 @@ void Thread::start()
if ( pthread_create( &mThread, &threadAttrs, mThreadFunc, this ) < 0 )
throw ThreadException( stringtf( "Can't create thread: %s", strerror(errno) ) );
pthread_attr_destroy( &threadAttrs );
}
else
{
} else {
Error( "Attempt to start already running thread %d", mPid );
}
mThreadCondition.wait();
@ -303,37 +271,29 @@ void Thread::start()
Debug( 1, "Started thread %d", mPid );
}
void Thread::join()
{
void Thread::join() {
Debug( 1, "Joining thread %d", mPid );
if ( isThread() )
throw ThreadException( "Can't self join thread" );
mThreadMutex.lock();
if ( mPid >= 0 )
{
if ( mStarted )
{
if ( mPid >= 0 ) {
if ( mStarted ) {
void *threadStatus = 0;
if ( pthread_join( mThread, &threadStatus ) < 0 )
throw ThreadException( stringtf( "Can't join sender thread: %s", strerror(errno) ) );
mStarted = false;
Debug( 1, "Thread %d exited, status %p", mPid, threadStatus );
}
else
{
} else {
Warning( "Attempt to join already finished thread %d", mPid );
}
}
else
{
} else {
Warning( "Attempt to join non-started thread %d", mPid );
}
mThreadMutex.unlock();
Debug( 1, "Joined thread %d", mPid );
}
void Thread::kill( int signal )
{
void Thread::kill( int signal ) {
pthread_kill( mThread, signal );
}

View File

@ -43,10 +43,11 @@ VideoStore::VideoStore(
filename = filename_in;
format = format_in;
av_register_all();
packets_written = 0;
frame_count = 0;
av_register_all();
Info("Opening video storage stream %s format: %s", filename, format);
ret = avformat_alloc_output_context2(&oc, NULL, NULL, filename);
@ -62,7 +63,7 @@ VideoStore::VideoStore(
// Couldn't deduce format from filename, trying from format name
if ( !oc ) {
avformat_alloc_output_context2(&oc, NULL, format, filename);
if (!oc) {
if ( !oc ) {
Fatal(
"Could not create video storage stream %s as no out ctx"
" could not be assigned based on filename or format %s",
@ -70,13 +71,12 @@ VideoStore::VideoStore(
} else {
Debug(4, "Success alocating out ctx");
}
} // end if ! oc
} // end if ! oc
Debug(2, "Success opening output contect");
AVDictionary *pmetadata = NULL;
int dsr = av_dict_set(&pmetadata, "title", "Zoneminder Security Recording", 0);
if (dsr < 0) Warning("%s:%d: title set failed", __FILE__, __LINE__);
Debug(2, "Success setting up dictcontect");
ret = av_dict_set(&pmetadata, "title", "Zoneminder Security Recording", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
oc->metadata = pmetadata;
out_format = oc->oformat;
@ -87,14 +87,12 @@ VideoStore::VideoStore(
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
video_in_ctx = avcodec_alloc_context3(NULL);
Debug(2, "copy to video_in_context");
avcodec_parameters_to_context(video_in_ctx,
video_in_stream->codecpar);
avcodec_parameters_to_context(video_in_ctx, video_in_stream->codecpar);
zm_dump_codecpar( video_in_stream->codecpar );
//video_in_ctx.codec_id = video_in_stream->codecpar.codec_id;
#else
video_in_ctx = video_in_stream->codec;
Debug(2,"Copied video context from input stream");
zm_dump_codec(video_in_ctx);
Debug(2,"Copied video context from input stream");
zm_dump_codec(video_in_ctx);
#endif
// Fix deprecated formats
switch ( video_in_ctx->pix_fmt ) {
@ -124,8 +122,7 @@ Debug(2,"Copied video context from input stream");
// Copy params from instream to ctx
if ( video_in_stream && ( video_in_ctx->codec_id == AV_CODEC_ID_H264 ) ) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_parameters_to_context(video_out_ctx,
video_in_stream->codecpar);
ret = avcodec_parameters_to_context(video_out_ctx, video_in_stream->codecpar);
if ( ret < 0 ) {
Error("Could not initialize ctx parameteres");
return;
@ -143,18 +140,25 @@ Debug(2,"Copied video context from input stream");
Debug(3, "Have orientation");
if ( orientation == Monitor::ROTATE_0 ) {
} else if ( orientation == Monitor::ROTATE_90 ) {
dsr = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
ret = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else if ( orientation == Monitor::ROTATE_180 ) {
dsr = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
ret = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else if ( orientation == Monitor::ROTATE_270 ) {
dsr = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
ret = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
} else {
Warning("Unsupported Orientation(%d)", orientation);
}
}
if ( oc->oformat->flags & AVFMT_GLOBALHEADER ) {
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
// Fix deprecated formats
switch ( video_out_ctx->pix_fmt ) {
case AV_PIX_FMT_YUVJ420P :
@ -170,6 +174,7 @@ Debug(2,"Copied video context from input stream");
video_out_ctx->pix_fmt = AV_PIX_FMT_YUV440P;
break;
default:
video_out_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
break;
}
// Same codec, just copy the packets, otherwise we have to decode/encode
@ -184,19 +189,20 @@ Debug(2,"Copied video context from input stream");
} else {
/** Create a new frame to store the */
if ( !(in_frame = zm_av_frame_alloc()) ) {
Error("Could not allocate in frame");
if ( !(video_in_frame = zm_av_frame_alloc()) ) {
Error("Could not allocate video_in frame");
return;
}
// Don't have an input stream, so need to tell it what we are sending it, or are transcoding
video_out_ctx->width = monitor->Width();
video_out_ctx->height = monitor->Height();
video_out_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
if ( oc->oformat->flags & AVFMT_GLOBALHEADER ) {
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#else
video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
}
@ -222,8 +228,6 @@ Debug(2,"Copied video context from input stream");
video_out_ctx->codec_id = AV_CODEC_ID_H264;
//video_in_ctx->sample_aspect_ratio;
/* take first format from list of supported formats */
//video_out_ctx->pix_fmt = video_out_codec->pix_fmts[0];
video_out_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
/* video time_base can be set to whatever is handy and supported by encoder */
video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
@ -235,6 +239,9 @@ Debug(2,"Copied video context from input stream");
video_out_ctx->qcompress = 0.6;
video_out_ctx->bit_rate = 4000000;
#endif
video_out_ctx->max_b_frames = 1;
if (video_out_ctx->codec_id == AV_CODEC_ID_H264)
av_opt_set(video_out_ctx->priv_data, "preset", "superfast", 0);
AVDictionary *opts = 0;
std::string Options = monitor->GetEncoderOptions();
@ -248,23 +255,6 @@ Debug(2,"Copied video context from input stream");
}
}
#if 0
if ( ! av_dict_get( opts, "preset", NULL, 0 ) ) {
Debug(2,"Setting preset to superfast");
av_dict_set( &opts, "preset", "superfast", 0 );
}
if ( ! av_dict_get( opts, "crf", NULL, 0 ) ) {
Debug(2,"Setting crf to superfast");
av_dict_set( &opts, "crf", "0", 0 );
}
#endif
#if 0
if ( ! av_dict_get( opts, "tune", NULL, 0 ) ) {
Debug(2,"Setting tune to zerolatency");
av_dict_set( &opts, "tune", "zerolatency", 0 );
}
#endif
if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
Warning("Can't open video codec (%s)! %s, trying h264",
video_out_codec->name,
@ -279,20 +269,6 @@ Debug(2,"Copied video context from input stream");
return;
}
}
#if 0
if ( ! av_dict_get( opts, "preset", NULL, 0 ) ) {
Debug(2,"Setting preset to superfast");
av_dict_set( &opts, "preset", "ultrafast", 0 );
}
if ( ! av_dict_get( opts, "crf", NULL, 0 ) ) {
Debug(2,"Setting crf to 0");
av_dict_set( &opts, "crf", "0", 0 );
}
if ( ! av_dict_get( opts, "tune", NULL, 0 ) ) {
Debug(2,"Setting tune to zerolatency");
av_dict_set( &opts, "tune", "zerolatency", 0 );
}
#endif
if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
Error("Can't open video codec (%s)! %s",
video_out_codec->name,
@ -314,7 +290,7 @@ Debug(2,"Sucess opening codec");
}
} else {
Error("Codec not set");
}// end if codec == h264
} // end if codec == h264
swscale.SetDefaults(
video_in_ctx->pix_fmt,
@ -359,6 +335,7 @@ Error("Codec not set");
converted_in_samples = NULL;
audio_out_codec = NULL;
audio_in_codec = NULL;
audio_in_ctx = NULL;
audio_out_stream = NULL;
out_frame = NULL;
@ -367,10 +344,10 @@ Error("Codec not set");
#endif
if ( audio_in_stream ) {
Debug(3, "Have audio stream");
audio_in_stream_index = audio_in_stream->index;
Debug(3, "Have audio stream");
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
audio_in_ctx = avcodec_alloc_context3(NULL);
ret = avcodec_parameters_to_context(audio_in_ctx,
audio_in_stream->codecpar);
@ -384,7 +361,7 @@ Error("Codec not set");
0);
Debug(2, "Got something other than AAC (%s)", error_buffer);
if (!setup_resampler()) {
if ( !setup_resampler() ) {
return;
}
} else {
@ -396,7 +373,7 @@ Error("Codec not set");
#else
avformat_new_stream(oc, (AVCodec *)audio_in_ctx->codec);
#endif
if (!audio_out_stream) {
if ( !audio_out_stream ) {
Error("Unable to create audio out stream\n");
audio_out_stream = NULL;
} else {
@ -405,37 +382,33 @@ Error("Codec not set");
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
audio_out_ctx = avcodec_alloc_context3(audio_out_codec);
// Copy params from instream to ctx
ret = avcodec_parameters_to_context(audio_out_ctx,
audio_in_stream->codecpar);
if (ret < 0) {
if ( (ret = avcodec_parameters_to_context(audio_out_ctx, audio_in_stream->codecpar) ) < 0 ) {
Error("Unable to copy audio params to ctx %s\n",
av_make_error_string(ret).c_str());
}
ret = avcodec_parameters_from_context(audio_out_stream->codecpar,
audio_out_ctx);
if (ret < 0) {
// Then from ctx to out_stream
ret = avcodec_parameters_from_context(audio_out_stream->codecpar, audio_out_ctx);
if ( ret < 0 ) {
Error("Unable to copy audio params to stream %s\n",
av_make_error_string(ret).c_str());
}
if (!audio_out_ctx->codec_tag) {
if ( !audio_out_ctx->codec_tag ) {
audio_out_ctx->codec_tag = av_codec_get_tag(
oc->oformat->codec_tag, audio_in_ctx->codec_id);
Debug(2, "Setting audio codec tag to %d",
audio_out_ctx->codec_tag);
Debug(2, "Setting audio codec tag to %d", audio_out_ctx->codec_tag);
}
#else
audio_out_ctx = audio_out_stream->codec;
ret = avcodec_copy_context(audio_out_ctx, audio_in_ctx);
audio_out_ctx->codec_tag = 0;
#endif
if (ret < 0) {
if ( ret < 0 ) {
Error("Unable to copy audio ctx %s\n",
av_make_error_string(ret).c_str());
audio_out_stream = NULL;
} else {
if (audio_out_ctx->channels > 1) {
if ( audio_out_ctx->channels > 1 ) {
Warning("Audio isn't mono, changing it.");
audio_out_ctx->channels = 1;
} else {
@ -464,6 +437,7 @@ Error("Codec not set");
video_next_dts = 0;
audio_next_pts = 0;
audio_next_dts = 0;
Debug(2,"End VIdeoStore");
} // VideoStore::VideoStore
bool VideoStore::open() {
@ -477,15 +451,12 @@ bool VideoStore::open() {
}
}
// os->ctx_inited = 1;
// avio_flush(ctx->pb);
// av_dict_free(&opts);
zm_dump_stream_format(oc, 0, 0, 1);
if (audio_out_stream) zm_dump_stream_format(oc, 1, 0, 1);
AVDictionary *opts = NULL;
// av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
// av_dict_set(&opts, "movflags",
// "frag_keyframe+empty_moov+default_base_moof", 0);
if ( (ret = avformat_write_header(oc, &opts)) < 0 ) {
@ -511,34 +482,38 @@ void VideoStore::write_audio_packet( AVPacket &pkt ) {
pkt.pts = audio_next_pts;
pkt.dts = audio_next_dts;
if (pkt.duration > 0)
Debug(2, "writing audio packet pts(%d) dts(%d) duration(%d)", pkt.pts, pkt.dts, pkt.duration);
if ( pkt.duration > 0 ) {
pkt.duration =
av_rescale_q(pkt.duration, audio_out_ctx->time_base,
audio_out_stream->time_base);
}
audio_next_pts += pkt.duration;
audio_next_dts += pkt.duration;
Debug(2, "writing audio packet pts(%d) dts(%d) duration(%d)", pkt.pts,
pkt.dts, pkt.duration);
Debug(2, "writing audio packet pts(%d) dts(%d) duration(%d)", pkt.pts, pkt.dts, pkt.duration);
pkt.stream_index = audio_out_stream->index;
av_interleaved_write_frame(oc, &pkt);
}
} // end void VideoStore::Write_audio_packet( AVPacket &pkt )
VideoStore::~VideoStore() {
if ( video_out_ctx->codec_id != video_in_ctx->codec_id ) {
Debug(2,"Different codecs between in and out");
if ( video_out_ctx->codec_id != video_in_ctx->codec_id || audio_out_codec ) {
Debug(2,"Different codecs between in and out");
// The codec queues data. We need to send a flush command and out
// whatever we get. Failures are not fatal.
AVPacket pkt;
// WIthout these we seg fault I don't know why.
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
// I got crashes if the codec didn't do DELAY, so let's test for it.
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
if ( video_out_ctx->codec && ( video_out_ctx->codec->capabilities & AV_CODEC_CAP_DELAY ) ) {
#else
if ( video_out_ctx->codec && ( video_out_ctx->codec->capabilities & CODEC_CAP_DELAY ) ) {
#endif
// The codec queues data. We need to send a flush command and out
// whatever we get. Failures are not fatal.
AVPacket pkt;
av_init_packet(&pkt);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// Put encoder into flushing mode
avcodec_send_frame(video_out_ctx, NULL);
@ -552,31 +527,30 @@ Debug(2,"Different codecs between in and out");
break;
}
#else
while (1) {
// WIthout these we seg fault I don't know why.
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
int got_packet = 0;
ret = avcodec_encode_video2(video_out_ctx, &pkt, NULL, &got_packet);
if ( ret < 0 ) {
Error("ERror encoding video while flushing (%d) (%s)", ret,
av_err2str(ret));
break;
}
if (!got_packet) {
break;
}
while (1) {
// WIthout these we seg fault I don't know why.
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
int got_packet = 0;
ret = avcodec_encode_video2(video_out_ctx, &pkt, NULL, &got_packet);
if ( ret < 0 ) {
Error("ERror encoding video while flushing (%d) (%s)", ret, av_err2str(ret));
break;
}
if (!got_packet) {
break;
}
#endif
int keyframe = pkt.flags & AV_PKT_FLAG_KEY;
Debug(3, "dts:%d, pts:%d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
//pkt.dts = video_next_dts;
pkt.pts = pkt.dts;
//pkt.duration = video_last_duration;
write_video_packet(pkt);
zm_av_packet_unref(&pkt);
} // while have buffered frames
} // end if have delay capability
int keyframe = pkt.flags & AV_PKT_FLAG_KEY;
Debug(3, "dts:%I64d, pts:%I64d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
//pkt.dts = video_next_dts;
pkt.pts = pkt.dts;
//pkt.duration = video_last_duration;
write_video_packet(pkt);
zm_av_packet_unref(&pkt);
} // while have buffered frames
} // end if have delay capability
} // end if have buffered video
if ( audio_out_codec ) {
@ -592,22 +566,20 @@ Debug(3, "dts:%d, pts:%d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
// Put encoder into flushing mode
avcodec_send_frame(audio_out_ctx, NULL);
while (1) {
ret = avcodec_receive_packet(audio_out_ctx, &pkt);
if (ret < 0) {
if ( (ret = avcodec_receive_packet(audio_out_ctx, &pkt) ) < 0 ) {
if (AVERROR_EOF != ret) {
Error("ERror encoding audio while flushing (%d) (%s)", ret,
av_err2str(ret));
Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str(ret));
}
break;
}
#else
while (1) {
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
int got_packet = 0;
ret =
avcodec_encode_audio2(audio_out_ctx, &pkt, NULL, &got_packet);
if (ret < 0) {
Error("ERror encoding audio while flushing (%d) (%s)", ret,
av_err2str(ret));
if ( (ret = avcodec_encode_audio2(audio_out_ctx, &pkt, NULL, &got_packet)) < 0 ) {
Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str(ret));
break;
}
Debug(1, "Have audio encoder, need to flush it's out");
@ -617,14 +589,14 @@ Debug(3, "dts:%d, pts:%d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
#endif
write_audio_packet(pkt);
zm_av_packet_unref(&pkt);
} // while have buffered frames
} // end if audio_out_codec
} // while have buffered frames
} // end if audio_out_codec
// Flush Queues
av_interleaved_write_frame(oc, NULL);
/* Write the trailer before close */
if (int rc = av_write_trailer(oc)) {
if ( int rc = av_write_trailer(oc) ) {
Error("Error writing trailer %s", av_err2str(rc));
} else {
Debug(3, "Sucess Writing trailer");
@ -635,18 +607,35 @@ Debug(3, "dts:%d, pts:%d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
// allocation/de-allocation constantly, or whether we can just re-use it.
// Just do a file open/close/writeheader/etc.
// What if we were only doing audio recording?
// Used by both audio and video conversions
if ( in_frame ) {
av_frame_free(&in_frame);
in_frame = NULL;
}
if ( audio_in_ctx ) {
avcodec_free_context(&audio_in_ctx);
audio_in_ctx = NULL;
if (video_out_stream) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// We allocate and copy in newer ffmpeg, so need to free it
avcodec_free_context(&video_in_ctx);
#endif
video_in_ctx=NULL;
avcodec_close(video_out_ctx);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_free_context(&video_out_ctx);
#endif
video_out_ctx = NULL;
Debug(4, "Success freeing video_out_ctx");
}
if ( audio_out_stream ) {
if ( audio_in_codec ) {
avcodec_close(audio_in_ctx);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// We allocate and copy in newer ffmpeg, so need to free it
avcodec_free_context(&audio_in_ctx);
#endif
audio_in_ctx = NULL;
audio_in_codec = NULL;
} // end if audio_in_codec
avcodec_close(audio_out_ctx);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
avcodec_free_context(&audio_out_ctx);
#endif
audio_out_ctx = NULL;
#ifdef HAVE_LIBAVRESAMPLE
if ( resample_ctx ) {
@ -700,8 +689,7 @@ bool VideoStore::setup_resampler() {
audio_in_ctx->codec_id
#endif
);
ret = avcodec_open2(audio_in_ctx, audio_in_codec, NULL);
if (ret < 0) {
if ( (ret = avcodec_open2(audio_in_ctx, audio_in_codec, NULL)) < 0 ) {
Error("Can't open in codec!");
return false;
}
@ -713,6 +701,10 @@ bool VideoStore::setup_resampler() {
}
Debug(2, "Have audio out codec");
// Now copy them to the out stream
audio_out_stream = avformat_new_stream(oc, audio_out_codec);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// audio_out_ctx = audio_out_stream->codec;
audio_out_ctx = avcodec_alloc_context3(audio_out_codec);
if ( !audio_out_ctx ) {
@ -720,6 +712,9 @@ bool VideoStore::setup_resampler() {
audio_out_stream = NULL;
return false;
}
#else
audio_out_ctx = audio_out_stream->codec;
#endif
/* put sample parameters */
audio_out_ctx->bit_rate = audio_in_ctx->bit_rate;
@ -731,7 +726,7 @@ bool VideoStore::setup_resampler() {
if ( audio_out_codec->supported_samplerates ) {
int found = 0;
for ( int i=0; audio_out_codec->supported_samplerates[i]; i++) {
for ( int i=0; audio_out_codec->supported_samplerates[i]; i++ ) {
if ( audio_out_ctx->sample_rate ==
audio_out_codec->supported_samplerates[i]) {
found = 1;
@ -757,8 +752,6 @@ bool VideoStore::setup_resampler() {
audio_out_ctx->time_base = (AVRational){1, audio_out_ctx->sample_rate};
// Now copy them to the out stream
audio_out_stream = avformat_new_stream(oc, audio_out_codec);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
if ( (ret = avcodec_parameters_from_context(audio_out_stream->codecpar,
@ -766,7 +759,11 @@ bool VideoStore::setup_resampler() {
Error("Could not initialize stream parameteres");
return false;
}
audio_out_stream->codecpar->frame_size = audio_out_ctx->frame_size;
#else
avcodec_copy_context( audio_out_stream->codec, audio_out_ctx );
#endif
audio_out_stream->time_base = (AVRational){1, audio_out_ctx->sample_rate};
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0); // Needed to allow AAC
@ -787,6 +784,22 @@ bool VideoStore::setup_resampler() {
audio_out_ctx->channels, audio_out_ctx->sample_fmt,
audio_out_ctx->channel_layout, audio_out_ctx->frame_size);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
Debug(1,
"Audio out bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) "
"layout(%d) frame_size(%d)",
audio_out_stream->codecpar->bit_rate, audio_out_stream->codecpar->sample_rate,
audio_out_stream->codecpar->channels, audio_out_stream->codecpar->format,
audio_out_stream->codecpar->channel_layout, audio_out_stream->codecpar->frame_size);
#else
Debug(1,
"Audio out bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) "
"layout(%d) frame_size(%d)",
audio_out_stream->codec->bit_rate, audio_out_stream->codec->sample_rate,
audio_out_stream->codec->channels, audio_out_stream->codec->sample_fmt,
audio_out_stream->codec->channel_layout, audio_out_stream->codec->frame_size);
#endif
/** Create a new frame to store the audio samples. */
if ( ! in_frame ) {
if (!(in_frame = zm_av_frame_alloc())) {
@ -879,7 +892,7 @@ void VideoStore::dumpPacket(AVPacket *pkt) {
snprintf(b, sizeof(b),
" pts: %" PRId64 ", dts: %" PRId64
", data: %p, size: %d, sindex: %d, dflags: %04x, s-pos: %" PRId64
", c-duration: %" PRId64 "\n",
", duration: %" PRId64 "\n",
pkt->pts,
pkt->dts,
pkt->data,
@ -1045,7 +1058,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
}
opkt.duration = 0;
Debug(3, "dts:%d, pts:%d, keyframe:%d", opkt.dts, opkt.pts, opkt.flags & AV_PKT_FLAG_KEY );
Debug(3, "dts:%" PRId64 ", pts:%" PRId64 ", keyframe:%d", opkt.dts, opkt.pts, opkt.flags & AV_PKT_FLAG_KEY );
write_video_packet( opkt );
zm_av_packet_unref(&opkt);
@ -1056,7 +1069,7 @@ void VideoStore::write_video_packet( AVPacket &opkt ) {
if ( opkt.dts > opkt.pts ) {
Debug(1,
"opkt.dts(%d) must be <= opkt.pts(%d). Decompression must happen "
"opkt.dts(%" PRId64 ") must be <= opkt.pts(%" PRId64 "). Decompression must happen "
"before presentation.",
opkt.dts, opkt.pts);
opkt.dts = opkt.pts;
@ -1073,7 +1086,7 @@ void VideoStore::write_video_packet( AVPacket &opkt ) {
//av_packet_rescale_ts( &opkt, video_out_ctx->time_base, video_out_stream->time_base );
Debug(1,
"writing video packet pts(%d) dts(%d) duration(%d) packet_count(%d)",
"writing video packet pts(%" PRId64 ") dts(%" PRId64 ") duration(%" PRId64 ") packet_count(%d)",
opkt.pts, opkt.dts, opkt.duration, packets_written );
if ( (opkt.data == NULL) || (opkt.size < 1) ) {
Warning("%s:%d: Mangled AVPacket: discarding frame", __FILE__, __LINE__);
@ -1296,7 +1309,7 @@ int VideoStore::writeAudioFramePacket(ZMPacket *zm_packet) {
// audio_last_dts = ipkt->dts;
if ( opkt.dts > opkt.pts ) {
Debug(1,
"opkt.dts(%d) must be <= opkt.pts(%d). Decompression must happen "
"opkt.dts(%" PRId64 ") must be <= opkt.pts(%" PRId64 "). Decompression must happen "
"before presentation.",
opkt.dts, opkt.pts);
opkt.dts = opkt.pts;
@ -1305,8 +1318,8 @@ int VideoStore::writeAudioFramePacket(ZMPacket *zm_packet) {
//opkt.duration = out_frame ? out_frame->nb_samples : ipkt->duration;
// opkt.duration = av_rescale_q(ipkt->duration, audio_in_stream->time_base,
// audio_out_stream->time_base);
Debug(2, "opkt.pts (%d), opkt.dts(%d) opkt.duration = (%d)", opkt.pts,
opkt.dts, opkt.duration);
Debug(2, "opkt.pts (%" PRId64 "), opkt.dts(%" PRId64 ") opkt.duration = (%" PRId64 ")",
opkt.pts, opkt.dts, opkt.duration);
// pkt.pos: byte position in stream, -1 if unknown
opkt.pos = -1;
@ -1339,7 +1352,8 @@ int VideoStore::write_packets( zm_packetqueue &queue ) {
packet_count += 1;
//Write the packet to our video store
Debug(2, "Writing queued packet stream: %d KEY %d, remaining (%d)", avp->stream_index, avp->flags & AV_PKT_FLAG_KEY, queue.size() );
Debug(2, "Writing queued packet stream: %d KEY %d, remaining (%d)",
avp->stream_index, avp->flags & AV_PKT_FLAG_KEY, queue.size() );
int ret = this->writePacket( queued_packet );
if ( ret < 0 ) {
//Less than zero and we skipped a frame

View File

@ -18,8 +18,6 @@ class VideoStore;
class VideoStore {
private:
unsigned int packets_written;
unsigned int frame_count;
AVOutputFormat *out_format;
AVFormatContext *oc;
@ -37,14 +35,18 @@ int audio_in_stream_index;
// Move this into the object so that we aren't constantly allocating/deallocating it on the stack
AVPacket opkt;
// we are transcoding
AVFrame *video_in_frame;
AVFrame *in_frame;
AVFrame *out_frame;
AVCodecContext *video_in_ctx;
AVCodec *audio_in_codec;
AVCodecContext *audio_in_ctx;
int ret;
SWScale swscale;
unsigned int packets_written;
unsigned int frame_count;
// The following are used when encoding the audio stream to AAC
AVCodec *audio_out_codec;

View File

@ -1,183 +0,0 @@
//
// ZoneMinder Analysis Daemon, $Date$, $Revision$
// Copyright (C) 2001-2008 Philip Coombes
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
//
/*
=head1 NAME
zma - The ZoneMinder Analysis daemon
=head1 SYNOPSIS
zma -m <monitor_id>
zma --monitor <monitor_id>
zma -h
zma --help
zma -v
zma --version
=head1 DESCRIPTION
This is the component that goes through the captured frames and checks them
for motion which might generate an alarm or event. It generally keeps up with
the Capture daemon but if very busy may skip some frames to prevent it falling
behind.
=head1 OPTIONS
-m, --monitor_id - ID of the monitor to analyse
-h, --help - Display usage information
-v, --version - Print the installed version of ZoneMinder
=cut
*/
#include <getopt.h>
#include <signal.h>
#include "zm.h"
#include "zm_db.h"
#include "zm_signal.h"
#include "zm_monitor.h"
// Print zma command-line usage to stderr and exit successfully.
// Output is byte-identical to the original fprintf sequence.
void Usage() {
  static const char *const usage_lines[] = {
    "zma -m <monitor_id>\n",
    "Options:\n",
    " -m, --monitor <monitor_id> : Specify which monitor to use\n",
    " -h, --help : This screen\n",
    " -v, --version : Report the installed version of ZoneMinder\n",
  };
  for ( size_t i = 0; i < sizeof(usage_lines)/sizeof(usage_lines[0]); ++i )
    fputs( usage_lines[i], stderr );
  exit( 0 );
}
// Entry point for the ZoneMinder analysis daemon (zma).
// Parses a single required -m/--monitor option, loads that monitor from the
// database, then loops calling Monitor::Analyse() until a terminate signal
// (zm_terminate) is raised. Returns 0 in all exit paths.
int main( int argc, char *argv[] ) {
self = argv[0];
// Seed the PRNG; pid*time gives a per-process seed.
srand( getpid() * time( 0 ) );
int id = -1;  // monitor id; -1 means "not supplied" and triggers Usage()
static struct option long_options[] = {
{"monitor", 1, 0, 'm'},
{"help", 0, 0, 'h'},
{"version", 0, 0, 'v'},
{0, 0, 0, 0}
};
// Option-parsing loop; getopt_long returns -1 when arguments are exhausted.
while (1) {
int option_index = 0;
int c = getopt_long (argc, argv, "m:h:v", long_options, &option_index);
if ( c == -1 ) {
break;
}
switch (c) {
case 'm':
id = atoi(optarg);
break;
case 'h':
case '?':  // unknown option also shows usage (Usage() exits)
Usage();
break;
case 'v':
std::cout << ZM_VERSION << "\n";
exit(0);
default:
//fprintf( stderr, "?? getopt returned character code 0%o ??\n", c );
break;
}
}
// Any leftover non-option arguments are an error.
if (optind < argc) {
fprintf( stderr, "Extraneous options, " );
while (optind < argc)
printf ("%s ", argv[optind++]);
printf ("\n");
Usage();
}
// A monitor id is mandatory; note atoi() maps non-numeric input to 0,
// which is accepted here and only fails later at Monitor::Load().
if ( id < 0 ) {
fprintf( stderr, "Bogus monitor %d\n", id );
Usage();
exit( 0 );
}
// Per-process log identity, e.g. "zma_m3".
char log_id_string[16];
snprintf( log_id_string, sizeof(log_id_string), "zma_m%d", id );
zmLoadConfig();
logInit( log_id_string );
hwcaps_detect();
Monitor *monitor = Monitor::Load( id, true, Monitor::ANALYSIS );
if ( monitor ) {
Info( "In mode %d/%d, warming up", monitor->GetFunction(), monitor->Enabled() );
// Install default HUP/TERM/die handlers that set zm_reload/zm_terminate.
zmSetDefaultHupHandler();
zmSetDefaultTermHandler();
zmSetDefaultDieHandler();
sigset_t block_set;
sigemptyset( &block_set );  // NOTE: empty set, so the masking below blocks nothing
useconds_t analysis_rate = monitor->GetAnalysisRate();
unsigned int analysis_update_delay = monitor->GetAnalysisUpdateDelay();
time_t last_analysis_update_time, cur_time;
monitor->UpdateAdaptiveSkip();
last_analysis_update_time = time( 0 );
// Main analysis loop; runs until a TERM/die handler sets zm_terminate.
while( !zm_terminate ) {
// Process the next image
sigprocmask( SIG_BLOCK, &block_set, 0 );
// Some periodic updates are required for variable capturing framerate
if ( analysis_update_delay ) {
cur_time = time( 0 );
if ( (unsigned int)( cur_time - last_analysis_update_time ) > analysis_update_delay ) {
analysis_rate = monitor->GetAnalysisRate();
monitor->UpdateAdaptiveSkip();
last_analysis_update_time = cur_time;
}
}
// Analyse() returns false when no frame was processed; sleep according
// to whether the monitor is active or suspended.
if ( !monitor->Analyse() ) {
usleep( monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE );
} else if ( analysis_rate ) {
// Throttle to the configured analysis rate after a processed frame.
usleep( analysis_rate );
}
// SIGHUP handler sets zm_reload; re-read monitor config in-loop.
if ( zm_reload ) {
monitor->Reload();
zm_reload = false;
}
sigprocmask( SIG_UNBLOCK, &block_set, 0 );
}
delete monitor;
} else {
fprintf( stderr, "Can't find monitor with id of %d\n", id );
}
// Orderly shutdown: image subsystem, logger, database connection.
Image::Deinitialise();
logTerm();
zmDbClose();
return( 0 );
}

View File

@ -317,9 +317,9 @@ int main(int argc, char *argv[]) {
if ( next_delays[i] > 0 ) {
gettimeofday(&now, NULL);
DELTA_TIMEVAL(delta_time, now, last_capture_times[i], DT_PREC_3);
long sleep_time = next_delays[i]-delta_time.delta;
long sleep_time = next_delays[i] - delta_time.delta;
if ( sleep_time > 0 ) {
//Debug(2,"usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
Debug(2,"usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
usleep(sleep_time*(DT_MAXGRAN/DT_PREC_3));
}
last_capture_times[i] = now;

View File

@ -514,10 +514,10 @@ int main( int argc, char *argv[] ) {
}
if ( function & ZMU_EVENT ) {
if ( verbose )
printf( "Last event id: %d\n", monitor->GetLastEvent() );
printf( "Last event id: %d\n", monitor->GetLastEventId() );
else {
if ( have_output ) printf( "%c", separator );
printf( "%d", monitor->GetLastEvent() );
printf( "%d", monitor->GetLastEventId() );
have_output = true;
}
}
@ -712,7 +712,7 @@ int main( int argc, char *argv[] ) {
tv.tv_sec, tv.tv_usec/10000,
monitor->GetLastReadIndex(),
monitor->GetLastWriteIndex(),
monitor->GetLastEvent(),
monitor->GetLastEventId(),
monitor->GetFPS()
);
delete monitor;

View File

@ -116,7 +116,9 @@ switch ( $data['type'] ) {
case MSG_DATA_WATCH :
{
$data = unpack( "ltype/imonitor/istate/dfps/ilevel/irate/ddelay/izoom/Cdelayed/Cpaused/Cenabled/Cforced", $msg );
Logger::Debug("FPS: " . $data['fps'] );
$data['fps'] = round( $data['fps'], 2 );
Logger::Debug("FPS: " . $data['fps'] );
$data['rate'] /= RATE_BASE;
$data['delay'] = round( $data['delay'], 2 );
$data['zoom'] = round( $data['zoom']/SCALE_BASE, 1 );

View File

@ -189,21 +189,21 @@ function csrf_check($fatal = true) {
$tokens = '';
do {
if (!isset($_POST[$name])) {
Logger::Debug("POST[$name] is not set");
#Logger::Debug("POST[$name] is not set");
break;
} else {
Logger::Debug("POST[$name] is set as " . $_POST[$name] );
#} else {
#Logger::Debug("POST[$name] is set as " . $_POST[$name] );
}
// we don't regenerate a token and check it because some token creation
// schemes are volatile.
$tokens = $_POST[$name];
if (!csrf_check_tokens($tokens)) {
Logger::Debug("Failed checking tokens");
#Logger::Debug("Failed checking tokens");
break;
} else {
Logger::Debug("Token passed");
#} else {
#Logger::Debug("Token passed");
}
$ok = true;
} while (false);
@ -308,27 +308,27 @@ function csrf_check_tokens($tokens) {
* Checks if a token is valid.
*/
function csrf_check_token($token) {
Logger::Debug("Checking CSRF token $token");
#Logger::Debug("Checking CSRF token $token");
if (strpos($token, ':') === false) {
Logger::Debug("Checking CSRF token $token bad because no :");
#Logger::Debug("Checking CSRF token $token bad because no :");
return false;
}
list($type, $value) = explode(':', $token, 2);
if (strpos($value, ',') === false) {
Logger::Debug("Checking CSRF token $token bad because no ,");
#Logger::Debug("Checking CSRF token $token bad because no ,");
return false;
}
list($x, $time) = explode(',', $token, 2);
if ($GLOBALS['csrf']['expires']) {
if (time() > $time + $GLOBALS['csrf']['expires']) {
Logger::Debug("Checking CSRF token $token bad because expired");
#Logger::Debug("Checking CSRF token $token bad because expired");
return false;
}
}
switch ($type) {
case 'sid':
{
Logger::Debug("Checking sid: $value === " . csrf_hash(session_id(), $time) );
#Logger::Debug("Checking sid: $value === " . csrf_hash(session_id(), $time) );
return $value === csrf_hash(session_id(), $time);
}
case 'cookie':
@ -341,7 +341,7 @@ return false;
Logger::Debug("Checking key: no key set" );
return false;
}
Logger::Debug("Checking sid: $value === " . csrf_hash($GLOBALS['csrf']['key'], $time) );
#Logger::Debug("Checking sid: $value === " . csrf_hash($GLOBALS['csrf']['key'], $time) );
return $value === csrf_hash($GLOBALS['csrf']['key'], $time);
// We could disable these 'weaker' checks if 'key' was set, but
// that doesn't make me feel good then about the cookie-based

View File

@ -195,7 +195,7 @@ isset($action) || $action = NULL;
if ( ZM_ENABLE_CSRF_MAGIC && $action != 'login' && $view != 'view_video' && $view != 'video' && $request != 'control' && $view != 'frames') {
require_once( 'includes/csrf/csrf-magic.php' );
Logger::Debug("Calling csrf_check with the following values: \$request = \"$request\", \$view = \"$view\", \$action = \"$action\"");
#Logger::Debug("Calling csrf_check with the following values: \$request = \"$request\", \$view = \"$view\", \$action = \"$action\"");
csrf_check();
}

View File

@ -50,7 +50,10 @@ function logReport( level, message, file, line )
if ( !debugReq )
{
debugParms = "view=request&request=log&task=create&browser[name]="+Browser.name+"&browser[version]="+Browser.version+"&browser[platform]="+Browser.Platform.name;
if ( Browser )
debugParms = "view=request&request=log&task=create&browser[name]="+Browser.name+"&browser[version]="+Browser.version+"&browser[platform]="+(Browser.Platform?Browser.Platform.name:'unknown');
else
debugParms = "view=request&request=log&task=create&browser[name]=unknown&browser[version]=unknown&browser[platform]=unknown";
debugReq = new Request.JSON( { url: thisUrl, method: 'post', timeout: AJAX_TIMEOUT, link: 'chain' } );
}
var requestParms = debugParms;

View File

@ -4,7 +4,7 @@
display: inline-flex;
border: 1px solid black;
width: 25%;
padding: 9px;
padding: 4px;
}
#ScaleDiv label,
#SpeedDiv label {

View File

@ -114,7 +114,6 @@ for ( $i = 0; $i < count($displayMonitors); $i++ ) {
if ( $maxHeight < $scaleHeight ) $maxHeight = $scaleHeight;
}
$monitor['zmc'] = zmcStatus( $monitor );
#$monitor['zma'] = zmaStatus( $monitor );
$zoneCount += $monitor['ZoneCount'];
$counts = array();
@ -189,9 +188,9 @@ for( $monitor_i = 0; $monitor_i < count($displayMonitors); $monitor_i += 1 ) {
$dclass = 'errorText';
} else {
// https://github.com/ZoneMinder/ZoneMinder/issues/1082
if ( !$monitor['zma'] && $monitor['Function']!='Monitor' )
$dclass = 'warnText';
else
//if ( a'] && $monitor['Function']!='Monitor' )
//$dclass = 'warnText';
//else
$dclass = 'infoText';
}
if ( $monitor['Function'] == 'None' )

View File

@ -42,7 +42,7 @@ function evaluateLoadTimes() {
imageLoadTimesEvaluated=0;
setSpeed(speedIndex);
$('fps').innerHTML="Display refresh rate is " + (1000 / currentDisplayInterval).toFixed(1) + " per second, avgFrac=" + avgFrac.toFixed(3) + ".";
}
} // end evaluateLoadTimes()
// time is seconds since epoch
function SetImageSource( monId, time ) {
@ -465,12 +465,12 @@ function setSpeed( speed_index ) {
playSecsperInterval = Math.floor( 1000 * currentSpeed * currentDisplayInterval ) / 1000000;
console.log("playSecsPerInterval: " + playSecsperInterval + " = currentspeed:" + currentSpeed + " * " + currentDisplayInterval + " /1000");
showSpeed(speed_index);
if ( timerInterval != currentDisplayInterval || currentSpeed == 0 ) timerFire(); // if the timer isn't firing we need to trigger it to update
if ( timerInterval != currentDisplayInterval || currentSpeed == 0 ) timerFire(); // if the timer isn't firing we need to trigger it to update
}
function setLive(value) {
liveMode = value;
redrawScreen();
changeDateTime();
}
@ -510,16 +510,12 @@ function clicknav(minSecs,maxSecs,live) {// we use the current time if we can
if ( live == 1 )
liveStr="&live=1";
var fitStr="&fit=0";
if ( fitMode == 1 )
fitStr="&fit=1";
var zoomStr="";
for ( var i=0; i < numMonitors; i++ )
if ( monitorZoomScale[monitorPtr[i]] < 0.99 || monitorZoomScale[monitorPtr[i]] > 1.01 ) // allow for some up/down changes and just treat as 1 of almost 1
zoomStr += "&z" + monitorPtr[i].toString() + "=" + monitorZoomScale[monitorPtr[i]].toFixed(2);
var uri = "?view=" + currentView + fitStr + groupStr + minStr + maxStr + currentStr + intervalStr + liveStr + zoomStr + "&scale=" + $j("#scaleslider")[0].value + "&speed=" + speeds[$j("#speedslider")[0].value];
var uri = "?view=" + currentView + '&fit='+(fitMode==1?'1':'0') + groupStr + minStr + maxStr + currentStr + intervalStr + liveStr + zoomStr + "&scale=" + $j("#scaleslider")[0].value + "&speed=" + speeds[$j("#speedslider")[0].value];
window.location = uri;
} // end function clicknav

View File

@ -31,14 +31,18 @@ var groupStr=<?php echo $group_id ? "'&group=$group_id'" : '""'; ?>;
// Because we might not have time as the criteria, figure out the min/max time when we run the query
$minTimeSecs = strtotime('2036-01-01 01:01:01');
$maxTimeSecs = strtotime('1950-01-01 01:01:01');
if ( ! $maxTimeSecs )
$maxTimeSecs = time();
if ( ! $minTimeSecs )
$minTimeSecs = strtotime('2010-01-01 01:01:01');
// This builds the list of events that are eligible from this range
$index = 0;
$anyAlarms = false;
if ( ! $initialModeIsLive ) {
$result = dbQuery( $eventsSql );
if ( ! $result ) {
Fatal('SQL-ERR');
@ -81,6 +85,7 @@ if ( !isset($minTime) || !isset($maxTime) ) {
$minTime = strftime($minTimeSecs);
} else {
$minTimeSecs = strtotime($minTime);
$maxTimeSecs = strtotime($maxTime);
}
@ -136,6 +141,7 @@ if ( $mId > 0 ) {
}
echo "var maxScore=$maxScore;\n"; // used to skip frame load if we find no alarms.
} // end if initialmodeislive
echo "var monitorName = [];\n";
echo "var monitorLoading = [];\n";
echo "var monitorImageObject = [];\n";

View File

@ -106,6 +106,7 @@ if ( !isset($_REQUEST['minTime']) && !isset($_REQUEST['maxTime']) ) {
$time = time();
$maxTime = strftime("%FT%T",$time);
$minTime = strftime("%FT%T",$time - 3600);
Logger::Debug("Defaulting to $minTime to $maxTime");
}
if ( isset($_REQUEST['minTime']) )
$minTime = validHtmlStr($_REQUEST['minTime']);
@ -113,7 +114,7 @@ if ( isset($_REQUEST['minTime']) )
if ( isset($_REQUEST['maxTime']) )
$maxTime = validHtmlStr($_REQUEST['maxTime']);
// AS a special case a "all" is passed in as an exterme interval - if so , clear them here and let the database query find them
// AS a special case a "all" is passed in as an extreme interval - if so, clear them here and let the database query find them
if ( (strtotime($maxTime) - strtotime($minTime))/(365*24*3600) > 30 ) {
// test years
@ -161,6 +162,7 @@ $eventsSql .= ' GROUP BY E.Id,E.Name,E.StartTime,E.Length,E.Frames,E.MaxScore,E.
if ( isset($minTime) && isset($maxTime) ) {
$minTimeSecs = strtotime($minTime);
$maxTimeSecs = strtotime($maxTime);
Logger::Debug("Min/max time secs: $minTimeSecs $maxTimeSecs");
$eventsSql .= " HAVING CalcEndTimeSecs > '" . $minTimeSecs . "' AND StartTimeSecs < '" . $maxTimeSecs . "'";
$frameSql .= " AND TimeStamp > '" . $minTime . "' AND TimeStamp < '" . $maxTime . "'";
}
@ -190,31 +192,31 @@ xhtmlHeaders(__FILE__, translate('MontageReview') );
<input type="datetime-local" name="maxTime" id="maxTime" value="<?php echo preg_replace('/ /', 'T', $maxTime ) ?>" onchange="changeDateTime(this);">
</div>
<div id="ScaleDiv">
<label for="scaleslider"><?php echo translate('Scale')?></label>
<input id="scaleslider" type="range" min="0.1" max="1.0" value="<?php echo $defaultScale ?>" step="0.10" onchange="setScale(this.value);" oninput="showScale(this.value);"/>
<span id="scaleslideroutput"><?php echo number_format((float)$defaultScale,2,'.','')?> x</span>
<label for="scaleslider"><?php echo translate('Scale')?></label>
<input id="scaleslider" type="range" min="0.1" max="1.0" value="<?php echo $defaultScale ?>" step="0.10" onchange="setScale(this.value);" oninput="showScale(this.value);"/>
<span id="scaleslideroutput"><?php echo number_format((float)$defaultScale,2,'.','')?> x</span>
</div>
<div id="SpeedDiv">
<label for="speedslider"><?php echo translate('Speed') ?></label>
<input id="speedslider" type="range" min="0" max="<?php echo count($speeds)-1?>" value="<?php echo $speedIndex ?>" step="1" onchange="setSpeed(this.value);" oninput="showSpeed(this.value);"/>
<span id="speedslideroutput"><?php echo $speeds[$speedIndex] ?> fps</span>
<label for="speedslider"><?php echo translate('Speed') ?></label>
<input id="speedslider" type="range" min="0" max="<?php echo count($speeds)-1?>" value="<?php echo $speedIndex ?>" step="1" onchange="setSpeed(this.value);" oninput="showSpeed(this.value);"/>
<span id="speedslideroutput"><?php echo $speeds[$speedIndex] ?> fps</span>
</div>
<div style="display: inline-flex; border: 1px solid black; flex-flow: row wrap;">
<button type="button" id="panleft" onclick="click_panleft();" >&lt; <?php echo translate('Pan') ?></button>
<button type="button" id="zoomin" onclick="click_zoomin();" ><?php echo translate('In +') ?></button>
<button type="button" id="zoomout" onclick="click_zoomout();" ><?php echo translate('Out -') ?></button>
<button type="button" id="lasteight" onclick="click_lastEight();" ><?php echo translate('8 Hour') ?></button>
<button type="button" id="lasthour" onclick="click_lastHour();" ><?php echo translate('1 Hour') ?></button>
<button type="button" id="allof" onclick="click_all_events();" ><?php echo translate('All Events') ?></button>
<button type="button" id="live" onclick="setLive(1-liveMode);"><?php echo translate('Live') ?></button>
<button type="button" id="fit" onclick="setFit(1-fitMode);" ><?php echo translate('Fit') ?></button>
<button type="button" id="panright" onclick="click_panright();" ><?php echo translate('Pan') ?> &gt;</button>
<button type="button" id="panleft" onclick="click_panleft();" >&lt; <?php echo translate('Pan') ?></button>
<button type="button" id="zoomin" onclick="click_zoomin();" ><?php echo translate('In +') ?></button>
<button type="button" id="zoomout" onclick="click_zoomout();" ><?php echo translate('Out -') ?></button>
<button type="button" id="lasteight" onclick="click_lastEight();" ><?php echo translate('8 Hour') ?></button>
<button type="button" id="lasthour" onclick="click_lastHour();" ><?php echo translate('1 Hour') ?></button>
<button type="button" id="allof" onclick="click_all_events();" ><?php echo translate('All Events') ?></button>
<button type="button" id="live" onclick="setLive(1-liveMode);"><?php echo translate('Live') ?></button>
<button type="button" id="fit" onclick="setFit(1-fitMode);" ><?php echo translate('Fit') ?></button>
<button type="button" id="panright" onclick="click_panright();" ><?php echo translate('Pan') ?> &gt;</button>
</div>
<div id="timelinediv">
<canvas id="timeline" onmousemove="mmove(event);" ontouchmove="tmove(event);" onmousedown="mdown(event);" onmouseup="mup(event);" onmouseout="mout(event);"></canvas>
<span id="scrubleft"></span>
<span id="scrubright"></span>
<span id="scruboutput"></span>
<canvas id="timeline" onmousemove="mmove(event);" ontouchmove="tmove(event);" onmousedown="mdown(event);" onmouseup="mup(event);" onmouseout="mout(event);"></canvas>
<span id="scrubleft"></span>
<span id="scrubright"></span>
<span id="scruboutput"></span>
</div>
</div>
</div>