seems to work
commit 0865201e1e

@@ -41,7 +41,7 @@ int AnalysisThread::run() {
     if ( !monitor->Analyse() ) {
       Debug(2, "Sleeping for %d", monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE);
-      usleep(100*(monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE));
+      usleep(10*(monitor->Active()?ZM_SAMPLE_RATE:ZM_SUSPENDED_RATE));
     } else if ( analysis_rate ) {
       Debug(2, "Sleeping for %d", analysis_rate);
       usleep(analysis_rate);

@@ -429,7 +429,7 @@ void Event::AddFramesInternal( int n_frames, int start_frame, Image **images, st
 void Event::AddPacket( ZMPacket *packet, int score, Image *alarm_image ) {

-  have_video_keyframe = have_video_keyframe || ( packet->codec_type == AVMEDIA_TYPE_VIDEO && ( packet->packet.flags & AV_PKT_FLAG_KEY ) );
+  have_video_keyframe = have_video_keyframe || ( ( packet->codec_type == AVMEDIA_TYPE_VIDEO ) && packet->keyframe );
   if ( videoStore ) {
     if ( have_video_keyframe ) {
       videoStore->writePacket( packet );

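Note: this hunk (and the clearQueue hunks below) swap direct AV_PKT_FLAG_KEY tests for a cached ZMPacket::keyframe member; the zm_packet.cpp hunk below clears it in reset(). The diff never shows where the flag is set, so the following is only a hypothetical sketch of how it would be populated at capture time (set_packet and its body are assumptions, not code from this commit):

    // Hypothetical sketch -- not part of this diff.
    void ZMPacket::set_packet( AVPacket *p ) {
      av_packet_ref( &packet, p );
      // Cache the keyframe bit once, so consumers (Event::AddPacket,
      // zm_packetqueue::clearQueue) need not reach into AVPacket flags,
      // and decoded frames that never had an AVPacket can set it directly.
      keyframe = p->flags & AV_PKT_FLAG_KEY;
    }
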
@@ -1944,7 +1944,6 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
     //vid_buf.memory = V4L2_MEMORY_MMAP;
     vid_buf.memory = v4l2_data.reqbufs.memory;

-    Debug( 3, "Capturing %d frames", captures_per_frame );
     while ( captures_per_frame ) {
       Debug( 3, "Capturing %d frames", captures_per_frame );
       if ( vidioctl( vid_fd, VIDIOC_DQBUF, &vid_buf ) < 0 ) {

@@ -1955,7 +1954,7 @@ int LocalCamera::Capture( ZMPacket &zm_packet ) {
         }
         return -1;
       }
-      Debug( 3, "Capturing %d frames", captures_per_frame );
+      Debug(3, "Captured a frame");

       v4l2_data.bufptr = &vid_buf;
       capture_frame = v4l2_data.bufptr->index;

@@ -159,7 +159,7 @@ public:
   int Capture(ZMPacket &p);
   int PostCapture();
   static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose );
-  AVStream* get_VideoStream();
+  AVStream* get_VideoStream();
 };

 #endif // ZM_HAS_V4L

@@ -542,7 +542,7 @@ bool Monitor::connect() {
     shared_timestamps = (struct timeval *)((char *)video_store_data + sizeof(VideoStoreData));
     shared_images = (unsigned char *)((char *)shared_timestamps + (image_buffer_count*sizeof(struct timeval)));

-    analysis_it = packetqueue.pktQueue.begin();
+    packetqueue = NULL;

     if ( ((unsigned long)shared_images % 64) != 0 ) {
       /* Align images buffer to nearest 64 byte boundary */

@@ -574,7 +574,8 @@ Monitor::~Monitor() {
       delete videoStore;
       videoStore = NULL;
     }
-    packetqueue.clearQueue();
+    delete packetqueue;
+    packetqueue = NULL;

     if ( timestamps ) {
       delete[] timestamps;

@@ -730,11 +731,6 @@ ZMPacket *Monitor::getSnapshot( int index ) {
   }
   return &image_buffer[index];

-  for ( std::list<ZMPacket *>::iterator it = packetqueue.pktQueue.begin(); it != packetqueue.pktQueue.end(); ++it ) {
-    ZMPacket *zm_packet = *it;
-    if ( zm_packet->image_index == index )
-      return zm_packet;
-  }
   return NULL;
 }

@@ -1243,28 +1239,13 @@ bool Monitor::Analyse() {

   // if have event, sent frames until we find a video packet, at which point do analysis. Adaptive skip should only affect which frames we do analysis on.

-  // Keeps minimum video frames. This should be ok, because we SHOULD be staying close to the head of the queue in analysis
-  if ( ! event ) {
-    if ( packetqueue.video_packet_count > pre_event_count ) {
-      mutex.lock();
-      packetqueue.clearQueue( pre_event_count, video_stream_id );
-      mutex.unlock();
-    }
-  }
-  // If do have an event, then analysis_it should point to the head of the queue, because we would have emptied it on event creation.
-  unsigned int index = ( shared_data->last_read_index + 1 ) % image_buffer_count;
-
-  if ( ! packetqueue.size() ) {
-    Debug(2, "PacketQueue is empty" );
-    return false;
-  }
-
   struct timeval now;
   gettimeofday(&now, NULL);
   int packets_processed = 0;

   ZMPacket *snap;
-  while ( ( snap = packetqueue.get_analysis_packet() ) && ( snap->score == -1 ) ) {
+  while ( ( snap = packetqueue->get_analysis_packet() ) && ( snap->score == -1 ) ) {
     Debug(2, "Analysis index (%d), last_Write(%d)", index, shared_data->last_write_index);
     packets_processed += 1;

@@ -1273,9 +1254,9 @@ bool Monitor::Analyse() {
       Debug(2, "Analysing image (%d)", snap->image_index );
       if ( snap->image_index == -1 ) {
         Debug(2, "skipping because audio");
-        if ( ! packetqueue.increment_analysis_it() ) {
+        if ( ! packetqueue->increment_analysis_it() ) {
           Debug(2, "No more packets to analyse");
-          break;
+          return false;
         }
         continue;
       }

@@ -1478,7 +1459,7 @@ bool Monitor::Analyse() {
           }
         } // end foreach zone
         if ( got_anal_image ) {
-          (*analysis_it)->analysis_image = anal_image;
+          snap->analysis_image = anal_image;
         } else {
           delete anal_image;
         }

@@ -1522,7 +1503,7 @@ bool Monitor::Analyse() {
     if ( event ) {
       ZMPacket *queued_packet;
       //popPacket will increment analysis_it if neccessary, so this will write out all packets in queue
-      while ( ( queued_packet = packetqueue.popPacket() ) ) {
+      while ( ( queued_packet = packetqueue->popPacket() ) ) {
         Debug(2,"adding packet (%x) (%d)", queued_packet, queued_packet->image_index );
         event->AddPacket( queued_packet );
         if ( queued_packet->image_index == -1 ) {

@@ -1531,10 +1512,12 @@ bool Monitor::Analyse() {
       }
     } // end while write out queued_packets
   } else {
-    packetqueue.increment_analysis_it();
+    packetqueue->increment_analysis_it();
   }

   shared_data->last_read_index = snap->image_index;
+  struct timeval now;
+  gettimeofday(&now, NULL);
   shared_data->last_read_time = now.tv_sec;
   analysis_image_count++;
 } // end while not at end of packetqueue

@@ -2751,7 +2734,6 @@ Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
 * Returns -1 on failure.
 */
 int Monitor::Capture() {
-  mutex.lock();
   static int FirstCapture = 1; // Used in de-interlacing to indicate whether this is the even or odd image

   unsigned int index = image_count % image_buffer_count;

@@ -2785,7 +2767,6 @@ int Monitor::Capture() {

     if ( FirstCapture ) {
       FirstCapture = 0;
-      mutex.unlock();
       return 0;
     }
   } else {

@@ -2800,7 +2781,6 @@ int Monitor::Capture() {
       signalcolor = rgb_convert(signal_check_colour, ZM_SUBPIX_ORDER_BGR);
       capture_image->Fill(signalcolor);
       shared_data->signal = false;
-      mutex.unlock();
       return -1;
     } else if ( captureResult > 0 ) {

@@ -2808,14 +2788,15 @@ int Monitor::Capture() {

     if ( packet->packet.stream_index != video_stream_id ) {
       Debug(2, "Have audio packet (%d) != videostream_id:(%d) q.vpktcount(%d) event?(%d) ",
-          packet->packet.stream_index, video_stream_id, packetqueue.video_packet_count, ( event ? 1 : 0 ) );
+          packet->packet.stream_index, video_stream_id, packetqueue->video_packet_count, ( event ? 1 : 0 ) );
       // Only queue if we have some video packets in there.
-      if ( packetqueue.video_packet_count || event ) {
+      mutex.lock();
+      if ( packetqueue->video_packet_count || event ) {
         // Need to copy it into another ZMPacket.
         ZMPacket *audio_packet = new ZMPacket( *packet );
         audio_packet->codec_type = camera->get_AudioStream()->codecpar->codec_type;
         Debug(2, "Queueing packet");
-        packetqueue.queuePacket( audio_packet );
+        packetqueue->queuePacket( audio_packet );
       }
       // Don't update last_write_index because that is used for live streaming
       //shared_data->last_write_time = image_buffer[index].timestamp->tv_sec;

@@ -2833,14 +2814,18 @@ int Monitor::Capture() {
       packet->get_image();
     }
     // Have an av_packet,
-    if ( packetqueue.video_packet_count || ( packet->packet.flags & AV_PKT_FLAG_KEY ) || event ) {
+    mutex.lock();
+    if ( packetqueue->video_packet_count || packet->keyframe || event ) {
       //Debug(2, "Queueing video packet");
-      packetqueue.queuePacket( packet );
+      packetqueue->queuePacket( packet );
     }
+    mutex.unlock();
   } else {
+    mutex.lock();
     // Non-avpackets are all keyframes.
-    //Debug(2, "Queueing video packet");
-    packetqueue.queuePacket( packet );
+    Debug(2, "Queueing decoded video packet");
+    packetqueue->queuePacket( packet );
+    mutex.unlock();
   }

   /* Deinterlacing */

@@ -2910,9 +2895,6 @@ int Monitor::Capture() {
     } // end if result
   } // end if deinterlacing

-  Debug(2,"Capture unlock");
-  mutex.unlock();
-
   // Icon: I'm not sure these should be here. They have nothing to do with capturing
   if ( shared_data->action & GET_SETTINGS ) {
     shared_data->brightness = camera->Brightness();

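Note: taken together, the Monitor::Capture() hunks above replace one lock held across the whole capture pass with short lock/unlock pairs around the packetqueue mutations only. A standalone sketch of that pattern (std::mutex and stand-in types here, not ZoneMinder's own Mutex class):

    // Only the shared-queue mutation is serialised; grabbing, decoding and
    // deinterlacing run outside the critical section.
    #include <list>
    #include <mutex>

    static std::mutex queue_mutex;
    static std::list<int> shared_queue;

    void capture_one( int frame ) {
      // ... capture + decode, unlocked ...
      {
        std::lock_guard<std::mutex> lock( queue_mutex );  // like the diff's lock()/unlock() pairs
        shared_queue.push_back( frame );
      }
      // ... signal checks / stats, unlocked ...
    }
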
@@ -3208,7 +3190,10 @@ unsigned int Monitor::SubpixelOrder() const { return( camera->SubpixelOrder() );

 int Monitor::PrimeCapture() {
   int ret = camera->PrimeCapture();
-  video_stream_id = ret ? -1 : camera->get_VideoStreamId();
+  if ( ret == 0 ) {
+    video_stream_id = camera->get_VideoStreamId();
+    packetqueue = new zm_packetqueue( pre_event_buffer_count, video_stream_id );
+  }
   Debug(2, "Video stream id is (%d)", video_stream_id );
   return ret;
 }

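Note: with this hunk the packetqueue moves from a by-value member to a heap object: connect() nulls the pointer, PrimeCapture() allocates it once the stream id is known, and ~Monitor() deletes it. A standalone sketch of that lifecycle using std::unique_ptr, an alternative the commit itself does not use (types here are stand-ins):

    #include <memory>

    struct PacketQueue {
      PacketQueue( unsigned max_video_packets, int stream_id ) {}
    };

    struct Monitor {
      std::unique_ptr<PacketQueue> packetqueue;  // null until primed
      int video_stream_id = -1;

      int PrimeCapture( int prime_result, int stream_id, unsigned pre_event_count ) {
        if ( prime_result == 0 ) {
          video_stream_id = stream_id;
          // Equivalent of "packetqueue = new zm_packetqueue(...)" in the diff,
          // but the destructor then needs no explicit delete.
          packetqueue.reset( new PacketQueue( pre_event_count, video_stream_id ) );
        }
        return prime_result;
      }
    };
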
@@ -164,9 +164,8 @@ protected:
   } VideoStoreData;

   VideoStore *videoStore;
-  zm_packetqueue packetqueue;
+  zm_packetqueue *packetqueue;
   Mutex mutex;
-  std::list<ZMPacket *>::iterator analysis_it; // Iterator into the packetqueue. Theoretically points to the most recently analyzed packet

   class MonitorLink {
     protected:

@@ -106,6 +106,7 @@ void ZMPacket::reset() {
   }
 #endif
   score = -1;
+  keyframe = 0;
 }

 int ZMPacket::decode( AVCodecContext *ctx ) {

@@ -24,7 +24,9 @@
 #define VIDEO_QUEUESIZE 200
 #define AUDIO_QUEUESIZE 50

-zm_packetqueue::zm_packetqueue() {
+zm_packetqueue::zm_packetqueue( unsigned int video_image_count, int p_video_stream_id ) {
+  video_stream_id = p_video_stream_id;
+  max_video_packet_count = video_image_count;
   video_packet_count = 0;
   analysis_it = pktQueue.begin();
 }

@@ -35,8 +37,11 @@ zm_packetqueue::~zm_packetqueue() {

 bool zm_packetqueue::queuePacket( ZMPacket* zm_packet ) {
   pktQueue.push_back( zm_packet );
-  if ( zm_packet->codec_type == AVMEDIA_TYPE_VIDEO )
+  if ( zm_packet->codec_type == AVMEDIA_TYPE_VIDEO ) {
     video_packet_count += 1;
+    if ( video_packet_count > max_video_packet_count )
+      clearQueue( max_video_packet_count, video_stream_id );
+  }

   if ( analysis_it == pktQueue.end() ) {
     // ANalsys_it should only point to end when queue it empty

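Note: this is the hunk that makes the queue self-bounding: past max_video_packet_count video packets, queuePacket() calls clearQueue() to trim the oldest. A minimal standalone sketch of the same behaviour (stand-in types; ZoneMinder's real clearQueue additionally trims only back to a keyframe so the head of the queue stays decodable):

    #include <cstdio>
    #include <list>

    struct Pkt { bool is_video; };

    struct Queue {
      std::list<Pkt*> q;
      unsigned video_count = 0, max_video = 3;

      void push( Pkt *p ) {
        q.push_back( p );
        if ( p->is_video ) {
          video_count += 1;
          if ( video_count > max_video )   // same trigger as the diff
            trim_to( max_video );
        }
      }
      void trim_to( unsigned keep ) {
        while ( video_count > keep ) {     // drop oldest from the front
          Pkt *p = q.front(); q.pop_front();
          if ( p->is_video ) video_count -= 1;
          delete p;
        }
      }
    };

    int main() {
      Queue q;
      for ( int i = 0; i < 10; i++ ) q.push( new Pkt{true} );
      std::printf( "video packets kept: %u\n", q.video_count );  // prints 3
      for ( Pkt *p : q.q ) delete p;
    }
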
@@ -81,7 +86,7 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
       AVPacket *av_packet = &(zm_packet->packet);

       Debug(4, "Looking at packet with stream index (%d) with keyframe(%d), frames_to_keep is (%d)",
-          av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), frames_to_keep );
+          av_packet->stream_index, zm_packet->keyframe, frames_to_keep );

       // Want frames_to_keep video frames. Otherwise, we may not have enough
       if ( av_packet->stream_index == stream_id ) {

@@ -94,8 +99,6 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
     Debug(4, "Hit end of queue, still need (%d) video keyframes", frames_to_keep );
   } else {
-    if ( it != pktQueue.rend() ) {
-      Debug(4, "Not rend");

     ZMPacket *zm_packet = *it;
     Debug(4, "packet %x %d", zm_packet, zm_packet->image_index);

@@ -103,10 +106,10 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
     while (
         ( it != pktQueue.rend() )
         &&
-        (( av_packet->stream_index != stream_id ) || ! ( av_packet->flags & AV_PKT_FLAG_KEY ))
+        (( av_packet->stream_index != stream_id ) || !zm_packet->keyframe )
         ) {
       zm_packet = *it;
-      Debug(4, "packet %x %d", zm_packet, zm_packet->image_index);
+      //Debug(4, "packet %x %d", zm_packet, zm_packet->image_index);
       ++it;
       av_packet = &( (*it)->packet );
     }

@@ -127,7 +130,7 @@ unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream
       delete packet;

       delete_count += 1;
     }
-  }
+  } // while our iterator is not the first packet
   Debug(3, "Deleted (%d) packets", delete_count );
   return delete_count;
 } // end unsigned int zm_packetqueue::clearQueue( unsigned int frames_to_keep, int stream_id )

@@ -152,64 +155,6 @@ unsigned int zm_packetqueue::get_video_packet_count() {
   return video_packet_count;
 }

-void zm_packetqueue::clear_unwanted_packets( timeval *recording_started, int mVideoStreamId ) {
-  // Need to find the keyframe <= recording_started. Can get rid of audio packets.
-  if ( pktQueue.empty() ) {
-    return;
-  }
-
-  // Step 1 - find keyframe < recording_started.
-  // Step 2 - pop packets until we get to the packet in step 2
-  std::list<ZMPacket *>::reverse_iterator it;
-
-  Debug(3, "Looking for keyframe after start recording stream id (%d)", mVideoStreamId );
-  for ( it = pktQueue.rbegin(); it != pktQueue.rend(); ++ it ) {
-    ZMPacket *zm_packet = *it;
-    AVPacket *av_packet = &(zm_packet->packet);
-    if (
-        ( av_packet->flags & AV_PKT_FLAG_KEY )
-        &&
-        ( av_packet->stream_index == mVideoStreamId )
-        &&
-        timercmp( zm_packet->timestamp, recording_started, < )
-       ) {
-      Debug(3, "Found keyframe before start with stream index (%d) with keyframe (%d)", av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ) );
-      break;
-    }
-  }
-  if ( it == pktQueue.rend() ) {
-    Debug(1, "Didn't find a keyframe packet keeping all" );
-    return;
-  }
-
-  ZMPacket *zm_packet = *it;
-  AVPacket *av_packet = &(zm_packet->packet);
-  Debug(3, "Found packet before start with stream index (%d) with keyframe (%d), distance(%d), size(%d)",
-      av_packet->stream_index,
-      ( av_packet->flags & AV_PKT_FLAG_KEY ),
-      distance( it, pktQueue.rend() ),
-      pktQueue.size() );
-
-  unsigned int deleted_frames = 0;
-  ZMPacket *packet = NULL;
-  while ( distance( it, pktQueue.rend() ) > 1 ) {
-  //while ( pktQueue.rend() != it ) {
-    packet = pktQueue.front();
-    pktQueue.pop_front();
-    if ( packet->image_index == -1 )
-      delete packet;
-    deleted_frames += 1;
-  }
-
-  zm_packet = pktQueue.front();
-  av_packet = &(zm_packet->packet);
-  if ( ( ! ( av_packet->flags & AV_PKT_FLAG_KEY ) ) || ( av_packet->stream_index != mVideoStreamId ) ) {
-    Error( "Done looking for keyframe. Deleted %d frames. Remaining frames in queue: %d stream of head packet is (%d), keyframe (%d), distance(%d), packets(%d)", deleted_frames, pktQueue.size(), av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), distance( it, pktQueue.rend() ), pktQueue.size() );
-  } else {
-    Debug(1, "Done looking for keyframe. Deleted %d frames. Remaining frames in queue: %d stream of head packet is (%d), keyframe (%d), distance(%d), packets(%d)", deleted_frames, pktQueue.size(), av_packet->stream_index, ( av_packet->flags & AV_PKT_FLAG_KEY ), distance( it, pktQueue.rend() ), pktQueue.size() );
-  }
-} // end void zm_packetqueue::clear_unwanted_packets( timeval *recording_started, int mVideoStreamId )
-
 // Returns a packet to analyse or NULL
 ZMPacket *zm_packetqueue::get_analysis_packet() {

@@ -35,10 +35,12 @@ class zm_packetqueue {
     std::list<ZMPacket *> pktQueue;
     std::list<ZMPacket *>::iterator analysis_it;

+    int video_stream_id;
     int video_packet_count; // keep track of how many video packets we have, because we shouldn't have more than image_buffer_count
+    unsigned int max_video_packet_count;

   public:
-    zm_packetqueue();
+    zm_packetqueue( unsigned int p_max_video_packet_count, int p_video_stream_id );
     virtual ~zm_packetqueue();
     bool queuePacket( ZMPacket* packet );
     ZMPacket * popPacket( );

@@ -140,14 +140,14 @@ VideoStore::VideoStore(
     Debug(3, "Have orientation");
     if ( orientation == Monitor::ROTATE_0 ) {
     } else if ( orientation == Monitor::ROTATE_90 ) {
-      dsr = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
-      if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
+      ret = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
+      if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
     } else if ( orientation == Monitor::ROTATE_180 ) {
-      dsr = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
-      if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
+      ret = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
+      if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
     } else if ( orientation == Monitor::ROTATE_270 ) {
-      dsr = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
-      if ( dsr < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
+      ret = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
+      if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
     } else {
       Warning("Unsupported Orientation(%d)", orientation);
     }

@@ -188,8 +188,8 @@ VideoStore::VideoStore(
   } else {

     /** Create a new frame to store the */
-    if ( !(in_frame = zm_av_frame_alloc()) ) {
-      Error("Could not allocate in frame");
+    if ( !(video_in_frame = zm_av_frame_alloc()) ) {
+      Error("Could not allocate video_in frame");
       return;
     }
     // Don't have an input stream, so need to tell it what we are sending it, or are transcoding

@@ -238,6 +238,9 @@ VideoStore::VideoStore(
   video_out_ctx->qcompress = 0.6;
   video_out_ctx->bit_rate = 4000000;
 #endif
+  video_out_ctx->max_b_frames = 1;
+  if (video_out_ctx->codec_id == AV_CODEC_ID_H264)
+    av_opt_set(video_out_ctx->priv_data, "preset", "superfast", 0);

   AVDictionary *opts = 0;
   std::string Options = monitor->GetEncoderOptions();

@@ -251,23 +254,6 @@ VideoStore::VideoStore(
     }
   }

-#if 0
-  if ( ! av_dict_get( opts, "preset", NULL, 0 ) ) {
-    Debug(2,"Setting preset to superfast");
-    av_dict_set( &opts, "preset", "superfast", 0 );
-  }
-  if ( ! av_dict_get( opts, "crf", NULL, 0 ) ) {
-    Debug(2,"Setting crf to superfast");
-    av_dict_set( &opts, "crf", "0", 0 );
-  }
-#endif
-#if 0
-  if ( ! av_dict_get( opts, "tune", NULL, 0 ) ) {
-    Debug(2,"Setting tune to zerolatency");
-    av_dict_set( &opts, "tune", "zerolatency", 0 );
-  }
-#endif
-
   if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
     Warning("Can't open video codec (%s)! %s, trying h264",
         video_out_codec->name,

@@ -282,20 +268,6 @@ VideoStore::VideoStore(
       return;
     }
   }
-#if 0
-  if ( ! av_dict_get( opts, "preset", NULL, 0 ) ) {
-    Debug(2,"Setting preset to superfast");
-    av_dict_set( &opts, "preset", "ultrafast", 0 );
-  }
-  if ( ! av_dict_get( opts, "crf", NULL, 0 ) ) {
-    Debug(2,"Setting crf to 0");
-    av_dict_set( &opts, "crf", "0", 0 );
-  }
-  if ( ! av_dict_get( opts, "tune", NULL, 0 ) ) {
-    Debug(2,"Setting tune to zerolatency");
-    av_dict_set( &opts, "tune", "zerolatency", 0 );
-  }
-#endif
   if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
     Error("Can't open video codec (%s)! %s",
         video_out_codec->name,

@@ -317,7 +289,7 @@ Debug(2,"Sucess opening codec");
     }
   } else {
     Error("Codec not set");
-  }// end if codec == h264
+  } // end if codec == h264

   swscale.SetDefaults(
       video_in_ctx->pix_fmt,

@@ -371,10 +343,10 @@ Error("Codec not set");
 #endif

   if ( audio_in_stream ) {
-    Debug(3, "Have audio stream");
     audio_in_stream_index = audio_in_stream->index;
+    Debug(3, "Have audio stream");
 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)

     audio_in_ctx = avcodec_alloc_context3(NULL);
     ret = avcodec_parameters_to_context(audio_in_ctx,
         audio_in_stream->codecpar);

@@ -464,6 +436,7 @@ Error("Codec not set");
   video_next_dts = 0;
   audio_next_pts = 0;
   audio_next_dts = 0;
+  Debug(2,"End VIdeoStore");
 } // VideoStore::VideoStore

 bool VideoStore::open() {

@@ -524,14 +497,14 @@ void VideoStore::write_audio_packet( AVPacket &pkt ) {

 VideoStore::~VideoStore() {
   if ( video_out_ctx->codec_id != video_in_ctx->codec_id || audio_out_codec ) {
-  Debug(2,"Different codecs between in and out");
-  // The codec queues data. We need to send a flush command and out
-  // whatever we get. Failures are not fatal.
-  AVPacket pkt;
+    Debug(2,"Different codecs between in and out");
+    // The codec queues data. We need to send a flush command and out
+    // whatever we get. Failures are not fatal.
+    AVPacket pkt;
     // WIthout these we seg fault I don't know why.
     pkt.data = NULL;
     pkt.size = 0;
-  av_init_packet(&pkt);
+    av_init_packet(&pkt);

     // I got crashes if the codec didn't do DELAY, so let's test for it.
 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)

@@ -561,8 +534,7 @@ Debug(2,"Different codecs between in and out");
       int got_packet = 0;
       ret = avcodec_encode_video2(video_out_ctx, &pkt, NULL, &got_packet);
       if ( ret < 0 ) {
-        Error("ERror encoding video while flushing (%d) (%s)", ret,
-            av_err2str(ret));
+        Error("ERror encoding video while flushing (%d) (%s)", ret, av_err2str(ret));
         break;
       }
       if (!got_packet) {

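Note: the destructor hunks here drain the encoders on the pre-3.x FFmpeg API, where passing a NULL frame to avcodec_encode_video2()/avcodec_encode_audio2() flushes the internal queue and got_packet drops to 0 once it is empty. av_init_packet() does not touch pkt.data or pkt.size, which is why the diff zeroes them by hand on every pass (the "seg fault" comment). A self-contained sketch of the same loop:

    // Drain loop on the legacy encode API (deprecated in newer FFmpeg).
    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    static void flush_video_encoder( AVCodecContext *ctx, void (*emit)(AVPacket *) ) {
      AVPacket pkt;
      while ( 1 ) {
        pkt.data = NULL;   // av_init_packet() leaves data/size alone,
        pkt.size = 0;      // so clear them before every iteration
        av_init_packet( &pkt );
        int got_packet = 0;
        int ret = avcodec_encode_video2( ctx, &pkt, NULL, &got_packet );
        if ( ret < 0 || !got_packet )
          break;           // error, or the encoder queue is empty
        emit( &pkt );      // caller timestamps/writes the packet
        av_packet_unref( &pkt );
      }
    }
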
@@ -583,6 +555,10 @@ Debug(2,"Different codecs between in and out");
   if ( audio_out_codec ) {
     // The codec queues data. We need to send a flush command and out
     // whatever we get. Failures are not fatal.
     AVPacket pkt;
+    // WIthout these we seg fault I don't know why.
+    pkt.data = NULL;
+    pkt.size = 0;
+    av_init_packet(&pkt);

 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)

@@ -597,6 +573,9 @@ Debug(2,"Different codecs between in and out");
     }
 #else
     while (1) {
+      pkt.data = NULL;
+      pkt.size = 0;
+      av_init_packet(&pkt);
       int got_packet = 0;
       if ( (ret = avcodec_encode_audio2(audio_out_ctx, &pkt, NULL, &got_packet)) < 0 ) {
         Error("ERror encoding audio while flushing (%d) (%s)", ret, av_err2str(ret));

@@ -721,6 +700,10 @@ bool VideoStore::setup_resampler() {
   }
   Debug(2, "Have audio out codec");

+  // Now copy them to the out stream
+  audio_out_stream = avformat_new_stream(oc, audio_out_codec);
+
 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
+  // audio_out_ctx = audio_out_stream->codec;
   audio_out_ctx = avcodec_alloc_context3(audio_out_codec);
   if ( !audio_out_ctx ) {

@@ -728,6 +711,9 @@ bool VideoStore::setup_resampler() {
     audio_out_stream = NULL;
     return false;
   }
+#else
+  audio_out_ctx = audio_out_stream->codec;
+#endif

   /* put sample parameters */
   audio_out_ctx->bit_rate = audio_in_ctx->bit_rate;

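Note: the LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) guards in these hunks track the FFmpeg 3.2 API change: newer libavcodec allocates codec contexts independently of the stream and exchanges settings through AVCodecParameters, while older builds used the context embedded in AVStream. A minimal sketch of the split (the helper name is mine, not the project's):

    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    }

    static AVCodecContext *encoder_ctx_for( AVStream *stream, const AVCodec *codec ) {
    #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 64, 0)
      // New API: standalone context; mirror it into the stream later with
      // avcodec_parameters_from_context(stream->codecpar, ctx).
      return avcodec_alloc_context3( codec );
    #else
      // Old API: the stream owns the context.
      return stream->codec;
    #endif
    }
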
@@ -765,8 +751,6 @@ bool VideoStore::setup_resampler() {

   audio_out_ctx->time_base = (AVRational){1, audio_out_ctx->sample_rate};

-  // Now copy them to the out stream
-  audio_out_stream = avformat_new_stream(oc, audio_out_codec);

 #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
   if ( (ret = avcodec_parameters_from_context(audio_out_stream->codecpar,

@@ -774,8 +758,9 @@ bool VideoStore::setup_resampler() {
     Error("Could not initialize stream parameteres");
     return false;
   }
+  audio_out_stream->codecpar->frame_size = audio_out_ctx->frame_size;
 #else
-  avcodec_copy_context( audio_out_stream->codec, audio_out_ctx );
+  avcodec_copy_context( audio_out_stream->codec, audio_out_ctx );
 #endif
   audio_out_stream->time_base = (AVRational){1, audio_out_ctx->sample_rate};

@@ -798,6 +783,22 @@ bool VideoStore::setup_resampler() {
       audio_out_ctx->channels, audio_out_ctx->sample_fmt,
       audio_out_ctx->channel_layout, audio_out_ctx->frame_size);

+#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
+  Debug(1,
+      "Audio out bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) "
+      "layout(%d) frame_size(%d)",
+      audio_out_stream->codecpar->bit_rate, audio_out_stream->codecpar->sample_rate,
+      audio_out_stream->codecpar->channels, audio_out_stream->codecpar->format,
+      audio_out_stream->codecpar->channel_layout, audio_out_stream->codecpar->frame_size);
+#else
+  Debug(1,
+      "Audio out bit_rate (%d) sample_rate(%d) channels(%d) fmt(%d) "
+      "layout(%d) frame_size(%d)",
+      audio_out_stream->codec->bit_rate, audio_out_stream->codec->sample_rate,
+      audio_out_stream->codec->channels, audio_out_stream->codec->sample_fmt,
+      audio_out_stream->codec->channel_layout, audio_out_stream->codec->frame_size);
+#endif

   /** Create a new frame to store the audio samples. */
+  if ( ! in_frame ) {
   if (!(in_frame = zm_av_frame_alloc())) {

@@ -35,6 +35,7 @@ int audio_in_stream_index;
   // Move this into the object so that we aren't constantly allocating/deallocating it on the stack
   AVPacket opkt;
   // we are transcoding
+  AVFrame *video_in_frame;
   AVFrame *in_frame;
   AVFrame *out_frame;

@@ -317,9 +317,9 @@ int main(int argc, char *argv[]) {
       if ( next_delays[i] > 0 ) {
         gettimeofday(&now, NULL);
         DELTA_TIMEVAL(delta_time, now, last_capture_times[i], DT_PREC_3);
-        long sleep_time = next_delays[i]-delta_time.delta;
+        long sleep_time = next_delays[i] - delta_time.delta;
         if ( sleep_time > 0 ) {
-          //Debug(2,"usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
+          Debug(2,"usleeping (%d)", sleep_time*(DT_MAXGRAN/DT_PREC_3) );
           usleep(sleep_time*(DT_MAXGRAN/DT_PREC_3));
         }
         last_capture_times[i] = now;

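Note: a hedged reading of the sleep arithmetic above, with the macro values assumed from their names rather than checked against zm_time.h: DT_PREC_3 keeps delta_time.delta in thousandths of a second, usleep() wants microseconds, so the remaining delay is scaled by DT_MAXGRAN/DT_PREC_3:

    #include <unistd.h>

    // Assumed values, for illustration only.
    const long DT_MAXGRAN_ASSUMED = 1000000;  // microsecond granularity
    const long DT_PREC_3_ASSUMED  = 1000;     // millisecond granularity

    void sleep_remainder( long next_delay_ms, long elapsed_ms ) {
      long sleep_time = next_delay_ms - elapsed_ms;  // e.g. 100 - 40 = 60 ms left
      if ( sleep_time > 0 )
        usleep( sleep_time * (DT_MAXGRAN_ASSUMED / DT_PREC_3_ASSUMED) );  // 60000 us
    }
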
@@ -114,7 +114,6 @@ for ( $i = 0; $i < count($displayMonitors); $i++ ) {
     if ( $maxHeight < $scaleHeight ) $maxHeight = $scaleHeight;
   }
   $monitor['zmc'] = zmcStatus( $monitor );
-  #$monitor['zma'] = zmaStatus( $monitor );
   $zoneCount += $monitor['ZoneCount'];

 $counts = array();

@@ -189,9 +188,9 @@ for( $monitor_i = 0; $monitor_i < count($displayMonitors); $monitor_i += 1 ) {
     $dclass = 'errorText';
   } else {
     // https://github.com/ZoneMinder/ZoneMinder/issues/1082
-    if ( !$monitor['zma'] && $monitor['Function']!='Monitor' )
-      $dclass = 'warnText';
-    else
+    //if ( !$monitor['zma'] && $monitor['Function']!='Monitor' )
+      //$dclass = 'warnText';
+    //else
       $dclass = 'infoText';
   }
   if ( $monitor['Function'] == 'None' )