add saveJPEGs to Events

Isaac Connor 2017-11-13 21:34:39 -05:00
parent df0d37f4eb
commit 39b12057f2
10 changed files with 65 additions and 23 deletions

View File

@@ -67,3 +67,4 @@ SET @s = (SELECT IF(
PREPARE stmt FROM @s;
EXECUTE stmt;

db/zm_update-1.31.13.sql Normal file (13 additions)
View File

@@ -0,0 +1,13 @@
ALTER TABLE `Monitors` MODIFY `OutputCodec` enum('h264','mjpeg','mpeg1','mpeg2');
SET @s = (SELECT IF(
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = DATABASE()
AND table_name = 'Events'
AND column_name = 'SaveJPEGs'
) > 0,
"SELECT 'Column SaveJPEGs already exists in Events'",
"ALTER TABLE `Eventss` ADD `SaveJPEGs` TINYINT AFTER `DefaultVideo`"
));
PREPARE stmt FROM @s;
EXECUTE stmt;

View File

@@ -71,7 +71,7 @@ Event::Event( Monitor *p_monitor, struct timeval p_start_time, const std::string
static char sql[ZM_SQL_MED_BUFSIZ];
struct tm *stime = localtime( &start_time.tv_sec );
snprintf( sql, sizeof(sql), "insert into Events ( MonitorId, StorageId, Name, StartTime, Width, Height, Cause, Notes, StateId, Orientation, Videoed, DefaultVideo, SaveJPEGS ) values ( %d, %d, 'New Event', from_unixtime( %ld ), %d, %d, '%s', '%s', %d, %d, %d, '', %d )",
snprintf( sql, sizeof(sql), "insert into Events ( MonitorId, StorageId, Name, StartTime, Width, Height, Cause, Notes, StateId, Orientation, Videoed, DefaultVideo, SaveJPEGs ) values ( %d, %d, 'New Event', from_unixtime( %ld ), %d, %d, '%s', '%s', %d, %d, %d, '', %d )",
monitor->Id(),
storage->Id(),
start_time.tv_sec,

View File

@@ -36,6 +36,7 @@
#ifdef __FreeBSD__
#include <sys/thr.h>
#endif
#include <cstdarg>
bool Logger::smInitialised = false;
Logger *Logger::smInstance = 0;

View File

@@ -28,6 +28,7 @@ ZMPacket::ZMPacket( ) {
keyframe = 0;
image = NULL;
frame = NULL;
buffer = NULL;
av_init_packet( &packet );
packet.size = 0; // So we can detect whether it has been filled.
timestamp = (struct timeval){0};
@@ -37,6 +38,7 @@ ZMPacket::ZMPacket( Image *i ) {
keyframe = 1;
image = i;
frame = NULL;
buffer = NULL;
av_init_packet( &packet );
timestamp = (struct timeval){0};
}
@@ -45,6 +47,7 @@ ZMPacket::ZMPacket( AVPacket *p ) {
av_init_packet( &packet );
set_packet( p );
keyframe = p->flags & AV_PKT_FLAG_KEY;
buffer = NULL;
}
ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) {
@@ -52,12 +55,14 @@ ZMPacket::ZMPacket( AVPacket *p, struct timeval *t ) {
set_packet( p );
timestamp = *t;
keyframe = p->flags & AV_PKT_FLAG_KEY;
buffer = NULL;
}
ZMPacket::ZMPacket( AVPacket *p, AVFrame *f, Image *i ) {
av_init_packet( &packet );
set_packet( p );
image = i;
frame = f;
buffer = NULL;
}
ZMPacket::~ZMPacket() {
@@ -66,15 +71,24 @@ ZMPacket::~ZMPacket() {
//av_free(frame->data);
av_frame_free( &frame );
}
if ( buffer ) {
av_freep( &buffer );
}
// We assume the image was allocated elsewhere, so we just unref it.
image = NULL;
}
void ZMPacket::reset() {
Debug(2,"reset");
zm_av_packet_unref( &packet );
packet.size = 0;
if ( frame ) {
av_frame_free( &frame );
}
if ( buffer ) {
Debug(2,"freeing buffer");
av_freep( &buffer );
}
}
int ZMPacket::decode( AVCodecContext *ctx ) {
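
The ownership rule these hunks add: every ZMPacket constructor starts buffer at NULL, and both reset() and the destructor release it through av_freep(), which re-NULLs the pointer so repeated resets cannot double free. A minimal stand-alone sketch of that pattern, assuming only FFmpeg's libavutil helpers; the PacketBuffer name is illustrative, not ZoneMinder code:

// Sketch of the buffer-ownership rule added above, not the actual ZMPacket class.
extern "C" {
#include <libavutil/mem.h>
}
#include <cstddef>
#include <cstdint>

struct PacketBuffer {                       // illustrative stand-in for the new field
  uint8_t *buffer;

  PacketBuffer() : buffer(NULL) {}          // mirrors buffer = NULL in each constructor
  ~PacketBuffer() { av_freep(&buffer); }    // av_freep() re-NULLs the pointer

  void reset() { av_freep(&buffer); }       // safe to call any number of times
  void allocate(size_t size) {
    av_freep(&buffer);                      // drop any previous allocation first
    buffer = (uint8_t *)av_malloc(size);
  }
};

Because av_freep(&buffer) is a no-op when buffer is already NULL, the pattern stays safe even without the explicit if ( buffer ) guards used in the hunk.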

View File

@@ -36,6 +36,7 @@ class ZMPacket {
int keyframe;
AVPacket packet; // Input packet, undecoded
AVFrame *frame; // Input image, decoded Theoretically only filled if needed.
uint8_t *buffer;
Image *image; // Our internal image object representing this frame
struct timeval timestamp;
public:

View File

@@ -78,7 +78,17 @@ int SWScale::SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf,
return 0;
}
int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
int SWScale::Convert(
const uint8_t* in_buffer,
const size_t in_buffer_size,
uint8_t* out_buffer,
const size_t out_buffer_size,
enum _AVPIXELFORMAT in_pf,
enum _AVPIXELFORMAT out_pf,
unsigned int width,
unsigned int height
) {
/* Parameter checking */
if(in_buffer == NULL || out_buffer == NULL) {
Error("NULL Input or output buffer");
@@ -119,14 +129,14 @@ int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint
size_t outsize = avpicture_get_size(out_pf, width, height);
#endif
if(outsize < out_buffer_size) {
if ( outsize < out_buffer_size ) {
Error("The output buffer is undersized for the output format. Required: %d Available: %d", outsize, out_buffer_size);
return -5;
}
/* Get the context */
swscale_ctx = sws_getCachedContext( swscale_ctx, width, height, in_pf, width, height, out_pf, SWS_FAST_BILINEAR, NULL, NULL, NULL );
if(swscale_ctx == NULL) {
if ( swscale_ctx == NULL ) {
Error("Failed getting swscale context");
return -6;
}
@@ -163,22 +173,22 @@ int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint
}
int SWScale::Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
if(img->Width() != width) {
if ( img->Width() != width ) {
Error("Source image width differs. Source: %d Output: %d",img->Width(), width);
return -12;
}
if(img->Height() != height) {
if ( img->Height() != height ) {
Error("Source image height differs. Source: %d Output: %d",img->Height(), height);
return -13;
}
return Convert(img->Buffer(),img->Size(),out_buffer,out_buffer_size,in_pf,out_pf,width,height);
return Convert(img->Buffer(), img->Size(), out_buffer, out_buffer_size, in_pf, out_pf, width, height);
}
int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size) {
if(!gotdefaults) {
if ( !gotdefaults ) {
Error("Defaults are not set");
return -24;
}
@@ -188,7 +198,7 @@ int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t
int SWScale::ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size) {
if(!gotdefaults) {
if ( !gotdefaults ) {
Error("Defaults are not set");
return -24;
}
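
For context on what Convert() does with these arguments, here is a self-contained sketch of the underlying libswscale flow, using sws_getCachedContext as the hunk above does; the RGB24-to-YUV420P formats and the helper name are assumptions for illustration, not ZoneMinder defaults:

// Stand-alone sketch of the conversion path behind SWScale::Convert.
extern "C" {
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
#include <cstdint>
#include <vector>

int convert_rgb24_to_yuv420p(const uint8_t *in, int width, int height,
                             std::vector<uint8_t> &out) {
  // Required output size for YUV420P at 1-byte alignment.
  int out_size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1);
  if ( out_size < 0 )
    return out_size;
  out.resize(out_size);

  // Map the flat input/output buffers onto per-plane pointers and strides.
  uint8_t *in_data[4], *out_data[4];
  int in_linesize[4], out_linesize[4];
  av_image_fill_arrays(in_data, in_linesize, in, AV_PIX_FMT_RGB24, width, height, 1);
  av_image_fill_arrays(out_data, out_linesize, out.data(), AV_PIX_FMT_YUV420P, width, height, 1);

  // Same call the hunk shows: a cached context is reused while parameters repeat.
  SwsContext *ctx = sws_getCachedContext(NULL, width, height, AV_PIX_FMT_RGB24,
                                         width, height, AV_PIX_FMT_YUV420P,
                                         SWS_FAST_BILINEAR, NULL, NULL, NULL);
  if ( !ctx )
    return -6;   // matches the error code used above for a missing context

  sws_scale(ctx, in_data, in_linesize, 0, height, out_data, out_linesize);
  sws_freeContext(ctx);
  return 0;
}

sws_getCachedContext() hands back the same context while the parameters are unchanged, which is why the member swscale_ctx is passed back in on every call in the code above.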

View File

@@ -472,7 +472,8 @@ Debug(2,"Different codecs between in and out");
break;
}
#endif
Debug(3, "dts:%d, pts:%d", pkt.dts, pkt.pts );
int keyframe = pkt.flags & AV_PKT_FLAG_KEY;
Debug(3, "dts:%d, pts:%d, keyframe:%d", pkt.dts, pkt.pts, keyframe );
//pkt.dts = video_next_dts;
pkt.pts = pkt.dts;
//pkt.duration = video_last_duration;
@@ -854,11 +855,11 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
video_out_ctx->pix_fmt,
video_out_ctx->width,
video_out_ctx->height, 1);
uint8_t *buffer = (uint8_t *)av_malloc(codec_imgsize);
zm_packet->buffer = (uint8_t *)av_malloc(codec_imgsize);
av_image_fill_arrays(
frame->data,
frame->linesize,
buffer,
zm_packet->buffer,
video_out_ctx->pix_fmt,
video_out_ctx->width,
video_out_ctx->height,
@@ -868,10 +869,10 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
video_out_ctx->pix_fmt,
video_out_ctx->width,
video_out_ctx->height);
uint8_t *buffer = (uint8_t *)av_malloc(codec_imgsize);
zm_packet->buffer = (uint8_t *)av_malloc(codec_imgsize);
avpicture_fill(
(AVPicture *)frame,
buffer,
zm_packet->buffer,
video_out_ctx->pix_fmt,
video_out_ctx->width,
video_out_ctx->height
@@ -882,7 +883,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
frame->height = video_out_ctx->height;
frame->format = video_out_ctx->pix_fmt;
swscale.Convert(zm_packet->image,
buffer,
zm_packet->buffer,
codec_imgsize,
(AVPixelFormat)zm_packet->image->AVPixFormat(),
video_out_ctx->pix_fmt,
@@ -942,6 +943,8 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
opkt.flags = ipkt->flags;
}
int keyframe = opkt.flags & AV_PKT_FLAG_KEY;
Debug(3, "dts:%d, pts:%d, keyframe:%d", opkt.dts, opkt.pts, keyframe );
write_video_packet( opkt );
zm_av_packet_unref(&opkt);
@@ -950,7 +953,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
void VideoStore::write_video_packet( AVPacket &opkt ) {
if (opkt.dts > opkt.pts) {
if ( opkt.dts > opkt.pts ) {
Debug(1,
"opkt.dts(%d) must be <= opkt.pts(%d). Decompression must happen "
"before presentation.",
@@ -981,8 +984,7 @@ void VideoStore::write_video_packet( AVPacket &opkt ) {
//dumpPacket(&opkt);
} else {
ret = av_interleaved_write_frame(oc, &opkt);
if (ret < 0) {
if ( (ret = av_interleaved_write_frame(oc, &opkt)) < 0 ) {
// There's nothing we can really do if the frame is rejected, just drop it
// and get on with the next
Warning(
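
The change in the writeVideoFramePacket() hunks moves the scaled frame's backing store from a local pointer into zm_packet->buffer, so the ZMPacket destructor and reset() shown earlier can release it instead of it leaking. A minimal sketch of that allocate-and-attach step using the post-deprecation image API; OwnedFrame is an illustrative stand-in, not the real ZMPacket:

// Sketch only: attach an owned pixel buffer to a frame so whoever owns the
// struct (here OwnedFrame, standing in for ZMPacket) can free it later.
extern "C" {
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
}
#include <cstdint>

struct OwnedFrame {
  AVFrame *frame = NULL;    // converted picture
  uint8_t *buffer = NULL;   // backing store for frame->data, freed by the owner
};

static int attach_buffer(OwnedFrame *of, enum AVPixelFormat pix_fmt,
                         int width, int height) {
  if ( !of->frame && !(of->frame = av_frame_alloc()) )
    return AVERROR(ENOMEM);

  int size = av_image_get_buffer_size(pix_fmt, width, height, 1);
  if ( size < 0 )
    return size;

  of->buffer = (uint8_t *)av_malloc(size);
  if ( !of->buffer )
    return AVERROR(ENOMEM);

  // Point frame->data/linesize into the buffer the owner is now responsible for.
  return av_image_fill_arrays(of->frame->data, of->frame->linesize,
                              of->buffer, pix_fmt, width, height, 1);
}

Because the buffer now lives on the packet, both the av_image_fill_arrays() and the older avpicture_fill() branches in the hunk write into memory with a clear owner.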

View File

@@ -269,8 +269,8 @@ int main(int argc, char *argv[]) {
struct timeval now;
struct DeltaTimeval delta_time;
while ( !zm_terminate ) {
Debug(2,"blocking");
//sigprocmask(SIG_BLOCK, &block_set, 0);
//Debug(2,"blocking");
sigprocmask(SIG_BLOCK, &block_set, 0);
for ( int i = 0; i < n_monitors; i++ ) {
long min_delay = MAXINT;
@@ -327,8 +327,8 @@ int main(int argc, char *argv[]) {
} // end if next_delay <= min_delay || next_delays[i] <= 0 )
} // end foreach n_monitors
Debug(2,"unblocking");
//sigprocmask(SIG_UNBLOCK, &block_set, 0);
//Debug(2,"unblocking");
sigprocmask(SIG_UNBLOCK, &block_set, 0);
} // end while ! zm_terminate
for ( int i = 0; i < n_monitors; i++ ) {
if ( analysis_threads[i] ) {
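
This hunk swaps the temporary Debug() lines back for the real sigprocmask() calls, so signal delivery is held while one pass over the monitors runs and released afterwards. A small self-contained sketch of that block/unblock bracket; the signals in the set and the iterate callback are assumptions, not the program's actual block_set:

// Sketch of the block/iterate/unblock bracket restored above.
#include <signal.h>

static void run_pass_with_signals_held(void (*iterate)(void)) {
  sigset_t block_set;
  sigemptyset(&block_set);
  sigaddset(&block_set, SIGHUP);
  sigaddset(&block_set, SIGUSR1);
  sigaddset(&block_set, SIGUSR2);

  sigprocmask(SIG_BLOCK, &block_set, 0);     // hold delivery during the pass
  iterate();                                 // e.g. service every monitor once
  sigprocmask(SIG_UNBLOCK, &block_set, 0);   // pending signals arrive here
}

In a process that also runs analysis threads, pthread_sigmask() is the per-thread equivalent of sigprocmask() for this bracket.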

View File

@@ -1 +1 @@
1.31.12
1.31.13