split out codec and encoder, allowing one to specify which encoder to use
commit c443168389
parent 84b42ef4d7
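In outline: the Monitors.OutputCodec column becomes a numeric ffmpeg codec id, and a new Monitors.Encoder column names the encoder implementation ('auto', 'h264', 'h264_omx', ...). The following is a minimal hypothetical sketch of that split, not code from this commit, using only standard libavcodec calls:

// Sketch only: codec id would come from Monitors.OutputCodec, the encoder
// name from the new Monitors.Encoder column ("auto" meaning "let the code
// pick a known encoder for that codec id").
extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstring>

static const AVCodec *pick_encoder(int wanted_codec_id, const char *encoder_name) {
  if ( encoder_name && strcmp(encoder_name, "auto") != 0 ) {
    // An explicit encoder was configured, e.g. "h264_omx" or "libx264".
    return avcodec_find_encoder_by_name(encoder_name);
  }
  // "auto": fall back to whatever ffmpeg registers for the codec id.
  return avcodec_find_encoder((AVCodecID)wanted_codec_id);
}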
@@ -710,7 +710,8 @@ CREATE TABLE `Monitors` (
  `Deinterlacing` int(10) unsigned NOT NULL default '0',
  `SaveJPEGs` TINYINT NOT NULL DEFAULT '3' ,
  `VideoWriter` TINYINT NOT NULL DEFAULT '0',
- `OutputCodec` enum('h264','mjpeg','mpeg1','mpeg2'),
+ `OutputCodec` int(10) unsigned NOT NULL default 0,
+ `Encoder` enum('auto','h264','h264_omx','mjpeg','mpeg1','mpeg2'),
  `OutputContainer` enum('auto','mp4','mkv'),
  `EncoderParameters` TEXT,
  `RecordAudio` TINYINT NOT NULL DEFAULT '0',
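The corresponding migration for an existing database lives in a separate update script that is not part of this excerpt; a rough sketch of the equivalent statements (an assumption, not the commit's actual zm_update script) would be:

-- Sketch only: the real zm_update-*.sql script is not shown in this diff.
-- A real migration would also need to translate the old enum string values
-- into the new numeric codec ids before changing the column type.
ALTER TABLE `Monitors` MODIFY `OutputCodec` int(10) unsigned NOT NULL default 0;
ALTER TABLE `Monitors` ADD `Encoder` enum('auto','h264','h264_omx','mjpeg','mpeg1','mpeg2') AFTER `OutputCodec`;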
@@ -176,16 +176,17 @@ BEGIN
  WHERE Id=OLD.MonitorId;
  END IF;
  END IF;
- ELSEIF ( NEW.Archived AND diff ) THEN
+ ELSE
+ IF ( NEW.Archived AND diff ) THEN
  UPDATE Events_Archived SET DiskSpace=NEW.DiskSpace WHERE EventId=NEW.Id;
  END IF;
+ END IF;

  IF ( diff ) THEN
  UPDATE Monitors SET TotalEventDiskSpace = COALESCE(TotalEventDiskSpace,0) - COALESCE(OLD.DiskSpace,0) + COALESCE(NEW.DiskSpace,0) WHERE Id=OLD.MonitorId;
  END IF;

  END;

  //

  delimiter ;
@@ -190,7 +190,7 @@ Event::Event(
  if ( monitor->GetOptVideoWriter() != 0 ) {
  std::string container = monitor->OutputContainer();
  if ( container == "auto" || container == "" ) {
- if ( monitor->OutputCodec() == "h264" ) {
+ if ( monitor->OutputCodec() == AV_CODEC_ID_H264 ) {
  container = "mp4";
  } else {
  container = "mkv";
@@ -249,7 +249,8 @@ Monitor::Monitor(
  int p_colours,
  VideoWriter p_videowriter,
  std::string &p_encoderparams,
- std::string &p_output_codec,
+ int p_output_codec,
+ std::string &p_encoder,
  std::string &p_output_container,
  bool p_record_audio,
  const char *p_event_prefix,
@@ -292,6 +293,7 @@ Monitor::Monitor(
  videowriter( p_videowriter ),
  encoderparams( p_encoderparams ),
  output_codec( p_output_codec ),
+ encoder( p_encoder ),
  output_container( p_output_container ),
  record_audio( p_record_audio ),
  label_coord( p_label_coord ),
@@ -1761,7 +1763,7 @@ void Monitor::ReloadLinkedMonitors( const char *p_linked_monitors ) {

  #if ZM_HAS_V4L
  int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose purpose ) {
- std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Device, Channel, Format, V4LMultiBuffer, V4LCapturesPerFrame, Method, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, SignalCheckColour, Exif from Monitors where Function != 'None' and Type = 'Local'";
+ std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Device, Channel, Format, V4LMultiBuffer, V4LCapturesPerFrame, Method, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, Encoder, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, SignalCheckColour, Exif from Monitors where Function != 'None' and Type = 'Local'";
  ;
  if ( device[0] ) {
  sql += " AND Device='";
@@ -1826,7 +1828,8 @@ int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose
  int savejpegs = atoi(dbrow[col]); col++;
  VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
  std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
- std::string output_codec = dbrow[col] ? dbrow[col] : ""; col++;
+ int output_codec = dbrow[col] ? atoi(dbrow[col]) : 0; col++;
+ std::string encoder = dbrow[col] ? dbrow[col] : ""; col++;
  std::string output_container = dbrow[col] ? dbrow[col] : ""; col++;
  bool record_audio = (*dbrow[col] != '0'); col++;

@@ -1907,6 +1910,7 @@ int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose
  videowriter,
  encoderparams,
  output_codec,
+ encoder,
  output_container,
  record_audio,
  event_prefix,
@@ -1955,7 +1959,7 @@ int Monitor::LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose
  #endif // ZM_HAS_V4L

  int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const char *port, const char *path, Monitor **&monitors, Purpose purpose ) {
- std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Protocol, Method, Host, Port, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'Remote'";
+ std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Protocol, Method, Host, Port, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, RTSPDescribe, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, Encoder, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'Remote'";
  if ( staticConfig.SERVER_ID ) {
  sql += stringtf( " AND ServerId=%d", staticConfig.SERVER_ID );
  }
@@ -2001,7 +2005,8 @@ int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const c
  int savejpegs = atoi(dbrow[col]); col++;
  VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
  std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
- std::string output_codec = dbrow[col] ? dbrow[col] : ""; col++;
+ int output_codec = dbrow[col] ? atoi(dbrow[col]) : 0; col++;
+ std::string encoder = dbrow[col] ? dbrow[col] : ""; col++;
  std::string output_container = dbrow[col] ? dbrow[col] : ""; col++;
  bool record_audio = (*dbrow[col] != '0'); col++;

@@ -2096,6 +2101,7 @@ int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const c
  videowriter,
  encoderparams,
  output_codec,
+ encoder,
  output_container,
  record_audio,
  event_prefix,
@@ -2143,7 +2149,7 @@ int Monitor::LoadRemoteMonitors( const char *protocol, const char *host, const c
  }

  int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose purpose ) {
- std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'File'";
+ std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Path, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, Encoder, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'File'";
  if ( file[0] ) {
  sql += " AND Path='";
  sql += file;
@@ -2185,7 +2191,8 @@ int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose pu
  int savejpegs = atoi(dbrow[col]); col++;
  VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
  std::string encoderparams = dbrow[col]; col++;
- std::string output_codec = dbrow[col]; col++;
+ int output_codec = dbrow[col] ? atoi(dbrow[col]) : 0; col++;
+ std::string encoder = dbrow[col]; col++;
  std::string output_container = dbrow[col]; col++;
  bool record_audio = (*dbrow[col] != '0'); col++;

@@ -2250,6 +2257,7 @@ int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose pu
  videowriter,
  encoderparams,
  output_codec,
+ encoder,
  output_container,
  record_audio,
  event_prefix,
@@ -2298,7 +2306,7 @@ int Monitor::LoadFileMonitors( const char *file, Monitor **&monitors, Purpose pu

  #if HAVE_LIBAVFORMAT
  int Monitor::LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose purpose ) {
- std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Path, Method, Options, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'Ffmpeg'";
+ std::string sql = "select Id, Name, ServerId, StorageId, Function+0, Enabled, LinkedMonitors, Path, Method, Options, Width, Height, Colours, Palette, Orientation+0, Deinterlacing, SaveJPEGs, VideoWriter, EncoderParameters, OutputCodec, Encoder, OutputContainer, RecordAudio, Brightness, Contrast, Hue, Colour, EventPrefix, LabelFormat, LabelX, LabelY, LabelSize, ImageBufferCount, WarmupCount, PreEventCount, PostEventCount, StreamReplayBuffer, AlarmFrameCount, SectionLength, FrameSkip, MotionFrameSkip, AnalysisFPSLimit, AnalysisUpdateDelay, MaxFPS, AlarmMaxFPS, FPSReportInterval, RefBlendPerc, AlarmRefBlendPerc, TrackMotion, Exif from Monitors where Function != 'None' and Type = 'Ffmpeg'";
  if ( file[0] ) {
  sql += " AND Path = '";
  sql += file;
@@ -2343,7 +2351,8 @@ int Monitor::LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose
  int savejpegs = atoi(dbrow[col]); col++;
  VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
  std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
- std::string output_codec = dbrow[col] ? dbrow[col] : ""; col++;
+ int output_codec = dbrow[col] ? atoi(dbrow[col]) : 0; col++;
+ std::string encoder = dbrow[col] ? dbrow[col] : ""; col++;
  std::string output_container = dbrow[col] ? dbrow[col] : ""; col++;
  bool record_audio = (*dbrow[col] != '0'); col++;

@@ -2414,6 +2423,7 @@ int Monitor::LoadFfmpegMonitors( const char *file, Monitor **&monitors, Purpose
  videowriter,
  encoderparams,
  output_codec,
+ encoder,
  output_container,
  record_audio,
  event_prefix,
@@ -2524,7 +2534,8 @@ Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
  int savejpegs = atoi(dbrow[col]); col++;
  VideoWriter videowriter = (VideoWriter)atoi(dbrow[col]); col++;
  std::string encoderparams = dbrow[col] ? dbrow[col] : ""; col++;
- std::string output_codec = dbrow[col] ? dbrow[col] : ""; col++;
+ int output_codec = dbrow[col] ? atoi(dbrow[col]) : 0; col++;
+ std::string encoder = dbrow[col] ? dbrow[col] : ""; col++;
  std::string output_container = dbrow[col] ? dbrow[col] : ""; col++;
  bool record_audio = (*dbrow[col] != '0'); col++;

@@ -2746,6 +2757,7 @@ Monitor *Monitor::Load( unsigned int p_id, bool load_zones, Purpose purpose ) {
  videowriter,
  encoderparams,
  output_codec,
+ encoder,
  output_container,
  record_audio,
  event_prefix,
@@ -239,7 +239,8 @@ protected:
  int colours;
  VideoWriter videowriter;
  std::string encoderparams;
- std::string output_codec;
+ int output_codec;
+ std::string encoder;
  std::string output_container;
  std::vector<EncoderParameter_t> encoderparamsvec;
  _AVPIXELFORMAT imagePixFormat;
@@ -359,7 +360,8 @@ public:
  int p_colours,
  VideoWriter p_videowriter,
  std::string &p_encoderparams,
- std::string &p_output_codec,
+ int p_output_codec,
+ std::string &p_encoder,
  std::string &p_output_container,
  bool p_record_audio,
  const char *p_event_prefix,
@@ -458,7 +460,8 @@ public:
  VideoWriter GetOptVideoWriter() const { return( videowriter ); }
  const std::vector<EncoderParameter_t>* GetOptEncoderParams() const { return( &encoderparamsvec ); }
  const std::string &GetEncoderOptions() const { return( encoderparams ); }
- const std::string &OutputCodec() const { return output_codec; }
+ const std::string &Encoder() const { return encoder; }
+ const int OutputCodec() const { return output_codec; }
  const std::string &OutputContainer() const { return output_container; }

  uint32_t GetVideoWriterEventId() const { return video_store_data->current_event; }
@@ -31,6 +31,13 @@ extern "C" {
  #include "libavutil/time.h"
  }

+ VideoStore::CodecData VideoStore::codec_data[] = {
+ { AV_CODEC_ID_H264, "h264", "h264_omx", AV_PIX_FMT_YUV420P },
+ { AV_CODEC_ID_H264, "h264", "h264", AV_PIX_FMT_YUV420P },
+ { AV_CODEC_ID_H264, "h264", "libx264", AV_PIX_FMT_YUV420P },
+ { AV_CODEC_ID_MJPEG, "mjpeg", "mjpeg", AV_PIX_FMT_YUVJ422P },
+ };
+
  VideoStore::VideoStore(
  const char *filename_in,
  const char *format_in,
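The codec_data table above drives encoder selection later in open(): rows whose codec_id matches the monitor's OutputCodec are tried in order until one of the named encoders is available. A minimal standalone sketch of that idea (hypothetical names, not the commit's code):

// Sketch of the table-driven lookup: try each encoder registered for the
// wanted codec id, in priority order, and keep the first one this ffmpeg
// build actually provides.
extern "C" {
#include <libavcodec/avcodec.h>
}

struct CodecEntry { int codec_id; const char *encoder_name; };

static const CodecEntry kTable[] = {
  { AV_CODEC_ID_H264,  "h264_omx" },  // hardware encoder first
  { AV_CODEC_ID_H264,  "h264"     },
  { AV_CODEC_ID_H264,  "libx264"  },
  { AV_CODEC_ID_MJPEG, "mjpeg"    },
};

static const AVCodec *find_encoder_for(int wanted_codec_id) {
  for ( unsigned int i = 0; i < sizeof(kTable)/sizeof(*kTable); i++ ) {
    if ( kTable[i].codec_id != wanted_codec_id )
      continue;
    if ( const AVCodec *codec = avcodec_find_encoder_by_name(kTable[i].encoder_name) )
      return codec;  // first matching encoder that exists in this build
  }
  return nullptr;
}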
@@ -140,88 +147,7 @@ bool VideoStore::open() {
  video_in_stream_index = 0;
  }

- if ( monitor->OutputCodec() == "mjpeg" ) {
+ video_out_ctx = avcodec_alloc_context3(NULL);
- Debug(2,"Using mjpeg");
- video_out_codec = avcodec_find_encoder_by_name("mjpeg");
- if ( ! video_out_codec ) {
- Debug(1, "Didn't find mjpeg encoder");
- video_out_codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
- }
- video_out_ctx = avcodec_alloc_context3( video_out_codec );
- video_out_ctx->codec_id = video_out_codec->id;
- video_out_ctx->pix_fmt = AV_PIX_FMT_YUVJ422P;
-
- } else if ( monitor->OutputCodec() == "h264" || monitor->OutputCodec() == "" ) {
- AVPixelFormat pf = AV_PIX_FMT_YUV420P;
-
- // First try hardware accell
- video_out_codec = avcodec_find_encoder_by_name("h264_omx");
- if ( ! video_out_codec ) {
- Debug(1, "Didn't find omx");
- video_out_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
- }
- if ( ! video_out_codec ) {
- if ( AV_CODEC_ID_NONE ==
- #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
- video_in_stream->codecpar->codec_id
- #else
- video_in_stream->codec->codec_id
- #endif
- ) {
- Debug(1, "trying xh264rgb");
- // We will be encoding rgb images, so prefer
- video_out_codec = avcodec_find_encoder_by_name("libx264rgb");
- if ( ! video_out_codec ) {
- video_out_codec = avcodec_find_encoder_by_name("libx264");
- } else {
- pf =
- #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
- (AVPixelFormat)video_in_stream->codecpar->format;
- #else
- video_in_stream->codec->pix_fmt;
- #endif
- }
- } else {
- video_out_codec = avcodec_find_encoder_by_name("libx264");
- pf = AV_PIX_FMT_YUV420P;
- }
- }
- // Need to do lookup by codec_id
- if ( ! video_out_codec ) {
- Error("Didn't find h264 encoder");
- video_out_codec = NULL;
- return false;
- }
- Debug(1, "Using %s for codec", video_out_codec->name);
- video_out_ctx = avcodec_alloc_context3(video_out_codec);
- if ( AV_CODEC_ID_H264 != video_out_ctx->codec_id ) {
- Warning("Have to set codec_id?");
- video_out_ctx->codec_id = AV_CODEC_ID_H264;
- }
-
- video_out_ctx->pix_fmt = pf;
- video_out_ctx->level = 32;
-
- } else {
- Error("Unsupported output codec selected");
- return false;
- }
-
- // Copy params from instream to ctx
- // // FIXME SHould check that we are set to passthrough
- if ( video_in_stream && ( video_in_ctx->codec_id == AV_CODEC_ID_H264 ) ) {
- #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
- ret = avcodec_parameters_to_context(video_out_ctx, video_in_stream->codecpar);
- #else
- ret = avcodec_copy_context( video_out_ctx, video_in_ctx );
- #endif
- if ( ret < 0 ) {
- Error("Could not initialize ctx parameteres");
- return false;
- }
- //video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
- video_out_ctx->time_base = video_in_ctx->time_base;
-
  if ( oc->oformat->flags & AVFMT_GLOBALHEADER ) {
  #if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
  video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
@@ -229,6 +155,26 @@ Debug(2,"Using mjpeg");
  video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
  #endif
  }
+ int wanted_codec = monitor->OutputCodec();
+ if ( ! wanted_codec ) {
+ // default to h264
+ wanted_codec = AV_CODEC_ID_H264;
+ }
+
+ // // FIXME SHould check that we are set to passthrough
+ if ( video_in_stream && ( video_in_ctx->codec_id == wanted_codec ) ) {
+ #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
+ ret = avcodec_parameters_to_context(video_out_ctx, video_in_stream->codecpar);
+ #else
+ ret = avcodec_copy_context( video_out_ctx, video_in_ctx );
+ #endif
+ // Copy params from instream to ctx
+ if ( ret < 0 ) {
+ Error("Could not initialize ctx parameteres");
+ return false;
+ }
+ //video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
+ video_out_ctx->time_base = video_in_ctx->time_base;
  // Fix deprecated formats
  switch ( video_out_ctx->pix_fmt ) {
  case AV_PIX_FMT_YUVJ422P :
@@ -246,28 +192,55 @@ Debug(2,"Using mjpeg");
  video_out_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
  break;
  }
- zm_dump_codec(video_out_ctx);
+ // Only set orientation if doing passthrough, otherwise the frame image will be rotated
+ Monitor::Orientation orientation = monitor->getOrientation();
+ if ( orientation ) {
+ Debug(3, "Have orientation");
+ if ( orientation == Monitor::ROTATE_0 ) {
+ } else if ( orientation == Monitor::ROTATE_90 ) {
+ ret = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
+ if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
+ } else if ( orientation == Monitor::ROTATE_180 ) {
+ ret = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
+ if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
+ } else if ( orientation == Monitor::ROTATE_270 ) {
+ ret = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
+ if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
+ } else {
+ Warning("Unsupported Orientation(%d)", orientation);
+ }
+ } // end if orientation
  } else {

  /** Create a new frame to store the */
  if ( !(video_in_frame = zm_av_frame_alloc()) ) {
  Error("Could not allocate video_in frame");
  return false;
  }
+ for (int i = 0; i < sizeof(codec_data) / sizeof(*codec_data); i++ ) {
+ if ( codec_data[i].codec_id != monitor->OutputCodec() )
+ continue;
+
+ video_out_codec = avcodec_find_encoder_by_name(codec_data[i].codec_name);
+ if ( ! video_out_codec ) {
+ Debug(1, "Didn't find encoder for %s", codec_data[i].codec_name);
+ continue;
+ }
+
+ Debug(1, "Using %s for codec", video_out_codec->name);
+ //video_out_ctx = avcodec_alloc_context3(video_out_codec);
+ if ( video_out_codec->id != video_out_ctx->codec_id ) {
+ Warning("Have to set codec_id?");
+ video_out_ctx->codec_id = AV_CODEC_ID_H264;
+ }
+
+ video_out_ctx->pix_fmt = codec_data[i].pix_fmt;
+ video_out_ctx->level = 32;
+
  // Don't have an input stream, so need to tell it what we are sending it, or are transcoding
  video_out_ctx->width = monitor->Width();
  video_out_ctx->height = monitor->Height();
  video_out_ctx->codec_type = AVMEDIA_TYPE_VIDEO;

- if ( oc->oformat->flags & AVFMT_GLOBALHEADER ) {
- #if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
- video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
- #else
- video_out_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
- #endif
- }

  /* video time_base can be set to whatever is handy and supported by encoder */
  //video_out_ctx->time_base = (AVRational){1, 1000000}; // microseconds as base frame rate
  video_out_ctx->time_base = (AVRational){1, 30}; // microseconds as base frame rate
@@ -310,44 +283,42 @@ Debug(2,"Using mjpeg");
  }

  if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
- Warning("Can't open video codec (%s)! %s, trying h264",
+ Warning("Can't open video codec (%s) %s",
  video_out_codec->name,
  av_make_error_string(ret).c_str()
  );
- video_out_codec = avcodec_find_encoder_by_name("h264");
+ video_out_codec = NULL;
- if ( ! video_out_codec ) {
- Error("Can't find h264 encoder");
- video_out_codec = avcodec_find_encoder_by_name("libx264");
- if ( ! video_out_codec ) {
- Error("Can't find libx264 encoder");
- return false;
  }
- }
- if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
- Error("Can't open video codec (%s)! %s", video_out_codec->name,
- av_make_error_string(ret).c_str() );
- #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
- // We allocate and copy in newer ffmpeg, so need to free it
- avcodec_free_context(&video_out_ctx);
- #endif
- video_out_ctx=NULL;
-
- return false;
- }
- } // end if can't open codec
- Debug(2,"Sucess opening codec");
  AVDictionaryEntry *e = NULL;
  while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
  Warning( "Encoder Option %s not recognized by ffmpeg codec", e->key);
  }
  av_dict_free(&opts);
+ if ( video_out_codec ) break;
+
+ } // end foreach codec
+
+ if ( ! video_out_codec ) {
+ Error("Can't open video codec!");
+ #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
+ // We allocate and copy in newer ffmpeg, so need to free it
+ avcodec_free_context(&video_out_ctx);
+ #endif
+ video_out_ctx = NULL;
+
+ return false;
+ } // end if can't open codec
+
+ Debug(2,"Sucess opening codec");
+
+ } // end if copying or trasncoding
+
  if ( !video_out_ctx->codec_tag ) {
  video_out_ctx->codec_tag =
  av_codec_get_tag(oc->oformat->codec_tag, video_out_ctx->codec_id );
  Debug(2, "No codec_tag, setting to h264 ? ");
  }
- } // end if copying or trasncoding

  video_out_stream = avformat_new_stream(oc, video_out_codec);
  if ( ! video_out_stream ) {
@@ -364,27 +335,6 @@ Debug(2,"Using mjpeg");
  avcodec_copy_context(video_out_stream->codec, video_out_ctx);
  #endif

- if ( video_in_stream && ( video_in_ctx->codec_id == AV_CODEC_ID_H264 ) ) {
- // Only set orientation if doing passthrough, otherwise the frame image will be rotated
- Monitor::Orientation orientation = monitor->getOrientation();
- if ( orientation ) {
- Debug(3, "Have orientation");
- if ( orientation == Monitor::ROTATE_0 ) {
- } else if ( orientation == Monitor::ROTATE_90 ) {
- ret = av_dict_set(&video_out_stream->metadata, "rotate", "90", 0);
- if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
- } else if ( orientation == Monitor::ROTATE_180 ) {
- ret = av_dict_set(&video_out_stream->metadata, "rotate", "180", 0);
- if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
- } else if ( orientation == Monitor::ROTATE_270 ) {
- ret = av_dict_set(&video_out_stream->metadata, "rotate", "270", 0);
- if ( ret < 0 ) Warning("%s:%d: title set failed", __FILE__, __LINE__);
- } else {
- Warning("Unsupported Orientation(%d)", orientation);
- }
- }
- }

  if ( audio_in_stream ) {
  audio_in_stream_index = audio_in_stream->index;
  #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
@@ -470,12 +420,10 @@ Debug(2,"Using mjpeg");
  } // end if audio_in_stream

  /* open the out file, if needed */
- if (!(out_format->flags & AVFMT_NOFILE)) {
+ if ( !(out_format->flags & AVFMT_NOFILE) ) {
- ret = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
+ if ( (ret = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE, NULL, NULL) ) < 0 ) {
- if (ret < 0) {
  Error("Could not open out file '%s': %s\n", filename,
  av_make_error_string(ret).c_str());

  return false;
  }
  }
@@ -503,10 +451,10 @@ Debug(2,"Using mjpeg");
  zm_dump_stream_format(oc, 0, 0, 1);
  if (audio_out_stream) zm_dump_stream_format(oc, 1, 0, 1);
  return true;
  } // end bool VideoStore::open()

  void VideoStore::write_audio_packet( AVPacket &pkt ) {
  //Debug(2, "writing audio packet pts(%d) dts(%d) duration(%d)", pkt.pts,
  //pkt.dts, pkt.duration);
  pkt.pts = audio_next_pts;
  pkt.dts = audio_next_dts;
@@ -523,9 +471,9 @@ void VideoStore::write_audio_packet( AVPacket &pkt ) {
  Debug(2, "writing audio packet pts(%d) dts(%d) duration(%d)", pkt.pts, pkt.dts, pkt.duration);
  pkt.stream_index = audio_out_stream->index;
  av_interleaved_write_frame(oc, &pkt);
  } // end void VideoStore::Write_audio_packet( AVPacket &pkt )

  VideoStore::~VideoStore() {
  if ( oc->pb ) {
  if ( ( video_out_ctx->codec_id != video_in_ctx->codec_id ) || audio_out_codec ) {
  Debug(2,"Different codecs between in and out");
@@ -707,9 +655,9 @@ VideoStore::~VideoStore() {

  /* free the stream */
  avformat_free_context(oc);
  }

  bool VideoStore::setup_resampler() {
  //I think this is unneccessary, we should be able to just pass in the decoder from the input.
  #ifdef HAVE_LIBAVRESAMPLE
  // Newer ffmpeg wants to keep everything separate... so have to lookup our own
@@ -919,10 +867,10 @@ bool VideoStore::setup_resampler() {
  "AAC");
  return false;
  #endif
  } // end bool VideoStore::setup_resampler()


  int VideoStore::writePacket( ZMPacket *ipkt ) {
  if ( ipkt->packet.stream_index == video_in_stream_index ) {
  return writeVideoFramePacket( ipkt );
  } else if ( ipkt->packet.stream_index == audio_in_stream_index ) {
@@ -932,9 +880,9 @@ int VideoStore::writePacket( ZMPacket *ipkt ) {
  ipkt->packet.stream_index, video_in_stream_index, ( audio_in_stream ? audio_in_stream_index : -1 )
  );
  return 0;
  }

  int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
  frame_count += 1;

  // if we have to transcode
@@ -1072,7 +1020,7 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
  }
  return -1;
  }
  //Debug(2, "Got packet using receive_packet, dts:%" PRId64 ", pts:%" PRId64 ", keyframe:%d", opkt.dts, opkt.pts, opkt.flags & AV_PKT_FLAG_KEY );
  #else
  av_init_packet(&opkt);
  opkt.data = NULL;
@@ -1139,9 +1087,9 @@ int VideoStore::writeVideoFramePacket( ZMPacket * zm_packet ) {
  zm_av_packet_unref(&opkt);

  return 1;
  } // end int VideoStore::writeVideoFramePacket( AVPacket *ipkt )

  void VideoStore::write_video_packet( AVPacket &opkt ) {

  if ( opkt.dts > opkt.pts ) {
  Debug(1,
@@ -1186,9 +1134,9 @@ void VideoStore::write_video_packet( AVPacket &opkt ) {
  }
  }

  } // end void VideoStore::write_video_packet

  int VideoStore::writeAudioFramePacket(ZMPacket *zm_packet) {
  Debug(4, "writeAudioFrame");

  AVPacket *ipkt = &zm_packet->packet;
@@ -1321,12 +1269,12 @@ int VideoStore::writeAudioFramePacket(ZMPacket *zm_packet) {
  opkt.duration = ipkt->duration;
  }

  // PTS is difficult, because of the buffering of the audio packets in the
  // resampler. So we have to do it once we actually have a packet...
  // audio_last_pts is the pts of ipkt, audio_next_pts is the last pts of the
  // out

  // Scale the PTS of the outgoing packet to be the correct time base
  #if 0
  if ( ipkt->pts != AV_NOPTS_VALUE ) {
  if ( !audio_last_pts ) {
@@ -1406,9 +1354,9 @@ int VideoStore::writeAudioFramePacket(ZMPacket *zm_packet) {
  }
  zm_av_packet_unref(&opkt);
  return 1;
  } // end int VideoStore::writeAudioFramePacket( AVPacket *ipkt )

  int VideoStore::write_packets( zm_packetqueue &queue ) {
  // Need to write out all the frames from the last keyframe?
  // No... need to write out all frames from when the event began. Due to PreEventFrames, this could be more than since the last keyframe.
  unsigned int packet_count = 0;
@@ -1429,4 +1377,4 @@ int VideoStore::write_packets( zm_packetqueue &queue ) {
  } // end while packets in the packetqueue
  Debug(2, "Wrote %d queued packets", packet_count );
  return packet_count;
  } // end int VideoStore::write_packets( PacketQueue &queue ) {
@@ -19,6 +19,15 @@ class VideoStore;
  class VideoStore {
  private:

+ struct CodecData {
+ const int codec_id;
+ const char *codec_codec;
+ const char *codec_name;
+ const enum AVPixelFormat pix_fmt;
+
+ };
+ static struct CodecData codec_data[];
+
  Monitor *monitor;
  AVOutputFormat *out_format;
  AVFormatContext *oc;
@@ -15,7 +15,8 @@ private $defaults = array(
  'Height' => null,
  'Orientation' => null,
  'AnalysisFPSLimit' => null,
- 'OutputCodec' => 'h264',
+ 'OutputCodec' => '0',
+ 'Encoder' => 'auto',
  'OutputContainer' => 'auto',
  'ZoneCount' => 0,
  'Triggers' => null,
@@ -470,13 +470,21 @@ $videowriteropts = array(
  'X264 Encode' => 1,
  'H264 Camera Passthrough' => 2
  );
- $videowriter_codecs = array(
+ $videowriter_encoders = array(
- '' => translate('Disabled'),
+ '' => translate('Auto'),
+ 'h264_omx' => 'h264_omx',
  'h264' => 'h264',
  'mjpeg' => 'mjpeg',
  'mpeg1' => 'mpeg1',
  'mpeg2' => 'mpeg2',
  );
+ $videowriter_codecs = array(
+ '0' => translate('Disabled'),
+ '220' => 'h264',
+ '8' => 'mjpeg',
+ '1' => 'mpeg1',
+ '2' => 'mpeg2',
+ );
  $videowriter_containers = array(
  '' => translate('Auto'),
  'mp4' => 'mp4',
@@ -609,6 +617,7 @@ if ( $tab != 'storage' ) {
  <input type="hidden" name="newMonitor[SaveJPEGs]" value="<?php echo validHtmlStr($monitor->SaveJPEGs()) ?>"/>
  <input type="hidden" name="newMonitor[VideoWriter]" value="<?php echo validHtmlStr($monitor->VideoWriter()) ?>"/>
  <input type="hidden" name="newMonitor[OutputCodec]" value="<?php echo validHtmlStr($monitor->OutputCodec()) ?>"/>
+ <input type="hidden" name="newMonitor[Encoder]" value="<?php echo validHtmlStr($monitor->Encoder()) ?>"/>
  <input type="hidden" name="newMonitor[OutputContainer]" value="<?php echo validHtmlStr($monitor->OutputContainer()) ?>"/>
  <input type="hidden" name="newMonitor[EncoderParameters]" value="<?php echo validHtmlStr($monitor->EncoderParameters()) ?>"/>
  <input type="hidden" name="newMonitor[RecordAudio]" value="<?php echo validHtmlStr($monitor->RecordAudio()) ?>"/>
@@ -912,6 +921,7 @@ if ( $monitor->Type() == 'Local' ) {
  <tr><td><?php echo translate('SaveJPEGs') ?></td><td><select name="newMonitor[SaveJPEGs]"><?php foreach ( $savejpegopts as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $monitor->SaveJPEGs() ) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
  <tr><td><?php echo translate('VideoWriter') ?></td><td><select name="newMonitor[VideoWriter]"><?php foreach ( $videowriteropts as $name => $value ) { ?><option value="<?php echo $value ?>"<?php if ( $value == $monitor->VideoWriter() ) { ?> selected="selected"<?php } ?>><?php echo $name ?></option><?php } ?></select></td></tr>
  <tr><td><?php echo translate('OutputCodec') ?></td><td><?php echo htmlSelect( 'newMonitor[OutputCodec]', $videowriter_codecs, $monitor->OutputCodec() );?></td></tr>
+ <tr><td><?php echo translate('Encoder') ?></td><td><?php echo htmlSelect( 'newMonitor[Encoder]', $videowriter_encoders, $monitor->Encoder() );?></td></tr>
  <tr><td><?php echo translate('OutputContainer') ?></td><td><?php echo htmlSelect( 'newMonitor[OutputContainer]', $videowriter_containers, $monitor->OutputContainer() );?></td></tr>
  <tr><td><?php echo translate('OptionalEncoderParam') ?></td><td><textarea name="newMonitor[EncoderParameters]" rows="4" cols="36"><?php echo validHtmlStr($monitor->EncoderParameters()) ?></textarea></td></tr>
  <tr><td><?php echo translate('RecordAudio') ?></td><td><input type="checkbox" name="newMonitor[RecordAudio]" value="1"<?php if ( $monitor->RecordAudio() ) { ?> checked="checked"<?php } ?>/></td></tr>