Merge branch 'master' of github.com:ZoneMinder/ZoneMinder

This commit is contained in:
Isaac Connor 2019-09-12 16:53:16 -04:00
commit 332fd80cf8
5 changed files with 183 additions and 279 deletions

View File

@ -289,9 +289,10 @@ static void zm_log_fps(double d, const char *postfix) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
void zm_dump_codecpar ( const AVCodecParameters *par ) {
Debug(1, "Dumping codecpar codec_type(%d) codec_id(%d) codec_tag(%d) width(%d) height(%d) bit_rate(%d) format(%d = %s)",
Debug(1, "Dumping codecpar codec_type(%d) codec_id(%d %s) codec_tag(%d) width(%d) height(%d) bit_rate(%d) format(%d = %s)",
par->codec_type,
par->codec_id,
avcodec_get_name(par->codec_id),
par->codec_tag,
par->width,
par->height,
@ -303,10 +304,11 @@ void zm_dump_codecpar ( const AVCodecParameters *par ) {
#endif
void zm_dump_codec(const AVCodecContext *codec) {
Debug(1, "Dumping codec_context codec_type(%d) codec_id(%d) width(%d) height(%d) timebase(%d/%d) format(%s)\n"
Debug(1, "Dumping codec_context codec_type(%d) codec_id(%d %s) width(%d) height(%d) timebase(%d/%d) format(%s) "
"gop_size %d max_b_frames %d me_cmp %d me_range %d qmin %d qmax %d",
codec->codec_type,
codec->codec_id,
avcodec_get_name(codec->codec_id),
codec->width,
codec->height,
codec->time_base.num,
@ -327,7 +329,6 @@ void zm_dump_codec(const AVCodecContext *codec) {
/* "user interface" functions */
void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) {
char buf[256];
Debug(1, "Dumping stream index i(%d) index(%d)", i, index );
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i];
@ -350,8 +351,14 @@ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
st->codec_info_nb_frames, codec->frame_size,
st->time_base.num, st->time_base.den
);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
Debug(1, "codec: %s", avcodec_get_name(st->codecpar->codec_id));
#else
char buf[256];
avcodec_string(buf, sizeof(buf), st->codec, is_output);
Debug(1, "codec: %s", buf);
#endif
if ( st->sample_aspect_ratio.num && // default
av_cmp_q(st->sample_aspect_ratio, codec->sample_aspect_ratio)
@ -477,6 +484,27 @@ bool is_audio_context( AVCodecContext *codec_context ) {
#endif
}
// Pull one encoded packet from the encoder, papering over the API change in
// libavcodec 57.64: newer builds use the send/receive API, older builds use
// avcodec_encode_audio2 with a NULL input frame (flush-style call).
// Returns 1 if `packet` was filled, 0 otherwise (including EOF and errors).
// Note: errors are logged (except EOF on the new API) but not propagated.
int zm_receive_packet(AVCodecContext *context, AVPacket &packet) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
int ret = avcodec_receive_packet(context, &packet);
if ( ret < 0 ) {
// AVERROR_EOF just means the encoder is fully drained — not an error.
if ( AVERROR_EOF != ret ) {
Error("Error encoding (%d) (%s)", ret,
av_err2str(ret));
}
return 0;
}
return 1;
#else
// Legacy API: passing a NULL frame asks the encoder to emit any buffered
// output; got_packet tells us whether `packet` actually holds data.
int got_packet = 0;
int ret = avcodec_encode_audio2(context, &packet, NULL, &got_packet);
if ( ret < 0 ) {
Error("Error encoding (%d) (%s)", ret, av_err2str(ret));
}
return got_packet;
#endif
} // end int zm_receive_packet(AVCodecContext *context, AVPacket &packet)
int zm_receive_frame(AVCodecContext *context, AVFrame *frame, AVPacket &packet) {
int ret;
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
@ -600,3 +628,10 @@ void dumpPacket(AVPacket *pkt, const char *text) {
pkt->duration);
Debug(2, "%s:%d:%s: %s", __FILE__, __LINE__, text, b);
}
// Copy the timing fields (pts/dts/duration) from ipkt to opkt, then rescale
// them from src_tb to dst_tb via av_packet_rescale_ts. Other packet fields
// (data, size, flags, stream_index) are deliberately left untouched.
void zm_packet_copy_rescale_ts(const AVPacket *ipkt, AVPacket *opkt, const AVRational src_tb, const AVRational dst_tb) {
opkt->pts = ipkt->pts;
opkt->dts = ipkt->dts;
opkt->duration = ipkt->duration;
// av_packet_rescale_ts converts all valid timing fields in opkt in place.
av_packet_rescale_ts(opkt, src_tb, dst_tb);
}

View File

@ -352,9 +352,12 @@ bool is_audio_stream(AVStream *);
bool is_video_context(AVCodec *);
bool is_audio_context(AVCodec *);
int zm_receive_packet(AVCodecContext *context, AVPacket &packet);
int zm_receive_frame(AVCodecContext *context, AVFrame *frame, AVPacket &packet);
int zm_send_frame(AVCodecContext *context, AVFrame *frame, AVPacket &packet);
void dumpPacket(AVStream *, AVPacket *,const char *text="");
void dumpPacket(AVPacket *,const char *text="");
void zm_packet_copy_rescale_ts(const AVPacket *ipkt, AVPacket *opkt, const AVRational src_tb, const AVRational dst_tb);
#endif // ZM_FFMPEG_H

View File

@ -193,18 +193,6 @@ VideoStore::VideoStore(
);
video_out_stream->r_frame_rate = video_in_stream->r_frame_rate;
}
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
ret = avcodec_parameters_from_context(video_out_stream->codecpar, video_out_ctx);
if ( ret < 0 ) {
Error("Could not initialize video_out_ctx parameters");
return;
} else {
zm_dump_codec(video_out_ctx);
}
zm_dump_codecpar(video_in_stream->codecpar);
zm_dump_codecpar(video_out_stream->codecpar);
#endif
Debug(3,
"Time bases: VIDEO in stream (%d/%d) in codec: (%d/%d) out "
"stream: (%d/%d) out codec (%d/%d)",
@ -222,6 +210,10 @@ VideoStore::VideoStore(
}
#if LIBAVCODEC_VERSION_CHECK(56, 35, 0, 64, 0)
/* I'm not entirely sure that this is a good idea. We may have to do it someday, but really only when transcoding.
 * I think what I was trying to achieve here was to have zm_dump_codecpar output nice info.
 */
#if 0
AVDictionary *opts = 0;
if ( (ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0 ) {
Warning("Can't open video codec (%s) %s",
@ -235,6 +227,24 @@ VideoStore::VideoStore(
while ( (e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL ) {
Warning("Encoder Option %s not recognized by ffmpeg codec", e->key);
}
ret = avcodec_parameters_from_context(video_out_stream->codecpar, video_out_ctx);
if ( ret < 0 ) {
Error("Could not initialize video_out_ctx parameters");
return;
} else {
zm_dump_codec(video_out_ctx);
}
#else
ret = avcodec_parameters_from_context(video_out_stream->codecpar, video_in_ctx);
if ( ret < 0 ) {
Error("Could not initialize video_out_ctx parameters");
return;
} else {
zm_dump_codec(video_out_ctx);
}
#endif
zm_dump_codecpar(video_in_stream->codecpar);
zm_dump_codecpar(video_out_stream->codecpar);
#endif
Monitor::Orientation orientation = monitor->getOrientation();
@ -274,6 +284,7 @@ VideoStore::VideoStore(
audio_first_pts = 0;
audio_first_dts = 0;
/* When encoding audio, these are used to tell us what the correct pts is, because it gets lost during resampling. */
audio_next_pts = 0;
audio_next_dts = 0;
@ -419,9 +430,9 @@ bool VideoStore::open() {
VideoStore::~VideoStore() {
if ( oc->pb ) {
int ret;
if ( audio_out_codec ) {
// The codec queues data. We need to send a flush command and write out
// whatever we get. Failures are not fatal.
AVPacket pkt;
@ -429,94 +440,48 @@ VideoStore::~VideoStore() {
pkt.data = NULL;
pkt.size = 0;
av_init_packet(&pkt);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// Put encoder into flushing mode
avcodec_send_frame(audio_out_ctx, NULL);
#endif
while (1) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_receive_packet(audio_out_ctx, &pkt);
if ( ret < 0 ) {
if ( AVERROR_EOF != ret ) {
Error("Error encoding audio while flushing (%d) (%s)", ret,
av_err2str(ret));
}
break;
}
#else
int got_packet = 0;
ret = avcodec_encode_audio2(audio_out_ctx, &pkt, NULL, &got_packet);
if ( ret < 0 ) {
Error("Error encoding audio while flushing (%d) (%s)", ret,
av_err2str(ret));
break;
}
Debug(1, "Have audio encoder, need to flush it's out");
if ( !got_packet ) {
break;
}
#endif
/*
* At the end of the file, we pass the remaining samples to
* the encoder. */
int frame_size = audio_out_ctx->frame_size;
while ( av_audio_fifo_size(fifo) > 0 ) {
/* Take one frame worth of audio samples from the FIFO buffer,
* encode it and write it to the output file. */
dumpPacket(&pkt, "raw from encoder");
// Need to adjust pts and dts and duration
Debug(1, "Remaining samples in fifo for AAC codec frame_size %d > fifo size %d",
frame_size, av_audio_fifo_size(fifo));
// Should probably set the frame size to what is reported. FIXME
if ( av_audio_fifo_read(fifo, (void **)out_frame->data, frame_size) ) {
av_init_packet(&opkt);
if ( zm_send_frame(audio_out_ctx, out_frame, opkt) ) {
if ( zm_receive_packet(audio_out_ctx, pkt) )
pkt.stream_index = audio_out_stream->index;
pkt.duration = av_rescale_q(
pkt.duration,
audio_out_ctx->time_base,
audio_out_stream->time_base);
// Scale the PTS of the outgoing packet to be the correct time base
if ( pkt.pts != AV_NOPTS_VALUE ) {
#if 0
pkt.pts = av_rescale_q(
pkt.pts,
audio_out_ctx->time_base,
audio_in_stream->time_base);
// audio_first_pts is in audio_in_stream time base
pkt.pts -= audio_first_pts;
pkt.pts = av_rescale_q(
pkt.pts,
audio_in_stream->time_base,
audio_out_stream->time_base);
#else
pkt.pts = av_rescale_q(
pkt.pts,
audio_out_ctx->time_base,
audio_out_stream->time_base);
}
} // end if data returned from fifo
} // end while still data in the fifo
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// Put encoder into flushing mode
avcodec_send_frame(audio_out_ctx, NULL);
#endif
Debug(2, "audio pkt.pts = %" PRId64 " from first_pts(%" PRId64 ")",
pkt.pts, audio_first_pts);
} else {
Debug(2, "pkt.pts = undef");
pkt.pts = AV_NOPTS_VALUE;
}
while (1) {
if ( ! zm_receive_packet(audio_out_ctx, pkt) )
break;
if ( pkt.dts != AV_NOPTS_VALUE ) {
#if 0
pkt.dts = av_rescale_q(
pkt.dts,
audio_out_ctx->time_base,
audio_in_stream->time_base);
pkt.dts -= audio_first_dts;
pkt.dts = av_rescale_q(
pkt.dts,
audio_in_stream->time_base,
audio_out_stream->time_base);
#else
pkt.dts = av_rescale_q(
pkt.dts,
audio_out_ctx->time_base,
audio_out_stream->time_base);
#endif
Debug(2, "pkt.dts = %" PRId64 " - first_dts(%" PRId64 ")",
pkt.dts, audio_first_dts);
} else {
pkt.dts = AV_NOPTS_VALUE;
}
dumpPacket(&pkt, "raw from encoder");
pkt.stream_index = audio_out_stream->index;
av_packet_rescale_ts(&pkt, audio_out_ctx->time_base, audio_out_stream->time_base);
dumpPacket(audio_out_stream, &pkt, "writing flushed packet");
av_interleaved_write_frame(oc, &pkt);
zm_av_packet_unref(&pkt);
@ -711,7 +676,7 @@ bool VideoStore::setup_resampler() {
}
}
if ( found ) {
Debug(3, "Sample rate is good");
Debug(3, "Sample rate is good %d", audio_out_ctx->sample_rate);
} else {
audio_out_ctx->sample_rate =
audio_out_codec->supported_samplerates[0];
@ -743,6 +708,7 @@ bool VideoStore::setup_resampler() {
audio_out_stream = NULL;
return false;
}
zm_dump_codec(audio_out_ctx);
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
ret = avcodec_parameters_from_context(
@ -752,6 +718,7 @@ bool VideoStore::setup_resampler() {
return false;
}
#endif
zm_dump_codecpar(audio_out_stream->codecpar);
Debug(3,
"Time bases: AUDIO in stream (%d/%d) in codec: (%d/%d) out "
@ -923,6 +890,7 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
);
if ( duration <= 0 ) {
// Why are we setting the duration to 1?
Warning("Duration is 0, setting to 1");
duration = ipkt->duration ? ipkt->duration : av_rescale_q(1,video_in_stream->time_base, video_out_stream->time_base);
}
}
@ -936,17 +904,6 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
opkt.pts = 0;
Debug(2, "Starting video first_pts will become %" PRId64, ipkt->pts);
video_first_pts = ipkt->pts;
#if 1
if ( audio_in_stream ) {
// Since audio starts after the start of the video, need to set this here.
audio_first_pts = av_rescale_q(
ipkt->pts,
video_in_stream->time_base,
audio_in_stream->time_base
);
Debug(2, "Starting audio first_pts will become %" PRId64, audio_first_pts);
}
#endif
} else {
opkt.pts = av_rescale_q(
ipkt->pts - video_first_pts,
@ -960,8 +917,6 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
} else {
Debug(3, "opkt.pts = undef");
opkt.pts = AV_NOPTS_VALUE;
// can't set 0, it will get rejected
//AV_NOPTS_VALUE;
}
// Just because the in stream wraps, doesn't mean the out needs to.
// Really, if we are limiting ourselves to 10min segments I can't imagine ever wrapping in the out.
@ -974,17 +929,6 @@ int VideoStore::writeVideoFramePacket(AVPacket *ipkt) {
opkt.dts = 0;
Debug(2, "Starting video first_dts will become (%" PRId64 ")", ipkt->dts);
video_first_dts = ipkt->dts;
#if 1
if ( audio_in_stream ) {
// Since audio starts after the start of the video, need to set this here.
audio_first_dts = av_rescale_q(
ipkt->dts,
video_in_stream->time_base,
audio_in_stream->time_base
);
Debug(2, "Starting audio first dts will become %" PRId64, audio_first_dts);
}
#endif
} else {
opkt.dts = av_rescale_q(
ipkt->dts - video_first_dts,
@ -1035,6 +979,40 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
}
dumpPacket(audio_in_stream, ipkt, "input packet");
if ( !audio_first_pts ) {
// Since audio starts after the start of the video, need to set this here.
audio_first_pts = av_rescale_q(
video_first_pts,
video_in_stream->time_base,
audio_in_stream->time_base
);
Debug(2, "Starting audio first_pts will become %" PRId64, audio_first_pts);
audio_next_pts = ipkt->pts - audio_first_pts;
if ( audio_next_pts < 0 ) {
audio_first_pts -= audio_next_pts;
audio_next_pts = 0;
}
}
if ( !audio_first_dts ) {
// Since audio starts after the start of the video, need to set this here.
audio_first_dts = av_rescale_q(
video_first_dts,
video_in_stream->time_base,
audio_in_stream->time_base
);
audio_next_dts = ipkt->dts - audio_first_dts;
if ( ipkt->dts < 0 ) {
audio_first_dts -= ipkt->dts;
ipkt->dts = 0;
}
Debug(2, "Starting audio first_dts will become %" PRId64, audio_first_dts);
}
// Need to adjust pts before feeding to decoder.... should really copy the pkt instead of modifying it
ipkt->pts -= audio_first_pts;
ipkt->dts -= audio_first_dts;
dumpPacket(audio_in_stream, ipkt, "after pts adjustment");
if ( audio_out_codec ) {
if ( ( ret = zm_receive_frame(audio_in_ctx, in_frame, *ipkt) ) < 0 ) {
Debug(3, "Not ready to receive frame");
@ -1042,112 +1020,22 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
}
zm_dump_frame(in_frame, "In frame from decode");
if ( in_frame->pts != AV_NOPTS_VALUE ) {
if ( !audio_first_pts ) {
audio_first_pts = in_frame->pts;
Debug(1, "No audio_first_pts setting to %" PRId64, audio_first_pts);
in_frame->pts = 0;
} else {
// out_frame_pts is in codec->timebase, audio_first_pts is in packet timebase.
in_frame->pts = in_frame->pts - audio_first_pts;
zm_dump_frame(in_frame, "in frame after pts adjustment");
}
} else {
// sending AV_NOPTS_VALUE doesn't really work but we seem to get it in ffmpeg 2.8
in_frame->pts = audio_next_pts;
}
if ( !resample_audio() ) {
//av_frame_unref(in_frame);
return 0;
}
zm_dump_frame(out_frame, "Out frame after resample");
#if 0
// out_frame pts is in the input pkt pts... needs to be adjusted before sending to the encoder
if ( out_frame->pts != AV_NOPTS_VALUE ) {
if ( !audio_first_pts ) {
audio_first_pts = out_frame->pts;
Debug(1, "No audio_first_pts setting to %" PRId64, audio_first_pts);
out_frame->pts = 0;
} else {
// out_frame_pts is in codec->timebase, audio_first_pts is in packet timebase.
out_frame->pts = out_frame->pts - audio_first_pts;
zm_dump_frame(out_frame, "Out frame after pts adjustment");
}
//
} else {
// sending AV_NOPTS_VALUE doesn't really work but we seem to get it in ffmpeg 2.8
out_frame->pts = audio_next_pts;
}
audio_next_pts = out_frame->pts + out_frame->nb_samples;
#endif
av_init_packet(&opkt);
if ( !zm_send_frame(audio_out_ctx, out_frame, opkt) ) {
return 0;
}
dumpPacket(audio_out_stream, &opkt, "raw opkt");
Debug(1, "Duration before %d in %d/%d", opkt.duration,
audio_out_ctx->time_base.num,
audio_out_ctx->time_base.den);
opkt.duration = av_rescale_q(
opkt.duration,
audio_out_ctx->time_base,
audio_out_stream->time_base);
Debug(1, "Duration after %d in %d/%d", opkt.duration,
audio_out_stream->time_base.num,
audio_out_stream->time_base.den);
// Scale the PTS of the outgoing packet to be the correct time base
#if 0
if ( ipkt->pts != AV_NOPTS_VALUE ) {
if ( !audio_first_pts ) {
opkt.pts = 0;
audio_first_pts = ipkt->pts;
Debug(1, "No audio_first_pts");
} else {
opkt.pts = av_rescale_q(
opkt.pts,
av_packet_rescale_ts(&opkt,
audio_out_ctx->time_base,
audio_out_stream->time_base);
opkt.pts -= audio_first_pts;
Debug(2, "audio opkt.pts = %" PRId64 " from first_pts %" PRId64,
opkt.pts, audio_first_pts);
}
} else {
Debug(2, "opkt.pts = undef");
opkt.pts = AV_NOPTS_VALUE;
}
if ( opkt.dts != AV_NOPTS_VALUE ) {
if ( !audio_first_dts ) {
opkt.dts = 0;
audio_first_dts = opkt.dts;
} else {
opkt.dts = av_rescale_q(
opkt.dts,
audio_out_ctx->time_base,
audio_out_stream->time_base);
opkt.dts -= audio_first_dts;
Debug(2, "audio opkt.dts = %" PRId64 " from first_dts %" PRId64,
opkt.dts, audio_first_dts);
}
audio_last_dts = opkt.dts;
} else {
opkt.dts = AV_NOPTS_VALUE;
}
#else
opkt.pts = av_rescale_q(
opkt.pts,
audio_out_ctx->time_base,
audio_out_stream->time_base);
opkt.dts = av_rescale_q(
opkt.dts,
audio_out_ctx->time_base,
audio_out_stream->time_base);
#endif
write_packet(&opkt, audio_out_stream);
zm_av_packet_unref(&opkt);
@ -1155,27 +1043,8 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
#if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0)
// While the encoder still has packets for us
while ( !avcodec_receive_packet(audio_out_ctx, &opkt) ) {
opkt.pts = av_rescale_q(
opkt.pts,
audio_out_ctx->time_base,
audio_out_stream->time_base);
opkt.dts = av_rescale_q(
opkt.dts,
audio_out_ctx->time_base,
audio_out_stream->time_base);
av_packet_rescale_ts(&opkt, audio_out_ctx->time_base, audio_out_stream->time_base);
dumpPacket(audio_out_stream, &opkt, "raw opkt");
Debug(1, "Duration before %d in %d/%d", opkt.duration,
audio_out_ctx->time_base.num,
audio_out_ctx->time_base.den);
opkt.duration = av_rescale_q(
opkt.duration,
audio_out_ctx->time_base,
audio_out_stream->time_base);
Debug(1, "Duration after %d in %d/%d", opkt.duration,
audio_out_stream->time_base.num,
audio_out_stream->time_base.den);
write_packet(&opkt, audio_out_stream);
}
#endif
@ -1188,6 +1057,7 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
opkt.size = ipkt->size;
opkt.flags = ipkt->flags;
#if 0
if ( ipkt->duration && (ipkt->duration != AV_NOPTS_VALUE) ) {
opkt.duration = av_rescale_q(
ipkt->duration,
@ -1229,6 +1099,12 @@ int VideoStore::writeAudioFramePacket(AVPacket *ipkt) {
} else {
opkt.dts = AV_NOPTS_VALUE;
}
#else
opkt.duration = ipkt->duration;
opkt.pts = ipkt->pts;
opkt.dts = ipkt->dts;
av_packet_rescale_ts(&opkt, audio_in_stream->time_base, audio_out_stream->time_base);
#endif
write_packet(&opkt, audio_out_stream);
zm_av_packet_unref(&opkt);
@ -1318,12 +1194,17 @@ int VideoStore::resample_audio() {
zm_dump_frame(out_frame, "Out frame after fifo read");
// resampling changes the duration because the timebase is 1/samples
// I think we should be dealing in codec timebases not stream
#if 0
if ( in_frame->pts != AV_NOPTS_VALUE ) {
out_frame->pts = av_rescale_q(
in_frame->pts,
audio_in_ctx->time_base,
audio_out_ctx->time_base);
}
#else
out_frame->pts = audio_next_pts;
audio_next_pts += out_frame->nb_samples;
#endif
zm_dump_frame(out_frame, "Out frame after timestamp conversion");
#else
#if defined(HAVE_LIBAVRESAMPLE)

View File

@ -24,12 +24,12 @@ function gotoStep2( element ) {
function configureButtons(element) {
var form = element.form;
if (form.elements.namedItem("nextBtn")) {
if (form.elements.namedItem('nextBtn')) {
form.nextBtn.disabled = (form.probe.selectedIndex==0) ||
(form.username == "") || (form.username == null) ||
(form.password == "") || (form.password == null);
(form.Username == '') || (form.Username == null) ||
(form.Password == '') || (form.Password == null);
}
if (form.elements.namedItem("saveBtn")) {
if (form.elements.namedItem('saveBtn')) {
form.saveBtn.disabled = (form.probe.selectedIndex==0);
}
}

View File

@ -41,7 +41,7 @@ function execONVIF( $cmd ) {
Please the following command from a command line for more information:<br/><br/>$shell_command"
);
} else {
ZM\Logger::Debug( "Results from probe: " . implode( '<br/>', $output ) );
ZM\Logger::Debug('Results from probe: '.implode('<br/>', $output));
}
return $output;
@ -141,20 +141,6 @@ if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) {
$ip = $matches[1];
}
$host = $ip;
/*
if ( isset($monitors[$ip]) )
{
$monitor = $monitors[$ip];
$sourceString .= " (".$monitor['Name'].")";
}
else
{
$sourceString .= " - ".translate('Available');
}
$cameras[$sourceDesc] = $sourceString;
}
*/
// $sourceDesc = htmlspecialchars(serialize($camera['monitor']));
$sourceDesc = base64_encode(json_encode($camera['monitor']));
$sourceString = $camera['model'].' @ '.$host.' using version '.$camera['monitor']['SOAP'];
$cameras[$sourceDesc] = $sourceString;
@ -179,28 +165,27 @@ if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) {
</p>
<p>
<label for="probe"><?php echo translate('DetectedCameras') ?></label>
<?php echo htmlSelect('probe', $cameras, null, array('onchange'=>'configureButtons(this)')); ?>
<?php echo htmlSelect('probe', $cameras, null, array('data-on-change-this'=>'configureButtons')); ?>
</p>
<p>
<?php echo translate('OnvifCredentialsIntro') ?>
</p>
<p>
<label for="username"><?php echo translate('Username') ?></label>
<input type="text" name="username" value="" onChange="configureButtons(this)"/>
<label for="Username"><?php echo translate('Username') ?></label>
<input type="text" name="Username" data-on-change-this="configureButtons"/>
</p>
<p>
<label for="password"><?php echo translate('Password') ?></label>
<input type="password" name="password" value="" onChange="configureButtons(this)"/>
<label for="Password"><?php echo translate('Password') ?></label>
<input type="password" name="Password" data-on-change-this="configureButtons"/>
</p>
<div id="contentButtons">
<input type="button" value="<?php echo translate('Cancel') ?>" data-on-click="closeWindow"/>
<input type="submit" name="nextBtn" value="<?php echo translate('Next') ?>" data-on-click-this="gotoStep2" disabled="disabled"/>
<button type="button" data-on-click="closeWindow"><?php echo translate('Cancel') ?></button>
<button type="button" name="nextBtn" data-on-click-this="gotoStep2" disabled="disabled"><?php echo translate('Next') ?></button>
</div>
</form>
</div>
</div>
</body>
</html>
<?php
//==== STEP 2 ============================================================
@ -221,7 +206,7 @@ if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) {
//print $monitor['Host'].", ".$_REQUEST['username'].", ".$_REQUEST['password']."<br/>";
$detprofiles = probeProfiles($monitor['Host'], $monitor['SOAP'], $_REQUEST['username'], $_REQUEST['password']);
$detprofiles = probeProfiles($monitor['Host'], $monitor['SOAP'], $_REQUEST['Username'], $_REQUEST['Password']);
foreach ( $detprofiles as $profile ) {
$monitor = $camera['monitor'];
@ -258,18 +243,18 @@ if ( !isset($_REQUEST['step']) || ($_REQUEST['step'] == '1') ) {
</p>
<p>
<label for="probe"><?php echo translate('DetectedProfiles') ?></label>
<?php echo htmlSelect('probe', $profiles, null, array('onchange'=>'configureButtons(this)')); ?>
<?php echo htmlSelect('probe', $profiles, null, array('data-on-change-this'=>'configureButtons')); ?>
</p>
<div id="contentButtons">
<input type="button" name="prevBtn" value="<?php echo translate('Prev') ?>" data-on-click-this="gotoStep1"/>
<input type="button" value="<?php echo translate('Cancel') ?>" data-on-click="closeWindow"/>
<input type="submit" name="saveBtn" value="<?php echo translate('Save') ?>" data-on-click-this="submitCamera" disabled="disabled"/>
<button type="button" name="prevBtn" data-on-click-this="gotoStep1"><?php echo translate('Prev') ?></button>
<button type="button" data-on-click="closeWindow"><?php echo translate('Cancel') ?></button>
<button type="button" name="saveBtn" data-on-click-this="submitCamera" disabled="disabled"><?php echo translate('Save') ?></button>
</div>
</form>
</div>
</div>
</body>
</html>
<?php
} // end if step 1 or 2
?>
</html>