whitespacing updates

This commit is contained in:
Isaac Connor 2016-06-21 12:21:18 -04:00
parent 31a5252afa
commit 6a8db582ff
20 changed files with 3841 additions and 3838 deletions

View File

@ -32,19 +32,19 @@ Camera::Camera( int p_id, SourceType p_type, int p_width, int p_height, int p_co
colour( p_colour ), colour( p_colour ),
contrast( p_contrast ), contrast( p_contrast ),
capture( p_capture ), capture( p_capture ),
record_audio( p_record_audio ) record_audio( p_record_audio )
{ {
pixels = width * height; pixels = width * height;
imagesize = pixels * colours; imagesize = pixels * colours;
Debug(2,"New camera id: %d width: %d height: %d colours: %d subpixelorder: %d capture: %d",id,width,height,colours,subpixelorder,capture); Debug(2,"New camera id: %d width: %d height: %d colours: %d subpixelorder: %d capture: %d",id,width,height,colours,subpixelorder,capture);
/* Because many loops are unrolled and work on 16 colours/time or 4 pixels/time, we have to meet requirements */ /* Because many loops are unrolled and work on 16 colours/time or 4 pixels/time, we have to meet requirements */
if((colours == ZM_COLOUR_GRAY8 || colours == ZM_COLOUR_RGB32) && (imagesize % 16) != 0) { if((colours == ZM_COLOUR_GRAY8 || colours == ZM_COLOUR_RGB32) && (imagesize % 16) != 0) {
Fatal("Image size is not multiples of 16"); Fatal("Image size is not multiples of 16");
} else if(colours == ZM_COLOUR_RGB24 && ((imagesize % 16) != 0 || (imagesize % 12) != 0)) { } else if(colours == ZM_COLOUR_RGB24 && ((imagesize % 16) != 0 || (imagesize % 12) != 0)) {
Fatal("Image size is not multiples of 12 and 16"); Fatal("Image size is not multiples of 12 and 16");
} }
} }
Camera::~Camera() Camera::~Camera()

View File

@ -35,83 +35,83 @@ cURLCamera::cURLCamera( int p_id, const std::string &p_path, const std::string &
mPath( p_path ), mUser( p_user ), mPass ( p_pass ), bTerminate( false ), bReset( false ), mode ( MODE_UNSET ) mPath( p_path ), mUser( p_user ), mPass ( p_pass ), bTerminate( false ), bReset( false ), mode ( MODE_UNSET )
{ {
if ( capture ) if ( capture )
{ {
Initialise(); Initialise();
} }
} }
/**
 * Destructor: mirrors the constructor — Terminate() (stop the worker
 * thread, destroy the pthread objects, global curl cleanup) is only
 * run when capture was enabled, since Initialise() only ran then.
 */
cURLCamera::~cURLCamera() {
  if ( capture ) {
    Terminate();
  }
}
void cURLCamera::Initialise() void cURLCamera::Initialise()
{ {
content_length_match_len = strlen(content_length_match); content_length_match_len = strlen(content_length_match);
content_type_match_len = strlen(content_type_match); content_type_match_len = strlen(content_type_match);
databuffer.expand(CURL_BUFFER_INITIAL_SIZE); databuffer.expand(CURL_BUFFER_INITIAL_SIZE);
/* cURL initialization */ /* cURL initialization */
cRet = curl_global_init(CURL_GLOBAL_ALL); cRet = curl_global_init(CURL_GLOBAL_ALL);
if(cRet != CURLE_OK) { if(cRet != CURLE_OK) {
Fatal("libcurl initialization failed: ", curl_easy_strerror(cRet)); Fatal("libcurl initialization failed: ", curl_easy_strerror(cRet));
} }
Debug(2,"libcurl version: %s",curl_version()); Debug(2,"libcurl version: %s",curl_version());
/* Create the shared data mutex */ /* Create the shared data mutex */
nRet = pthread_mutex_init(&shareddata_mutex, NULL); nRet = pthread_mutex_init(&shareddata_mutex, NULL);
if(nRet != 0) { if(nRet != 0) {
Fatal("Shared data mutex creation failed: %s",strerror(nRet)); Fatal("Shared data mutex creation failed: %s",strerror(nRet));
} }
/* Create the data available condition variable */ /* Create the data available condition variable */
nRet = pthread_cond_init(&data_available_cond, NULL); nRet = pthread_cond_init(&data_available_cond, NULL);
if(nRet != 0) { if(nRet != 0) {
Fatal("Data available condition variable creation failed: %s",strerror(nRet)); Fatal("Data available condition variable creation failed: %s",strerror(nRet));
} }
/* Create the request complete condition variable */ /* Create the request complete condition variable */
nRet = pthread_cond_init(&request_complete_cond, NULL); nRet = pthread_cond_init(&request_complete_cond, NULL);
if(nRet != 0) { if(nRet != 0) {
Fatal("Request complete condition variable creation failed: %s",strerror(nRet)); Fatal("Request complete condition variable creation failed: %s",strerror(nRet));
} }
/* Create the thread */ /* Create the thread */
nRet = pthread_create(&thread, NULL, thread_func_dispatcher, this); nRet = pthread_create(&thread, NULL, thread_func_dispatcher, this);
if(nRet != 0) { if(nRet != 0) {
Fatal("Thread creation failed: %s",strerror(nRet)); Fatal("Thread creation failed: %s",strerror(nRet));
} }
} }
void cURLCamera::Terminate() void cURLCamera::Terminate()
{ {
/* Signal the thread to terminate */ /* Signal the thread to terminate */
bTerminate = true; bTerminate = true;
/* Wait for thread termination */ /* Wait for thread termination */
pthread_join(thread, NULL); pthread_join(thread, NULL);
/* Destroy condition variables */ /* Destroy condition variables */
pthread_cond_destroy(&request_complete_cond); pthread_cond_destroy(&request_complete_cond);
pthread_cond_destroy(&data_available_cond); pthread_cond_destroy(&data_available_cond);
/* Destroy mutex */ /* Destroy mutex */
pthread_mutex_destroy(&shareddata_mutex); pthread_mutex_destroy(&shareddata_mutex);
/* cURL cleanup */ /* cURL cleanup */
curl_global_cleanup(); curl_global_cleanup();
} }
/* Nothing to prime for a cURL source — the worker thread was already
 * started by Initialise(). Always reports success. */
int cURLCamera::PrimeCapture() {
  //Info( "Priming capture from %s", mPath.c_str() );
  return 0;
}
int cURLCamera::PreCapture() int cURLCamera::PreCapture()
@ -122,187 +122,187 @@ int cURLCamera::PreCapture()
/**
 * Block until a complete frame is available and decode it into `image`.
 *
 * MODE_STREAM: parses the multipart subheaders out of databuffer to
 * find the next part's Content-Length / Content-Type, then decodes
 * that many bytes as JPEG.
 * MODE_SINGLE: waits for the worker thread to post a completed
 * download size on single_offsets, then decodes that many bytes.
 *
 * Runs with shareddata_mutex held (lock()/unlock()) and sleeps on the
 * condition variables while waiting for the worker thread.
 *
 * Returns 0 on success, negative on error.
 *
 * NOTE(review): the early `return -20/-18/-19` error paths exit while
 * shareddata_mutex is still held — the final unlock() is skipped.
 * Confirm whether callers can recover from that.
 */
int cURLCamera::Capture( Image &image )
{
  bool frameComplete = false;

  /* MODE_STREAM specific variables */
  bool SubHeadersParsingComplete = false;
  unsigned int frame_content_length = 0;
  std::string frame_content_type;
  bool need_more_data = false;

  /* Grab the mutex to ensure exclusive access to the shared data */
  lock();

  while (!frameComplete) {

    /* If the work thread did a reset (after a retry), reset our local
     * parsing state so we re-sync on the next boundary. */
    if(bReset) {
      SubHeadersParsingComplete = false;
      frame_content_length = 0;
      frame_content_type.clear();
      need_more_data = false;
      bReset = false;
    }

    if(mode == MODE_UNSET) {
      /* Don't have a mode yet (Content-Type not seen). Sleep while
       * waiting for data; header_callback() will set the mode. */
      nRet = pthread_cond_wait(&data_available_cond,&shareddata_mutex);
      if(nRet != 0) {
        Error("Failed waiting for available data condition variable: %s",strerror(nRet));
        return -20;
      }
    }

    if(mode == MODE_STREAM) {

      /* Subheader parsing: consume one "Name: value" line per pass
       * until the blank line that ends the part headers. */
      while(!SubHeadersParsingComplete && !need_more_data) {

        size_t crlf_start, crlf_end, crlf_size;
        std::string subheader;

        /* Check if the buffer contains something */
        if(databuffer.empty()) {
          /* Empty buffer, wait for data */
          need_more_data = true;
          break;
        }

        /* Find crlf start (first CR or LF byte in the buffer) */
        crlf_start = memcspn(databuffer,"\r\n",databuffer.size());
        if(crlf_start == databuffer.size()) {
          /* Not found, wait for more data */
          need_more_data = true;
          break;
        }

        /* See if we have enough data for determining crlf length */
        if(databuffer.size() < crlf_start+5) {
          /* Need more data */
          need_more_data = true;
          break;
        }

        /* Find crlf end and calculate crlf size (run of CR/LF bytes,
         * up to 5 examined) */
        crlf_end = memspn(((const char*)databuffer.head())+crlf_start,"\r\n",5);
        crlf_size = (crlf_start + crlf_end) - crlf_start;

        /* Is this the end of a previous stream? (This is just before the boundary) */
        if(crlf_start == 0) {
          databuffer.consume(crlf_size);
          continue;
        }

        /* Check for invalid CRLF size */
        if(crlf_size > 4) {
          Error("Invalid CRLF length");
        }

        /* Check if the crlf is \n\n or \r\n\r\n (marks end of headers, this is the last header) */
        if( (crlf_size == 2 && memcmp(((const char*)databuffer.head())+crlf_start,"\n\n",2) == 0) || (crlf_size == 4 && memcmp(((const char*)databuffer.head())+crlf_start,"\r\n\r\n",4) == 0) ) {
          /* This is the last header */
          SubHeadersParsingComplete = true;
        }

        /* Copy the subheader, excluding the crlf */
        subheader.assign(databuffer, crlf_start);

        /* Advance the buffer past this one */
        databuffer.consume(crlf_start+crlf_size);

        Debug(7,"Got subheader: %s",subheader.c_str());

        /* Find where the data in this header starts: after the last
         * space, or after the ':' if there is no space. */
        size_t subheader_data_start = subheader.rfind(' ');
        if(subheader_data_start == std::string::npos) {
          subheader_data_start = subheader.find(':');
        }

        /* Extract the data into a string */
        std::string subheader_data = subheader.substr(subheader_data_start+1, std::string::npos);

        Debug(8,"Got subheader data: %s",subheader_data.c_str());

        /* Check the header */
        if(strncasecmp(subheader.c_str(),content_length_match,content_length_match_len) == 0) {
          /* Found the content-length header */
          frame_content_length = atoi(subheader_data.c_str());
          Debug(6,"Got content-length subheader: %d",frame_content_length);
        } else if(strncasecmp(subheader.c_str(),content_type_match,content_type_match_len) == 0) {
          /* Found the content-type header */
          frame_content_type = subheader_data;
          Debug(6,"Got content-type subheader: %s",frame_content_type.c_str());
        }

      }

      /* Attempt to extract the frame */
      if(!need_more_data) {
        if(!SubHeadersParsingComplete) {
          /* We haven't parsed all headers yet */
          need_more_data = true;
        } else if(frame_content_length <= 0) {
          /* Invalid frame */
          Error("Invalid frame: invalid content length");
        } else if(frame_content_type != "image/jpeg") {
          /* Unsupported frame type */
          Error("Unsupported frame: %s",frame_content_type.c_str());
        } else if(frame_content_length > databuffer.size()) {
          /* Incomplete frame, wait for more data */
          need_more_data = true;
        } else {
          /* All good. decode the image */
          image.DecodeJpeg(databuffer.extract(frame_content_length), frame_content_length, colours, subpixelorder);
          frameComplete = true;
        }
      }

      /* Attempt to get more data */
      if(need_more_data) {
        nRet = pthread_cond_wait(&data_available_cond,&shareddata_mutex);
        if(nRet != 0) {
          Error("Failed waiting for available data condition variable: %s",strerror(nRet));
          return -18;
        }
        need_more_data = false;
      }

    } else if(mode == MODE_SINGLE) {

      /* Check if we have anything */
      if (!single_offsets.empty()) {
        if( (single_offsets.front() > 0) && (databuffer.size() >= single_offsets.front()) ) {
          /* Extract frame */
          image.DecodeJpeg(databuffer.extract(single_offsets.front()), single_offsets.front(), colours, subpixelorder);
          single_offsets.pop_front();
          frameComplete = true;
        } else {
          /* This shouldn't happen: buffer and offsets disagree, so
           * drop the bad entry and resynchronise. */
          Error("Internal error. Attempting recovery");
          databuffer.consume(single_offsets.front());
          single_offsets.pop_front();
        }
      } else {
        /* Don't have a frame yet, wait for the request complete condition variable */
        nRet = pthread_cond_wait(&request_complete_cond,&shareddata_mutex);
        if(nRet != 0) {
          Error("Failed waiting for request complete condition variable: %s",strerror(nRet));
          return -19;
        }
      }

    } else {
      /* Failed to match content-type */
      Fatal("Unable to match Content-Type. Check URL, username and password");
    } /* mode */

  } /* frameComplete loop */

  /* Release the mutex */
  unlock();

  if(!frameComplete)
    return -1;

  return 0;
}
int cURLCamera::PostCapture() int cURLCamera::PostCapture()
@ -313,7 +313,7 @@ int cURLCamera::PostCapture()
/* Capture-and-record is not supported for cURL sources: log the fact
 * and report success so callers keep running. */
int cURLCamera::CaptureAndRecord( Image &image, bool recording, char* event_directory )
{
  Error("Capture and Record not implemented for the cURL camera type");
  // Nothing to do here
  return 0;
}
@ -321,241 +321,241 @@ int cURLCamera::CaptureAndRecord( Image &image, bool recording, char* event_dire
size_t cURLCamera::data_callback(void *buffer, size_t size, size_t nmemb, void *userdata) size_t cURLCamera::data_callback(void *buffer, size_t size, size_t nmemb, void *userdata)
{ {
lock(); lock();
/* Append the data we just received to our buffer */ /* Append the data we just received to our buffer */
databuffer.append((const char*)buffer, size*nmemb); databuffer.append((const char*)buffer, size*nmemb);
/* Signal data available */ /* Signal data available */
nRet = pthread_cond_signal(&data_available_cond); nRet = pthread_cond_signal(&data_available_cond);
if(nRet != 0) { if(nRet != 0) {
Error("Failed signaling data available condition variable: %s",strerror(nRet)); Error("Failed signaling data available condition variable: %s",strerror(nRet));
return -16; return -16;
} }
unlock(); unlock();
/* Return bytes processed */ /* Return bytes processed */
return size*nmemb; return size*nmemb;
} }
size_t cURLCamera::header_callback( void *buffer, size_t size, size_t nmemb, void *userdata) size_t cURLCamera::header_callback( void *buffer, size_t size, size_t nmemb, void *userdata)
{ {
std::string header; std::string header;
header.assign((const char*)buffer, size*nmemb); header.assign((const char*)buffer, size*nmemb);
Debug(4,"Got header: %s",header.c_str()); Debug(4,"Got header: %s",header.c_str());
/* Check Content-Type header */ /* Check Content-Type header */
if(strncasecmp(header.c_str(),content_type_match,content_type_match_len) == 0) { if(strncasecmp(header.c_str(),content_type_match,content_type_match_len) == 0) {
size_t pos = header.find(';'); size_t pos = header.find(';');
if(pos != std::string::npos) { if(pos != std::string::npos) {
header.erase(pos, std::string::npos); header.erase(pos, std::string::npos);
} }
pos = header.rfind(' '); pos = header.rfind(' ');
if(pos == std::string::npos) { if(pos == std::string::npos) {
pos = header.find(':'); pos = header.find(':');
} }
std::string content_type = header.substr(pos+1, std::string::npos); std::string content_type = header.substr(pos+1, std::string::npos);
Debug(6,"Content-Type is: %s",content_type.c_str()); Debug(6,"Content-Type is: %s",content_type.c_str());
lock(); lock();
const char* multipart_match = "multipart/x-mixed-replace"; const char* multipart_match = "multipart/x-mixed-replace";
const char* image_jpeg_match = "image/jpeg"; const char* image_jpeg_match = "image/jpeg";
if(strncasecmp(content_type.c_str(),multipart_match,strlen(multipart_match)) == 0) { if(strncasecmp(content_type.c_str(),multipart_match,strlen(multipart_match)) == 0) {
Debug(7,"Content type matched as multipart/x-mixed-replace"); Debug(7,"Content type matched as multipart/x-mixed-replace");
mode = MODE_STREAM; mode = MODE_STREAM;
} else if(strncasecmp(content_type.c_str(),image_jpeg_match,strlen(image_jpeg_match)) == 0) { } else if(strncasecmp(content_type.c_str(),image_jpeg_match,strlen(image_jpeg_match)) == 0) {
Debug(7,"Content type matched as image/jpeg"); Debug(7,"Content type matched as image/jpeg");
mode = MODE_SINGLE; mode = MODE_SINGLE;
} }
unlock(); unlock();
} }
/* Return bytes processed */ /* Return bytes processed */
return size*nmemb; return size*nmemb;
} }
/**
 * Worker thread body: configures a libcurl easy handle for this
 * camera's URL (header/data/progress callbacks route back into this
 * object via the *_dispatcher free functions) and runs the transfer
 * loop, retrying up to CURL_MAXRETRY times on errors. Returns a
 * status code cast to void* (0 on clean termination, -50 after a
 * failed transfer).
 */
void* cURLCamera::thread_func()
{
  long tRet;
  double dSize;

  c = curl_easy_init();
  if(c == NULL) {
    Fatal("Failed getting easy handle from libcurl");
  }

  /* Set URL */
  cRet = curl_easy_setopt(c, CURLOPT_URL, mPath.c_str());
  if(cRet != CURLE_OK)
    Fatal("Failed setting libcurl URL: %s", curl_easy_strerror(cRet));

  /* Header callback */
  cRet = curl_easy_setopt(c, CURLOPT_HEADERFUNCTION, &header_callback_dispatcher);
  if(cRet != CURLE_OK)
    Fatal("Failed setting libcurl header callback function: %s", curl_easy_strerror(cRet));
  cRet = curl_easy_setopt(c, CURLOPT_HEADERDATA, this);
  if(cRet != CURLE_OK)
    Fatal("Failed setting libcurl header callback object: %s", curl_easy_strerror(cRet));

  /* Data callback */
  cRet = curl_easy_setopt(c, CURLOPT_WRITEFUNCTION, &data_callback_dispatcher);
  if(cRet != CURLE_OK)
    Fatal("Failed setting libcurl data callback function: %s", curl_easy_strerror(cRet));
  cRet = curl_easy_setopt(c, CURLOPT_WRITEDATA, this);
  if(cRet != CURLE_OK)
    Fatal("Failed setting libcurl data callback object: %s", curl_easy_strerror(cRet));

  /* Progress callback — enabled so progress_callback() can abort the
   * transfer when bTerminate is set. */
  cRet = curl_easy_setopt(c, CURLOPT_NOPROGRESS, 0);
  if(cRet != CURLE_OK)
    Fatal("Failed enabling libcurl progress callback function: %s", curl_easy_strerror(cRet));
  cRet = curl_easy_setopt(c, CURLOPT_PROGRESSFUNCTION, &progress_callback_dispatcher);
  if(cRet != CURLE_OK)
    Fatal("Failed setting libcurl progress callback function: %s", curl_easy_strerror(cRet));
  cRet = curl_easy_setopt(c, CURLOPT_PROGRESSDATA, this);
  if(cRet != CURLE_OK)
    Fatal("Failed setting libcurl progress callback object: %s", curl_easy_strerror(cRet));

  /* Set username and password (optional; errors are non-fatal) */
  if(!mUser.empty()) {
    cRet = curl_easy_setopt(c, CURLOPT_USERNAME, mUser.c_str());
    if(cRet != CURLE_OK)
      Error("Failed setting username: %s", curl_easy_strerror(cRet));
  }
  if(!mPass.empty()) {
    cRet = curl_easy_setopt(c, CURLOPT_PASSWORD, mPass.c_str());
    if(cRet != CURLE_OK)
      Error("Failed setting password: %s", curl_easy_strerror(cRet));
  }

  /* Authentication preference: let libcurl pick any supported method */
  cRet = curl_easy_setopt(c, CURLOPT_HTTPAUTH, CURLAUTH_ANY);
  if(cRet != CURLE_OK)
    Warning("Failed setting libcurl acceptable http authenication methods: %s", curl_easy_strerror(cRet));

  /* Work loop */
  for(int attempt=1;attempt<=CURL_MAXRETRY;attempt++) {
    tRet = 0;
    while(!bTerminate) {
      /* Do the work */
      cRet = curl_easy_perform(c);

      if(mode == MODE_SINGLE) {
        if(cRet != CURLE_OK) {
          break;
        }
        /* Attempt to get the size of the file */
        cRet = curl_easy_getinfo(c, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &dSize);
        if(cRet != CURLE_OK) {
          break;
        }
        /* We need to lock for the offsets array and the condition variable */
        lock();
        /* Push the size into our offsets array.
         * NOTE(review): dSize is a double pushed into a
         * std::deque<size_t>; fractional/oversized values are
         * truncated — confirm acceptable. */
        if(dSize > 0) {
          single_offsets.push_back(dSize);
        } else {
          Fatal("Unable to get the size of the image");
        }
        /* Signal the request complete condition variable so Capture()
         * can decode this download. */
        tRet = pthread_cond_signal(&request_complete_cond);
        if(tRet != 0) {
          Error("Failed signaling request completed condition variable: %s",strerror(tRet));
        }
        /* Unlock */
        unlock();
      } else if (mode == MODE_STREAM) {
        /* A stream transfer only returns on error/abort; leave the
         * inner loop and let the return-value check decide. */
        break;
      }
    }

    /* Return value checking */
    if(cRet == CURLE_ABORTED_BY_CALLBACK || bTerminate) {
      /* Aborted (progress_callback saw bTerminate) */
      break;
    } else if (cRet != CURLE_OK) {
      /* Some error */
      Error("cURL Request failed: %s",curl_easy_strerror(cRet));
      if(attempt < CURL_MAXRETRY) {
        Error("Retrying.. Attempt %d of %d",attempt,CURL_MAXRETRY);
        /* Do a reset: drop buffered data and tell Capture() (via
         * bReset) to re-sync its parsing state. */
        lock();
        databuffer.clear();
        single_offsets.clear();
        mode = MODE_UNSET;
        bReset = true;
        unlock();
      }
      tRet = -50;
    }
  }

  /* Cleanup */
  curl_easy_cleanup(c);
  c = NULL;

  return (void*)tRet;
}
int cURLCamera::lock() { int cURLCamera::lock() {
int nRet; int nRet;
/* Lock shared data */ /* Lock shared data */
nRet = pthread_mutex_lock(&shareddata_mutex); nRet = pthread_mutex_lock(&shareddata_mutex);
if(nRet != 0) { if(nRet != 0) {
Error("Failed locking shared data mutex: %s",strerror(nRet)); Error("Failed locking shared data mutex: %s",strerror(nRet));
} }
return nRet; return nRet;
} }
int cURLCamera::unlock() { int cURLCamera::unlock() {
int nRet; int nRet;
/* Unlock shared data */ /* Unlock shared data */
nRet = pthread_mutex_unlock(&shareddata_mutex); nRet = pthread_mutex_unlock(&shareddata_mutex);
if(nRet != 0) { if(nRet != 0) {
Error("Failed unlocking shared data mutex: %s",strerror(nRet)); Error("Failed unlocking shared data mutex: %s",strerror(nRet));
} }
return nRet; return nRet;
} }
int cURLCamera::progress_callback(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow) int cURLCamera::progress_callback(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow)
{ {
/* Signal the curl thread to terminate */ /* Signal the curl thread to terminate */
if(bTerminate) if(bTerminate)
return -10; return -10;
return 0; return 0;
} }
/* These functions call the functions in the class for the correct object */ /* These functions call the functions in the class for the correct object */
/* Static trampoline: libcurl passes the cURLCamera* as userdata, so
 * recover it and forward to the member data_callback(). */
size_t data_callback_dispatcher(void *buffer, size_t size, size_t nmemb, void *userdata)
{
  cURLCamera* camera = (cURLCamera*)userdata;
  return camera->data_callback(buffer, size, nmemb, userdata);
}
/* Static trampoline: forward libcurl's header callback to the
 * cURLCamera instance carried in userdata. */
size_t header_callback_dispatcher(void *buffer, size_t size, size_t nmemb, void *userdata)
{
  cURLCamera* camera = (cURLCamera*)userdata;
  return camera->header_callback(buffer, size, nmemb, userdata);
}
int progress_callback_dispatcher(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow) int progress_callback_dispatcher(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow)
{ {
return ((cURLCamera*)userdata)->progress_callback(userdata,dltotal,dlnow,ultotal,ulnow); return ((cURLCamera*)userdata)->progress_callback(userdata,dltotal,dlnow,ultotal,ulnow);
} }
void* thread_func_dispatcher(void* object) { void* thread_func_dispatcher(void* object) {
return ((cURLCamera*)object)->thread_func(); return ((cURLCamera*)object)->thread_func();
} }

View File

@ -42,57 +42,57 @@
// Camera implementation that fetches frames over HTTP(S) using libcurl.
// A worker thread (thread_func) drives the transfer; Capture() consumes
// the shared databuffer under shareddata_mutex.
class cURLCamera : public Camera
{
protected:
  // Capture mode, derived from the server's Content-Type header:
  //   MODE_STREAM - multipart/x-mixed-replace (MJPEG stream)
  //   MODE_SINGLE - one image/jpeg per request
  typedef enum {MODE_UNSET, MODE_SINGLE, MODE_STREAM} mode_t;

  std::string mPath;  // request URL
  std::string mUser;  // HTTP auth username (may be empty)
  std::string mPass;  // HTTP auth password (may be empty)

  /* cURL object(s) */
  CURL* c;

  /* Shared data (protected by shareddata_mutex) */
  volatile bool bTerminate;  // set by Terminate() to stop the worker thread
  volatile bool bReset;      // set after a retry so Capture() re-syncs its parser
  volatile mode_t mode;      // current capture mode
  Buffer databuffer;         // received bytes not yet consumed by Capture()
  std::deque<size_t> single_offsets;  // sizes of completed MODE_SINGLE downloads

  /* pthread objects */
  pthread_t thread;
  pthread_mutex_t shareddata_mutex;
  pthread_cond_t data_available_cond;    // signalled when data is appended
  pthread_cond_t request_complete_cond;  // signalled when a single request finishes

public:
  cURLCamera( int p_id, const std::string &path, const std::string &username, const std::string &password, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
  ~cURLCamera();

  const std::string &Path() const { return( mPath ); }
  const std::string &Username() const { return( mUser ); }
  const std::string &Password() const { return( mPass ); }

  void Initialise();
  void Terminate();

  int PrimeCapture();
  int PreCapture();
  int Capture( Image &image );
  int PostCapture();
  int CaptureAndRecord( Image &image, bool recording, char* event_directory);

  /* libcurl callbacks (instance side; invoked via the *_dispatcher
   * free functions, which receive `this` as userdata) */
  size_t data_callback(void *buffer, size_t size, size_t nmemb, void *userdata);
  size_t header_callback(void *buffer, size_t size, size_t nmemb, void *userdata);
  int progress_callback(void *userdata, double dltotal, double dlnow, double ultotal, double ulnow);
  int debug_callback(CURL* handle, curl_infotype type, char* str, size_t strsize, void* data);

  void* thread_func();
  int lock();    // acquire shareddata_mutex (returns pthread result)
  int unlock();  // release shareddata_mutex (returns pthread result)

private:
  int nRet;      // scratch pthread return code
  CURLcode cRet; // scratch libcurl return code
};

View File

@ -15,7 +15,7 @@
* You should have received a copy of the GNU General Public License * You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ */
#include "zm_ffmpeg.h" #include "zm_ffmpeg.h"
#include "zm_image.h" #include "zm_image.h"
@ -24,243 +24,243 @@
#if HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE #if HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE
void FFMPEGInit() { void FFMPEGInit() {
static bool bInit = false; static bool bInit = false;
if(!bInit) { if(!bInit) {
av_register_all(); av_register_all();
av_log_set_level(AV_LOG_DEBUG); av_log_set_level(AV_LOG_DEBUG);
bInit = true; bInit = true;
} }
} }
#if HAVE_LIBAVUTIL #if HAVE_LIBAVUTIL
enum _AVPIXELFORMAT GetFFMPEGPixelFormat(unsigned int p_colours, unsigned p_subpixelorder) { enum _AVPIXELFORMAT GetFFMPEGPixelFormat(unsigned int p_colours, unsigned p_subpixelorder) {
enum _AVPIXELFORMAT pf; enum _AVPIXELFORMAT pf;
Debug(8,"Colours: %d SubpixelOrder: %d",p_colours,p_subpixelorder); Debug(8,"Colours: %d SubpixelOrder: %d",p_colours,p_subpixelorder);
switch(p_colours) { switch(p_colours) {
case ZM_COLOUR_RGB24: case ZM_COLOUR_RGB24:
{ {
if(p_subpixelorder == ZM_SUBPIX_ORDER_BGR) { if(p_subpixelorder == ZM_SUBPIX_ORDER_BGR) {
/* BGR subpixel order */ /* BGR subpixel order */
pf = AV_PIX_FMT_BGR24; pf = AV_PIX_FMT_BGR24;
} else { } else {
/* Assume RGB subpixel order */ /* Assume RGB subpixel order */
pf = AV_PIX_FMT_RGB24; pf = AV_PIX_FMT_RGB24;
} }
break; break;
} }
case ZM_COLOUR_RGB32: case ZM_COLOUR_RGB32:
{ {
if(p_subpixelorder == ZM_SUBPIX_ORDER_ARGB) { if(p_subpixelorder == ZM_SUBPIX_ORDER_ARGB) {
/* ARGB subpixel order */ /* ARGB subpixel order */
pf = AV_PIX_FMT_ARGB; pf = AV_PIX_FMT_ARGB;
} else if(p_subpixelorder == ZM_SUBPIX_ORDER_ABGR) { } else if(p_subpixelorder == ZM_SUBPIX_ORDER_ABGR) {
/* ABGR subpixel order */ /* ABGR subpixel order */
pf = AV_PIX_FMT_ABGR; pf = AV_PIX_FMT_ABGR;
} else if(p_subpixelorder == ZM_SUBPIX_ORDER_BGRA) { } else if(p_subpixelorder == ZM_SUBPIX_ORDER_BGRA) {
/* BGRA subpixel order */ /* BGRA subpixel order */
pf = AV_PIX_FMT_BGRA; pf = AV_PIX_FMT_BGRA;
} else { } else {
/* Assume RGBA subpixel order */ /* Assume RGBA subpixel order */
pf = AV_PIX_FMT_RGBA; pf = AV_PIX_FMT_RGBA;
} }
break; break;
} }
case ZM_COLOUR_GRAY8: case ZM_COLOUR_GRAY8:
pf = AV_PIX_FMT_GRAY8; pf = AV_PIX_FMT_GRAY8;
break; break;
default: default:
Panic("Unexpected colours: %d",p_colours); Panic("Unexpected colours: %d",p_colours);
pf = AV_PIX_FMT_GRAY8; /* Just to shush gcc variable may be unused warning */ pf = AV_PIX_FMT_GRAY8; /* Just to shush gcc variable may be unused warning */
break; break;
} }
return pf; return pf;
} }
#endif // HAVE_LIBAVUTIL #endif // HAVE_LIBAVUTIL
#if HAVE_LIBSWSCALE && HAVE_LIBAVUTIL #if HAVE_LIBSWSCALE && HAVE_LIBAVUTIL
SWScale::SWScale() : gotdefaults(false), swscale_ctx(NULL), input_avframe(NULL), output_avframe(NULL) { SWScale::SWScale() : gotdefaults(false), swscale_ctx(NULL), input_avframe(NULL), output_avframe(NULL) {
Debug(4,"SWScale object created"); Debug(4,"SWScale object created");
/* Allocate AVFrame for the input */ /* Allocate AVFrame for the input */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101) #if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
input_avframe = av_frame_alloc(); input_avframe = av_frame_alloc();
#else #else
input_avframe = avcodec_alloc_frame(); input_avframe = avcodec_alloc_frame();
#endif #endif
if(input_avframe == NULL) { if(input_avframe == NULL) {
Fatal("Failed allocating AVFrame for the input"); Fatal("Failed allocating AVFrame for the input");
} }
/* Allocate AVFrame for the output */ /* Allocate AVFrame for the output */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101) #if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
output_avframe = av_frame_alloc(); output_avframe = av_frame_alloc();
#else #else
output_avframe = avcodec_alloc_frame(); output_avframe = avcodec_alloc_frame();
#endif #endif
if(output_avframe == NULL) { if(output_avframe == NULL) {
Fatal("Failed allocating AVFrame for the output"); Fatal("Failed allocating AVFrame for the output");
} }
} }
SWScale::~SWScale() { SWScale::~SWScale() {
/* Free up everything */ /* Free up everything */
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101) #if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
av_frame_free( &input_avframe ); av_frame_free( &input_avframe );
#else #else
av_freep( &input_avframe ); av_freep( &input_avframe );
#endif #endif
//input_avframe = NULL; //input_avframe = NULL;
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101) #if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
av_frame_free( &output_avframe ); av_frame_free( &output_avframe );
#else #else
av_freep( &output_avframe ); av_freep( &output_avframe );
#endif #endif
//output_avframe = NULL; //output_avframe = NULL;
if(swscale_ctx) { if(swscale_ctx) {
sws_freeContext(swscale_ctx); sws_freeContext(swscale_ctx);
swscale_ctx = NULL; swscale_ctx = NULL;
} }
Debug(4,"SWScale object destroyed"); Debug(4,"SWScale object destroyed");
} }
int SWScale::SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) { int SWScale::SetDefaults(enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
/* Assign the defaults */ /* Assign the defaults */
default_input_pf = in_pf; default_input_pf = in_pf;
default_output_pf = out_pf; default_output_pf = out_pf;
default_width = width; default_width = width;
default_height = height; default_height = height;
gotdefaults = true; gotdefaults = true;
return 0; return 0;
} }
int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) { int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
/* Parameter checking */ /* Parameter checking */
if(in_buffer == NULL || out_buffer == NULL) { if(in_buffer == NULL || out_buffer == NULL) {
Error("NULL Input or output buffer"); Error("NULL Input or output buffer");
return -1; return -1;
} }
if(!width || !height) { if(!width || !height) {
Error("Invalid width or height"); Error("Invalid width or height");
return -3; return -3;
} }
#if LIBSWSCALE_VERSION_CHECK(0, 8, 0, 8, 0) #if LIBSWSCALE_VERSION_CHECK(0, 8, 0, 8, 0)
/* Warn if the input or output pixelformat is not supported */ /* Warn if the input or output pixelformat is not supported */
if(!sws_isSupportedInput(in_pf)) { if(!sws_isSupportedInput(in_pf)) {
Warning("swscale does not support the input format: %c%c%c%c",(in_pf)&0xff,((in_pf)&0xff),((in_pf>>16)&0xff),((in_pf>>24)&0xff)); Warning("swscale does not support the input format: %c%c%c%c",(in_pf)&0xff,((in_pf)&0xff),((in_pf>>16)&0xff),((in_pf>>24)&0xff));
} }
if(!sws_isSupportedOutput(out_pf)) { if(!sws_isSupportedOutput(out_pf)) {
Warning("swscale does not support the output format: %c%c%c%c",(out_pf)&0xff,((out_pf>>8)&0xff),((out_pf>>16)&0xff),((out_pf>>24)&0xff)); Warning("swscale does not support the output format: %c%c%c%c",(out_pf)&0xff,((out_pf>>8)&0xff),((out_pf>>16)&0xff),((out_pf>>24)&0xff));
} }
#endif #endif
/* Check the buffer sizes */ /* Check the buffer sizes */
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0) #if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
size_t insize = av_image_get_buffer_size(in_pf, width, height,1); size_t insize = av_image_get_buffer_size(in_pf, width, height,1);
#else #else
size_t insize = avpicture_get_size(in_pf, width, height); size_t insize = avpicture_get_size(in_pf, width, height);
#endif #endif
if(insize != in_buffer_size) { if(insize != in_buffer_size) {
Error("The input buffer size does not match the expected size for the input format. Required: %d Available: %d", insize, in_buffer_size); Error("The input buffer size does not match the expected size for the input format. Required: %d Available: %d", insize, in_buffer_size);
return -4; return -4;
} }
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0) #if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
size_t outsize = av_image_get_buffer_size(out_pf, width, height,1); size_t outsize = av_image_get_buffer_size(out_pf, width, height,1);
#else #else
size_t outsize = avpicture_get_size(out_pf, width, height); size_t outsize = avpicture_get_size(out_pf, width, height);
#endif #endif
if(outsize < out_buffer_size) { if(outsize < out_buffer_size) {
Error("The output buffer is undersized for the output format. Required: %d Available: %d", outsize, out_buffer_size); Error("The output buffer is undersized for the output format. Required: %d Available: %d", outsize, out_buffer_size);
return -5; return -5;
} }
/* Get the context */ /* Get the context */
swscale_ctx = sws_getCachedContext( swscale_ctx, width, height, in_pf, width, height, out_pf, SWS_FAST_BILINEAR, NULL, NULL, NULL ); swscale_ctx = sws_getCachedContext( swscale_ctx, width, height, in_pf, width, height, out_pf, SWS_FAST_BILINEAR, NULL, NULL, NULL );
if(swscale_ctx == NULL) { if(swscale_ctx == NULL) {
Error("Failed getting swscale context"); Error("Failed getting swscale context");
return -6; return -6;
} }
/* Fill in the buffers */ /* Fill in the buffers */
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0) #if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
if(av_image_fill_arrays(input_avframe->data, input_avframe->linesize, if(av_image_fill_arrays(input_avframe->data, input_avframe->linesize,
(uint8_t*)in_buffer, in_pf, width, height, 1) <= 0) (uint8_t*)in_buffer, in_pf, width, height, 1) <= 0)
{ {
#else #else
if(avpicture_fill( (AVPicture*)input_avframe, (uint8_t*)in_buffer, if(avpicture_fill( (AVPicture*)input_avframe, (uint8_t*)in_buffer,
in_pf, width, height ) <= 0) in_pf, width, height ) <= 0)
{ {
#endif #endif
Error("Failed filling input frame with input buffer"); Error("Failed filling input frame with input buffer");
return -7; return -7;
} }
if(!avpicture_fill( (AVPicture*)output_avframe, out_buffer, out_pf, width, height ) ) { if(!avpicture_fill( (AVPicture*)output_avframe, out_buffer, out_pf, width, height ) ) {
Error("Failed filling output frame with output buffer"); Error("Failed filling output frame with output buffer");
return -8; return -8;
}
/* Do the conversion */
if(!sws_scale(swscale_ctx, input_avframe->data, input_avframe->linesize, 0, height, output_avframe->data, output_avframe->linesize ) ) {
Error("swscale conversion failed");
return -10;
}
return 0;
} }
/* Do the conversion */ int SWScale::Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) {
if(!sws_scale(swscale_ctx, input_avframe->data, input_avframe->linesize, 0, height, output_avframe->data, output_avframe->linesize ) ) { if(img->Width() != width) {
Error("swscale conversion failed"); Error("Source image width differs. Source: %d Output: %d",img->Width(), width);
return -10; return -12;
}
if(img->Height() != height) {
Error("Source image height differs. Source: %d Output: %d",img->Height(), height);
return -13;
}
return Convert(img->Buffer(),img->Size(),out_buffer,out_buffer_size,in_pf,out_pf,width,height);
} }
return 0; int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size) {
}
int SWScale::Convert(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size, enum _AVPIXELFORMAT in_pf, enum _AVPIXELFORMAT out_pf, unsigned int width, unsigned int height) { if(!gotdefaults) {
if(img->Width() != width) { Error("Defaults are not set");
Error("Source image width differs. Source: %d Output: %d",img->Width(), width); return -24;
return -12; }
}
if(img->Height() != height) { return Convert(img,out_buffer,out_buffer_size,default_input_pf,default_output_pf,default_width,default_height);
Error("Source image height differs. Source: %d Output: %d",img->Height(), height); }
return -13;
}
return Convert(img->Buffer(),img->Size(),out_buffer,out_buffer_size,in_pf,out_pf,width,height); int SWScale::ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size) {
}
int SWScale::ConvertDefaults(const Image* img, uint8_t* out_buffer, const size_t out_buffer_size) { if(!gotdefaults) {
Error("Defaults are not set");
return -24;
}
if(!gotdefaults) { return Convert(in_buffer,in_buffer_size,out_buffer,out_buffer_size,default_input_pf,default_output_pf,default_width,default_height);
Error("Defaults are not set"); }
return -24;
}
return Convert(img,out_buffer,out_buffer_size,default_input_pf,default_output_pf,default_width,default_height);
}
int SWScale::ConvertDefaults(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size) {
if(!gotdefaults) {
Error("Defaults are not set");
return -24;
}
return Convert(in_buffer,in_buffer_size,out_buffer,out_buffer_size,default_input_pf,default_output_pf,default_width,default_height);
}
#endif // HAVE_LIBSWSCALE && HAVE_LIBAVUTIL #endif // HAVE_LIBSWSCALE && HAVE_LIBAVUTIL
#endif // HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE #endif // HAVE_LIBAVCODEC || HAVE_LIBAVUTIL || HAVE_LIBSWSCALE
#if HAVE_LIBAVUTIL #if HAVE_LIBAVUTIL
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb){ int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb){
int64_t a, b, this_thing; int64_t a, b, this_thing;
av_assert0(in_ts != AV_NOPTS_VALUE); av_assert0(in_ts != AV_NOPTS_VALUE);
@ -268,90 +268,90 @@ int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int
if (*last == AV_NOPTS_VALUE || !duration || in_tb.num*(int64_t)out_tb.den <= out_tb.num*(int64_t)in_tb.den) { if (*last == AV_NOPTS_VALUE || !duration || in_tb.num*(int64_t)out_tb.den <= out_tb.num*(int64_t)in_tb.den) {
simple_round: simple_round:
*last = av_rescale_q(in_ts, in_tb, fs_tb) + duration; *last = av_rescale_q(in_ts, in_tb, fs_tb) + duration;
return av_rescale_q(in_ts, in_tb, out_tb); return av_rescale_q(in_ts, in_tb, out_tb);
} }
a = av_rescale_q_rnd(2*in_ts-1, in_tb, fs_tb, AV_ROUND_DOWN) >>1; a = av_rescale_q_rnd(2*in_ts-1, in_tb, fs_tb, AV_ROUND_DOWN) >>1;
b = (av_rescale_q_rnd(2*in_ts+1, in_tb, fs_tb, AV_ROUND_UP )+1)>>1; b = (av_rescale_q_rnd(2*in_ts+1, in_tb, fs_tb, AV_ROUND_UP )+1)>>1;
if (*last < 2*a - b || *last > 2*b - a) if (*last < 2*a - b || *last > 2*b - a)
goto simple_round; goto simple_round;
this_thing = av_clip64(*last, a, b); this_thing = av_clip64(*last, a, b);
*last = this_thing + duration; *last = this_thing + duration;
return av_rescale_q(this_thing, fs_tb, out_tb); return av_rescale_q(this_thing, fs_tb, out_tb);
} }
#endif #endif
int hacked_up_context2_for_older_ffmpeg(AVFormatContext **avctx, AVOutputFormat *oformat, const char *format, const char *filename) { int hacked_up_context2_for_older_ffmpeg(AVFormatContext **avctx, AVOutputFormat *oformat, const char *format, const char *filename) {
AVFormatContext *s = avformat_alloc_context(); AVFormatContext *s = avformat_alloc_context();
int ret = 0; int ret = 0;
*avctx = NULL; *avctx = NULL;
if (!s) { if (!s) {
av_log(s, AV_LOG_ERROR, "Out of memory\n"); av_log(s, AV_LOG_ERROR, "Out of memory\n");
ret = AVERROR(ENOMEM); ret = AVERROR(ENOMEM);
return ret; return ret;
} }
if (!oformat) { if (!oformat) {
if (format) { if (format) {
oformat = av_guess_format(format, NULL, NULL); oformat = av_guess_format(format, NULL, NULL);
if (!oformat) { if (!oformat) {
av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format); av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
ret = AVERROR(EINVAL); ret = AVERROR(EINVAL);
}
} else {
oformat = av_guess_format(NULL, filename, NULL);
if (!oformat) {
ret = AVERROR(EINVAL);
av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n", filename);
}
} }
} else {
oformat = av_guess_format(NULL, filename, NULL);
if (!oformat) {
ret = AVERROR(EINVAL);
av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n", filename);
}
}
} }
if (ret) { if (ret) {
avformat_free_context(s); avformat_free_context(s);
return ret; return ret;
} else { } else {
s->oformat = oformat; s->oformat = oformat;
if (s->oformat->priv_data_size > 0) { if (s->oformat->priv_data_size > 0) {
s->priv_data = av_mallocz(s->oformat->priv_data_size); s->priv_data = av_mallocz(s->oformat->priv_data_size);
if (s->priv_data) { if (s->priv_data) {
if (s->oformat->priv_class) { if (s->oformat->priv_class) {
*(const AVClass**)s->priv_data= s->oformat->priv_class; *(const AVClass**)s->priv_data= s->oformat->priv_class;
av_opt_set_defaults(s->priv_data); av_opt_set_defaults(s->priv_data);
} }
} else { } else {
av_log(s, AV_LOG_ERROR, "Out of memory\n"); av_log(s, AV_LOG_ERROR, "Out of memory\n");
ret = AVERROR(ENOMEM); ret = AVERROR(ENOMEM);
return ret; return ret;
} }
s->priv_data = NULL; s->priv_data = NULL;
} }
if (filename) strncpy(s->filename, filename, sizeof(s->filename)); if (filename) strncpy(s->filename, filename, sizeof(s->filename));
*avctx = s; *avctx = s;
return 0; return 0;
} }
} }
static void zm_log_fps(double d, const char *postfix) static void zm_log_fps(double d, const char *postfix)
{ {
uint64_t v = lrintf(d * 100); uint64_t v = lrintf(d * 100);
if (!v) { if (!v) {
Debug(3, "%1.4f %s", d, postfix); Debug(3, "%1.4f %s", d, postfix);
} else if (v % 100) { } else if (v % 100) {
Debug(3, "%3.2f %s", d, postfix); Debug(3, "%3.2f %s", d, postfix);
} else if (v % (100 * 1000)) { } else if (v % (100 * 1000)) {
Debug(3, "%1.0f %s", d, postfix); Debug(3, "%1.0f %s", d, postfix);
} else } else
Debug(3, "%1.0fk %s", d / 1000, postfix); Debug(3, "%1.0fk %s", d / 1000, postfix);
} }
/* "user interface" functions */ /* "user interface" functions */
void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) { void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) {
char buf[256]; char buf[256];
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags); int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i]; AVStream *st = ic->streams[i];
@ -363,67 +363,67 @@ void zm_dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
/* the pid is an important information, so we display it */ /* the pid is an important information, so we display it */
/* XXX: add a generic system */ /* XXX: add a generic system */
if (flags & AVFMT_SHOW_IDS) if (flags & AVFMT_SHOW_IDS)
Debug(3, "[0x%x]", st->id); Debug(3, "[0x%x]", st->id);
if (lang) if (lang)
Debug(3, "(%s)", lang->value); Debug(3, "(%s)", lang->value);
av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames,
st->time_base.num, st->time_base.den); st->time_base.num, st->time_base.den);
Debug(3, ": %s", buf); Debug(3, ": %s", buf);
if (st->sample_aspect_ratio.num && // default if (st->sample_aspect_ratio.num && // default
av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) { av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
AVRational display_aspect_ratio; AVRational display_aspect_ratio;
av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
st->codec->width * (int64_t)st->sample_aspect_ratio.num, st->codec->width * (int64_t)st->sample_aspect_ratio.num,
st->codec->height * (int64_t)st->sample_aspect_ratio.den, st->codec->height * (int64_t)st->sample_aspect_ratio.den,
1024 * 1024); 1024 * 1024);
Debug(3, ", SAR %d:%d DAR %d:%d", Debug(3, ", SAR %d:%d DAR %d:%d",
st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
display_aspect_ratio.num, display_aspect_ratio.den); display_aspect_ratio.num, display_aspect_ratio.den);
} }
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
int fps = st->avg_frame_rate.den && st->avg_frame_rate.num; int fps = st->avg_frame_rate.den && st->avg_frame_rate.num;
int tbr = st->r_frame_rate.den && st->r_frame_rate.num; int tbr = st->r_frame_rate.den && st->r_frame_rate.num;
int tbn = st->time_base.den && st->time_base.num; int tbn = st->time_base.den && st->time_base.num;
int tbc = st->codec->time_base.den && st->codec->time_base.num; int tbc = st->codec->time_base.den && st->codec->time_base.num;
if (fps || tbr || tbn || tbc) if (fps || tbr || tbn || tbc)
Debug(3, "\n" ); Debug(3, "\n" );
if (fps) if (fps)
zm_log_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps"); zm_log_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps");
if (tbr) if (tbr)
zm_log_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr"); zm_log_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr");
if (tbn) if (tbn)
zm_log_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn"); zm_log_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn");
if (tbc) if (tbc)
zm_log_fps(1 / av_q2d(st->codec->time_base), "tbc"); zm_log_fps(1 / av_q2d(st->codec->time_base), "tbc");
} }
if (st->disposition & AV_DISPOSITION_DEFAULT) if (st->disposition & AV_DISPOSITION_DEFAULT)
Debug(3, " (default)"); Debug(3, " (default)");
if (st->disposition & AV_DISPOSITION_DUB) if (st->disposition & AV_DISPOSITION_DUB)
Debug(3, " (dub)"); Debug(3, " (dub)");
if (st->disposition & AV_DISPOSITION_ORIGINAL) if (st->disposition & AV_DISPOSITION_ORIGINAL)
Debug(3, " (original)"); Debug(3, " (original)");
if (st->disposition & AV_DISPOSITION_COMMENT) if (st->disposition & AV_DISPOSITION_COMMENT)
Debug(3, " (comment)"); Debug(3, " (comment)");
if (st->disposition & AV_DISPOSITION_LYRICS) if (st->disposition & AV_DISPOSITION_LYRICS)
Debug(3, " (lyrics)"); Debug(3, " (lyrics)");
if (st->disposition & AV_DISPOSITION_KARAOKE) if (st->disposition & AV_DISPOSITION_KARAOKE)
Debug(3, " (karaoke)"); Debug(3, " (karaoke)");
if (st->disposition & AV_DISPOSITION_FORCED) if (st->disposition & AV_DISPOSITION_FORCED)
Debug(3, " (forced)"); Debug(3, " (forced)");
if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED) if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
Debug(3, " (hearing impaired)"); Debug(3, " (hearing impaired)");
if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED) if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
Debug(3, " (visual impaired)"); Debug(3, " (visual impaired)");
if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS) if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
Debug(3, " (clean effects)"); Debug(3, " (clean effects)");
Debug(3, "\n"); Debug(3, "\n");
//dump_metadata(NULL, st->metadata, " "); //dump_metadata(NULL, st->metadata, " ");
//dump_sidedata(NULL, st, " "); //dump_sidedata(NULL, st, " ");
} }

File diff suppressed because it is too large Load Diff

View File

@ -33,10 +33,10 @@
// //
class FfmpegCamera : public Camera class FfmpegCamera : public Camera
{ {
protected: protected:
std::string mPath; std::string mPath;
std::string mMethod; std::string mMethod;
std::string mOptions; std::string mOptions;
int frameCount; int frameCount;
@ -69,27 +69,27 @@ protected:
AVPacket lastKeyframePkt; AVPacket lastKeyframePkt;
#if HAVE_LIBSWSCALE #if HAVE_LIBSWSCALE
struct SwsContext *mConvertContext; struct SwsContext *mConvertContext;
#endif #endif
int64_t startTime; int64_t startTime;
public: public:
FfmpegCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ); FfmpegCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~FfmpegCamera(); ~FfmpegCamera();
const std::string &Path() const { return( mPath ); } const std::string &Path() const { return( mPath ); }
const std::string &Options() const { return( mOptions ); } const std::string &Options() const { return( mOptions ); }
const std::string &Method() const { return( mMethod ); } const std::string &Method() const { return( mMethod ); }
void Initialise(); void Initialise();
void Terminate(); void Terminate();
int PrimeCapture(); int PrimeCapture();
int PreCapture(); int PreCapture();
int Capture( Image &image ); int Capture( Image &image );
int CaptureAndRecord( Image &image, bool recording, char* event_directory ); int CaptureAndRecord( Image &image, bool recording, char* event_directory );
int PostCapture(); int PostCapture();
}; };
#endif // ZM_FFMPEG_CAMERA_H #endif // ZM_FFMPEG_CAMERA_H

View File

@ -36,28 +36,28 @@
FileCamera::FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) : Camera( p_id, FILE_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ) FileCamera::FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) : Camera( p_id, FILE_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio )
{ {
strncpy( path, p_path, sizeof(path) ); strncpy( path, p_path, sizeof(path) );
if ( capture ) if ( capture )
{ {
Initialise(); Initialise();
} }
} }
FileCamera::~FileCamera() FileCamera::~FileCamera()
{ {
if ( capture ) if ( capture )
{ {
Terminate(); Terminate();
} }
} }
void FileCamera::Initialise() void FileCamera::Initialise()
{ {
if ( !path[0] ) if ( !path[0] )
{ {
Error( "No path specified for file image" ); Error( "No path specified for file image" );
exit( -1 ); exit( -1 );
} }
} }
void FileCamera::Terminate() void FileCamera::Terminate()
@ -66,23 +66,23 @@ void FileCamera::Terminate()
int FileCamera::PreCapture() int FileCamera::PreCapture()
{ {
struct stat statbuf; struct stat statbuf;
if ( stat( path, &statbuf ) < 0 ) if ( stat( path, &statbuf ) < 0 )
{ {
Error( "Can't stat %s: %s", path, strerror(errno) ); Error( "Can't stat %s: %s", path, strerror(errno) );
return( -1 ); return( -1 );
} }
while ( (time( 0 ) - statbuf.st_mtime) < 1 ) while ( (time( 0 ) - statbuf.st_mtime) < 1 )
{ {
usleep( 100000 ); usleep( 100000 );
} }
return( 0 ); return( 0 );
} }
int FileCamera::Capture( Image &image ) int FileCamera::Capture( Image &image )
{ {
return( image.ReadJpeg( path, colours, subpixelorder )?0:-1 ); return( image.ReadJpeg( path, colours, subpixelorder )?0:-1 );
} }
int FileCamera::PostCapture() int FileCamera::PostCapture()

View File

@ -33,20 +33,20 @@
class FileCamera : public Camera class FileCamera : public Camera
{ {
protected: protected:
char path[PATH_MAX]; char path[PATH_MAX];
public: public:
FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ); FileCamera( int p_id, const char *p_path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~FileCamera(); ~FileCamera();
const char *Path() const { return( path ); } const char *Path() const { return( path ); }
void Initialise(); void Initialise();
void Terminate(); void Terminate();
int PreCapture(); int PreCapture();
int Capture( Image &image ); int Capture( Image &image );
int PostCapture(); int PostCapture();
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);}; int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
}; };
#endif // ZM_FILE_CAMERA_H #endif // ZM_FILE_CAMERA_H

View File

@ -25,102 +25,102 @@
// Do all the buffer checking work here to avoid unnecessary locking // Do all the buffer checking work here to avoid unnecessary locking
void* LibvlcLockBuffer(void* opaque, void** planes) void* LibvlcLockBuffer(void* opaque, void** planes)
{ {
LibvlcPrivateData* data = (LibvlcPrivateData*)opaque; LibvlcPrivateData* data = (LibvlcPrivateData*)opaque;
data->mutex.lock(); data->mutex.lock();
uint8_t* buffer = data->buffer; uint8_t* buffer = data->buffer;
data->buffer = data->prevBuffer; data->buffer = data->prevBuffer;
data->prevBuffer = buffer; data->prevBuffer = buffer;
*planes = data->buffer; *planes = data->buffer;
return NULL; return NULL;
} }
void LibvlcUnlockBuffer(void* opaque, void* picture, void *const *planes) void LibvlcUnlockBuffer(void* opaque, void* picture, void *const *planes)
{ {
LibvlcPrivateData* data = (LibvlcPrivateData*)opaque; LibvlcPrivateData* data = (LibvlcPrivateData*)opaque;
bool newFrame = false; bool newFrame = false;
for(uint32_t i = 0; i < data->bufferSize; i++) for(uint32_t i = 0; i < data->bufferSize; i++)
{
if(data->buffer[i] != data->prevBuffer[i])
{ {
if(data->buffer[i] != data->prevBuffer[i]) newFrame = true;
{ break;
newFrame = true;
break;
}
} }
data->mutex.unlock(); }
data->mutex.unlock();
time_t now; time_t now;
time(&now); time(&now);
// Return frames slightly faster than 1fps (if time() supports greater than one second resolution) // Return frames slightly faster than 1fps (if time() supports greater than one second resolution)
if(newFrame || difftime(now, data->prevTime) >= 0.8) if(newFrame || difftime(now, data->prevTime) >= 0.8)
{ {
data->prevTime = now; data->prevTime = now;
data->newImage.updateValueSignal(true); data->newImage.updateValueSignal(true);
} }
} }
LibvlcCamera::LibvlcCamera( int p_id, const std::string &p_path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) : LibvlcCamera::LibvlcCamera( int p_id, const std::string &p_path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
Camera( p_id, LIBVLC_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ), Camera( p_id, LIBVLC_SRC, p_width, p_height, p_colours, ZM_SUBPIX_ORDER_DEFAULT_FOR_COLOUR(p_colours), p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
mPath( p_path ), mPath( p_path ),
mMethod( p_method ), mMethod( p_method ),
mOptions( p_options ) mOptions( p_options )
{ {
mLibvlcInstance = NULL; mLibvlcInstance = NULL;
mLibvlcMedia = NULL; mLibvlcMedia = NULL;
mLibvlcMediaPlayer = NULL; mLibvlcMediaPlayer = NULL;
mLibvlcData.buffer = NULL; mLibvlcData.buffer = NULL;
mLibvlcData.prevBuffer = NULL; mLibvlcData.prevBuffer = NULL;
/* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */ /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
if(colours == ZM_COLOUR_RGB32) { if(colours == ZM_COLOUR_RGB32) {
subpixelorder = ZM_SUBPIX_ORDER_BGRA; subpixelorder = ZM_SUBPIX_ORDER_BGRA;
mTargetChroma = "RV32"; mTargetChroma = "RV32";
mBpp = 4; mBpp = 4;
} else if(colours == ZM_COLOUR_RGB24) { } else if(colours == ZM_COLOUR_RGB24) {
subpixelorder = ZM_SUBPIX_ORDER_BGR; subpixelorder = ZM_SUBPIX_ORDER_BGR;
mTargetChroma = "RV24"; mTargetChroma = "RV24";
mBpp = 3; mBpp = 3;
} else if(colours == ZM_COLOUR_GRAY8) { } else if(colours == ZM_COLOUR_GRAY8) {
subpixelorder = ZM_SUBPIX_ORDER_NONE; subpixelorder = ZM_SUBPIX_ORDER_NONE;
mTargetChroma = "GREY"; mTargetChroma = "GREY";
mBpp = 1; mBpp = 1;
} else { } else {
Panic("Unexpected colours: %d",colours); Panic("Unexpected colours: %d",colours);
} }
if ( capture ) if ( capture )
{ {
Initialise(); Initialise();
} }
} }
LibvlcCamera::~LibvlcCamera() LibvlcCamera::~LibvlcCamera()
{ {
if ( capture ) if ( capture )
{ {
Terminate(); Terminate();
} }
if(mLibvlcMediaPlayer != NULL) if(mLibvlcMediaPlayer != NULL)
{ {
libvlc_media_player_release(mLibvlcMediaPlayer); libvlc_media_player_release(mLibvlcMediaPlayer);
mLibvlcMediaPlayer = NULL; mLibvlcMediaPlayer = NULL;
} }
if(mLibvlcMedia != NULL) if(mLibvlcMedia != NULL)
{ {
libvlc_media_release(mLibvlcMedia); libvlc_media_release(mLibvlcMedia);
mLibvlcMedia = NULL; mLibvlcMedia = NULL;
} }
if(mLibvlcInstance != NULL) if(mLibvlcInstance != NULL)
{ {
libvlc_release(mLibvlcInstance); libvlc_release(mLibvlcInstance);
mLibvlcInstance = NULL; mLibvlcInstance = NULL;
} }
if (mOptArgV != NULL) if (mOptArgV != NULL)
{ {
delete[] mOptArgV; delete[] mOptArgV;
} }
} }
void LibvlcCamera::Initialise() void LibvlcCamera::Initialise()
@ -129,105 +129,105 @@ void LibvlcCamera::Initialise()
void LibvlcCamera::Terminate() void LibvlcCamera::Terminate()
{ {
libvlc_media_player_stop(mLibvlcMediaPlayer); libvlc_media_player_stop(mLibvlcMediaPlayer);
if(mLibvlcData.buffer != NULL) if(mLibvlcData.buffer != NULL)
{ {
zm_freealigned(mLibvlcData.buffer); zm_freealigned(mLibvlcData.buffer);
} }
if(mLibvlcData.prevBuffer != NULL) if(mLibvlcData.prevBuffer != NULL)
{ {
zm_freealigned(mLibvlcData.prevBuffer); zm_freealigned(mLibvlcData.prevBuffer);
} }
} }
int LibvlcCamera::PrimeCapture() int LibvlcCamera::PrimeCapture()
{ {
Info("Priming capture from %s", mPath.c_str()); Info("Priming capture from %s", mPath.c_str());
StringVector opVect = split(Options(), ","); StringVector opVect = split(Options(), ",");
// Set transport method as specified by method field, rtpUni is default // Set transport method as specified by method field, rtpUni is default
if ( Method() == "rtpMulti" ) if ( Method() == "rtpMulti" )
opVect.push_back("--rtsp-mcast"); opVect.push_back("--rtsp-mcast");
else if ( Method() == "rtpRtsp" ) else if ( Method() == "rtpRtsp" )
opVect.push_back("--rtsp-tcp"); opVect.push_back("--rtsp-tcp");
else if ( Method() == "rtpRtspHttp" ) else if ( Method() == "rtpRtspHttp" )
opVect.push_back("--rtsp-http"); opVect.push_back("--rtsp-http");
if (opVect.size() > 0) if (opVect.size() > 0)
{ {
mOptArgV = new char*[opVect.size()]; mOptArgV = new char*[opVect.size()];
Debug(2, "Number of Options: %d",opVect.size()); Debug(2, "Number of Options: %d",opVect.size());
for (size_t i=0; i< opVect.size(); i++) { for (size_t i=0; i< opVect.size(); i++) {
opVect[i] = trimSpaces(opVect[i]); opVect[i] = trimSpaces(opVect[i]);
mOptArgV[i] = (char *)opVect[i].c_str(); mOptArgV[i] = (char *)opVect[i].c_str();
Debug(2, "set option %d to '%s'", i, opVect[i].c_str()); Debug(2, "set option %d to '%s'", i, opVect[i].c_str());
}
} }
}
mLibvlcInstance = libvlc_new (opVect.size(), (const char* const*)mOptArgV); mLibvlcInstance = libvlc_new (opVect.size(), (const char* const*)mOptArgV);
if(mLibvlcInstance == NULL) if(mLibvlcInstance == NULL)
Fatal("Unable to create libvlc instance due to: %s", libvlc_errmsg()); Fatal("Unable to create libvlc instance due to: %s", libvlc_errmsg());
mLibvlcMedia = libvlc_media_new_location(mLibvlcInstance, mPath.c_str()); mLibvlcMedia = libvlc_media_new_location(mLibvlcInstance, mPath.c_str());
if(mLibvlcMedia == NULL) if(mLibvlcMedia == NULL)
Fatal("Unable to open input %s due to: %s", mPath.c_str(), libvlc_errmsg()); Fatal("Unable to open input %s due to: %s", mPath.c_str(), libvlc_errmsg());
mLibvlcMediaPlayer = libvlc_media_player_new_from_media(mLibvlcMedia); mLibvlcMediaPlayer = libvlc_media_player_new_from_media(mLibvlcMedia);
if(mLibvlcMediaPlayer == NULL) if(mLibvlcMediaPlayer == NULL)
Fatal("Unable to create player for %s due to: %s", mPath.c_str(), libvlc_errmsg()); Fatal("Unable to create player for %s due to: %s", mPath.c_str(), libvlc_errmsg());
libvlc_video_set_format(mLibvlcMediaPlayer, mTargetChroma.c_str(), width, height, width * mBpp); libvlc_video_set_format(mLibvlcMediaPlayer, mTargetChroma.c_str(), width, height, width * mBpp);
libvlc_video_set_callbacks(mLibvlcMediaPlayer, &LibvlcLockBuffer, &LibvlcUnlockBuffer, NULL, &mLibvlcData); libvlc_video_set_callbacks(mLibvlcMediaPlayer, &LibvlcLockBuffer, &LibvlcUnlockBuffer, NULL, &mLibvlcData);
mLibvlcData.bufferSize = width * height * mBpp; mLibvlcData.bufferSize = width * height * mBpp;
// Libvlc wants 32 byte alignment for images (should in theory do this for all image lines) // Libvlc wants 32 byte alignment for images (should in theory do this for all image lines)
mLibvlcData.buffer = (uint8_t*)zm_mallocaligned(32, mLibvlcData.bufferSize); mLibvlcData.buffer = (uint8_t*)zm_mallocaligned(32, mLibvlcData.bufferSize);
mLibvlcData.prevBuffer = (uint8_t*)zm_mallocaligned(32, mLibvlcData.bufferSize); mLibvlcData.prevBuffer = (uint8_t*)zm_mallocaligned(32, mLibvlcData.bufferSize);
mLibvlcData.newImage.setValueImmediate(false); mLibvlcData.newImage.setValueImmediate(false);
libvlc_media_player_play(mLibvlcMediaPlayer); libvlc_media_player_play(mLibvlcMediaPlayer);
return(0); return(0);
} }
int LibvlcCamera::PreCapture() int LibvlcCamera::PreCapture()
{ {
return(0); return(0);
} }
// Should not return -1 as cancels capture. Always wait for image if available. // Should not return -1 as cancels capture. Always wait for image if available.
int LibvlcCamera::Capture( Image &image ) int LibvlcCamera::Capture( Image &image )
{ {
while(!mLibvlcData.newImage.getValueImmediate()) while(!mLibvlcData.newImage.getValueImmediate())
mLibvlcData.newImage.getUpdatedValue(1); mLibvlcData.newImage.getUpdatedValue(1);
mLibvlcData.mutex.lock(); mLibvlcData.mutex.lock();
image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp); image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
mLibvlcData.newImage.setValueImmediate(false); mLibvlcData.newImage.setValueImmediate(false);
mLibvlcData.mutex.unlock(); mLibvlcData.mutex.unlock();
return (0); return (0);
} }
// Should not return -1 as cancels capture. Always wait for image if available. // Should not return -1 as cancels capture. Always wait for image if available.
int LibvlcCamera::CaptureAndRecord( Image &image, bool recording, char* event_directory ) int LibvlcCamera::CaptureAndRecord( Image &image, bool recording, char* event_directory )
{ {
while(!mLibvlcData.newImage.getValueImmediate()) while(!mLibvlcData.newImage.getValueImmediate())
mLibvlcData.newImage.getUpdatedValue(1); mLibvlcData.newImage.getUpdatedValue(1);
mLibvlcData.mutex.lock(); mLibvlcData.mutex.lock();
image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp); image.Assign(width, height, colours, subpixelorder, mLibvlcData.buffer, width * height * mBpp);
mLibvlcData.newImage.setValueImmediate(false); mLibvlcData.newImage.setValueImmediate(false);
mLibvlcData.mutex.unlock(); mLibvlcData.mutex.unlock();
return (0); return (0);
} }
int LibvlcCamera::PostCapture() int LibvlcCamera::PostCapture()
{ {
return(0); return(0);
} }
#endif // HAVE_LIBVLC #endif // HAVE_LIBVLC

View File

@ -33,45 +33,45 @@
// Used by libvlc callbacks // Used by libvlc callbacks
struct LibvlcPrivateData struct LibvlcPrivateData
{ {
uint8_t* buffer; uint8_t* buffer;
uint8_t* prevBuffer; uint8_t* prevBuffer;
time_t prevTime; time_t prevTime;
uint32_t bufferSize; uint32_t bufferSize;
Mutex mutex; Mutex mutex;
ThreadData<bool> newImage; ThreadData<bool> newImage;
}; };
class LibvlcCamera : public Camera class LibvlcCamera : public Camera
{ {
protected: protected:
std::string mPath; std::string mPath;
std::string mMethod; std::string mMethod;
std::string mOptions; std::string mOptions;
char **mOptArgV; char **mOptArgV;
LibvlcPrivateData mLibvlcData; LibvlcPrivateData mLibvlcData;
std::string mTargetChroma; std::string mTargetChroma;
uint8_t mBpp; uint8_t mBpp;
libvlc_instance_t *mLibvlcInstance; libvlc_instance_t *mLibvlcInstance;
libvlc_media_t *mLibvlcMedia; libvlc_media_t *mLibvlcMedia;
libvlc_media_player_t *mLibvlcMediaPlayer; libvlc_media_player_t *mLibvlcMediaPlayer;
public: public:
LibvlcCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ); LibvlcCamera( int p_id, const std::string &path, const std::string &p_method, const std::string &p_options, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~LibvlcCamera(); ~LibvlcCamera();
const std::string &Path() const { return( mPath ); } const std::string &Path() const { return( mPath ); }
const std::string &Options() const { return( mOptions ); } const std::string &Options() const { return( mOptions ); }
const std::string &Method() const { return( mMethod ); } const std::string &Method() const { return( mMethod ); }
void Initialise(); void Initialise();
void Terminate(); void Terminate();
int PrimeCapture(); int PrimeCapture();
int PreCapture(); int PreCapture();
int Capture( Image &image ); int Capture( Image &image );
int CaptureAndRecord( Image &image, bool recording, char* event_directory ); int CaptureAndRecord( Image &image, bool recording, char* event_directory );
int PostCapture(); int PostCapture();
}; };
#endif // HAVE_LIBVLC #endif // HAVE_LIBVLC

File diff suppressed because it is too large Load Diff

View File

@ -75,90 +75,90 @@ protected:
#endif // ZM_HAS_V4L1 #endif // ZM_HAS_V4L1
protected: protected:
std::string device; std::string device;
int channel; int channel;
int standard; int standard;
int palette; int palette;
bool device_prime; bool device_prime;
bool channel_prime; bool channel_prime;
int channel_index; int channel_index;
unsigned int extras; unsigned int extras;
unsigned int conversion_type; /* 0 = no conversion needed, 1 = use libswscale, 2 = zm internal conversion, 3 = jpeg decoding */ unsigned int conversion_type; /* 0 = no conversion needed, 1 = use libswscale, 2 = zm internal conversion, 3 = jpeg decoding */
convert_fptr_t conversion_fptr; /* Pointer to conversion function used */ convert_fptr_t conversion_fptr; /* Pointer to conversion function used */
uint32_t AutoSelectFormat(int p_colours); uint32_t AutoSelectFormat(int p_colours);
static int camera_count; static int camera_count;
static int channel_count; static int channel_count;
static int channels[VIDEO_MAX_FRAME]; static int channels[VIDEO_MAX_FRAME];
static int standards[VIDEO_MAX_FRAME]; static int standards[VIDEO_MAX_FRAME];
static int vid_fd; static int vid_fd;
static int v4l_version; static int v4l_version;
bool v4l_multi_buffer; bool v4l_multi_buffer;
unsigned int v4l_captures_per_frame; unsigned int v4l_captures_per_frame;
#if ZM_HAS_V4L2 #if ZM_HAS_V4L2
static V4L2Data v4l2_data; static V4L2Data v4l2_data;
#endif // ZM_HAS_V4L2 #endif // ZM_HAS_V4L2
#if ZM_HAS_V4L1 #if ZM_HAS_V4L1
static V4L1Data v4l1_data; static V4L1Data v4l1_data;
#endif // ZM_HAS_V4L1 #endif // ZM_HAS_V4L1
#if HAVE_LIBSWSCALE #if HAVE_LIBSWSCALE
static AVFrame **capturePictures; static AVFrame **capturePictures;
_AVPIXELFORMAT imagePixFormat; _AVPIXELFORMAT imagePixFormat;
_AVPIXELFORMAT capturePixFormat; _AVPIXELFORMAT capturePixFormat;
struct SwsContext *imgConversionContext; struct SwsContext *imgConversionContext;
AVFrame *tmpPicture; AVFrame *tmpPicture;
#endif // HAVE_LIBSWSCALE #endif // HAVE_LIBSWSCALE
static LocalCamera *last_camera; static LocalCamera *last_camera;
public: public:
LocalCamera( LocalCamera(
int p_id, int p_id,
const std::string &device, const std::string &device,
int p_channel, int p_channel,
int p_format, int p_format,
bool v4lmultibuffer, bool v4lmultibuffer,
unsigned int v4lcapturesperframe, unsigned int v4lcapturesperframe,
const std::string &p_method, const std::string &p_method,
int p_width, int p_width,
int p_height, int p_height,
int p_colours, int p_colours,
int p_palette, int p_palette,
int p_brightness, int p_brightness,
int p_contrast, int p_contrast,
int p_hue, int p_hue,
int p_colour, int p_colour,
bool p_capture, bool p_capture,
bool p_record_audio, bool p_record_audio,
unsigned int p_extras = 0); unsigned int p_extras = 0);
~LocalCamera(); ~LocalCamera();
void Initialise(); void Initialise();
void Terminate(); void Terminate();
const std::string &Device() const { return( device ); } const std::string &Device() const { return( device ); }
int Channel() const { return( channel ); } int Channel() const { return( channel ); }
int Standard() const { return( standard ); } int Standard() const { return( standard ); }
int Palette() const { return( palette ); } int Palette() const { return( palette ); }
int Extras() const { return( extras ); } int Extras() const { return( extras ); }
int Brightness( int p_brightness=-1 ); int Brightness( int p_brightness=-1 );
int Hue( int p_hue=-1 ); int Hue( int p_hue=-1 );
int Colour( int p_colour=-1 ); int Colour( int p_colour=-1 );
int Contrast( int p_contrast=-1 ); int Contrast( int p_contrast=-1 );
int PrimeCapture(); int PrimeCapture();
int PreCapture(); int PreCapture();
int Capture( Image &image ); int Capture( Image &image );
int PostCapture(); int PostCapture();
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);}; int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose ); static bool GetCurrentSettings( const char *device, char *output, int version, bool verbose );
}; };
#endif // ZM_HAS_V4L #endif // ZM_HAS_V4L

View File

@ -504,7 +504,7 @@ Monitor::Monitor(
} }
bool Monitor::connect() { bool Monitor::connect() {
Debug(3, "Connecting to monitor. Purpose is %d", purpose ); Debug(3, "Connecting to monitor. Purpose is %d", purpose );
#if ZM_MEM_MAPPED #if ZM_MEM_MAPPED
snprintf( mem_file, sizeof(mem_file), "%s/zm.mmap.%d", config.path_map, id ); snprintf( mem_file, sizeof(mem_file), "%s/zm.mmap.%d", config.path_map, id );
map_fd = open( mem_file, O_RDWR|O_CREAT, (mode_t)0600 ); map_fd = open( mem_file, O_RDWR|O_CREAT, (mode_t)0600 );
@ -564,7 +564,7 @@ bool Monitor::connect() {
Debug(3,"Aligning shared memory images to the next 16 byte boundary"); Debug(3,"Aligning shared memory images to the next 16 byte boundary");
shared_images = (uint8_t*)((unsigned long)shared_images + (16 - ((unsigned long)shared_images % 16))); shared_images = (uint8_t*)((unsigned long)shared_images + (16 - ((unsigned long)shared_images % 16)));
} }
Debug(3, "Allocating %d image buffers", image_buffer_count ); Debug(3, "Allocating %d image buffers", image_buffer_count );
image_buffer = new Snapshot[image_buffer_count]; image_buffer = new Snapshot[image_buffer_count];
for ( int i = 0; i < image_buffer_count; i++ ) { for ( int i = 0; i < image_buffer_count; i++ ) {
image_buffer[i].timestamp = &(shared_timestamps[i]); image_buffer[i].timestamp = &(shared_timestamps[i]);
@ -1338,7 +1338,7 @@ bool Monitor::Analyse()
if ( shared_data->action ) if ( shared_data->action )
{ {
// Can there be more than 1 bit set in the action? Shouldn't these be elseifs? // Can there be more than 1 bit set in the action? Shouldn't these be elseifs?
if ( shared_data->action & RELOAD ) if ( shared_data->action & RELOAD )
{ {
Info( "Received reload indication at count %d", image_count ); Info( "Received reload indication at count %d", image_count );
@ -1352,7 +1352,7 @@ bool Monitor::Analyse()
Info( "Received suspend indication at count %d", image_count ); Info( "Received suspend indication at count %d", image_count );
shared_data->active = false; shared_data->active = false;
//closeEvent(); //closeEvent();
} else { } else {
Info( "Received suspend indication at count %d, but wasn't active", image_count ); Info( "Received suspend indication at count %d, but wasn't active", image_count );
} }
if ( config.max_suspend_time ) if ( config.max_suspend_time )
@ -1401,7 +1401,7 @@ bool Monitor::Analyse()
{ {
bool signal = shared_data->signal; bool signal = shared_data->signal;
bool signal_change = (signal != last_signal); bool signal_change = (signal != last_signal);
Debug(3, "Motion detection is enabled signal(%d) signal_change(%d)", signal, signal_change); Debug(3, "Motion detection is enabled signal(%d) signal_change(%d)", signal, signal_change);
//Set video recording flag for event start constructor and easy reference in code //Set video recording flag for event start constructor and easy reference in code
// TODO: Use enum instead of the # 2. Makes for easier reading // TODO: Use enum instead of the # 2. Makes for easier reading
@ -1467,9 +1467,9 @@ bool Monitor::Analyse()
// Get new score. // Get new score.
motion_score = DetectMotion( *snap_image, zoneSet ); motion_score = DetectMotion( *snap_image, zoneSet );
Debug( 3, "After motion detection, last_motion_score(%d), new motion score(%d)", last_motion_score, motion_score ); Debug( 3, "After motion detection, last_motion_score(%d), new motion score(%d)", last_motion_score, motion_score );
// Why are we updating the last_motion_score too? // Why are we updating the last_motion_score too?
last_motion_score = motion_score; last_motion_score = motion_score;
} }
//int motion_score = DetectBlack( *snap_image, zoneSet ); //int motion_score = DetectBlack( *snap_image, zoneSet );
if ( motion_score ) if ( motion_score )
@ -1531,27 +1531,27 @@ bool Monitor::Analyse()
//TODO: We shouldn't have to do this every time. Not sure why it clears itself if this isn't here?? //TODO: We shouldn't have to do this every time. Not sure why it clears itself if this isn't here??
snprintf(video_store_data->event_file, sizeof(video_store_data->event_file), "%s", event->getEventFile()); snprintf(video_store_data->event_file, sizeof(video_store_data->event_file), "%s", event->getEventFile());
if ( section_length ) { if ( section_length ) {
int section_mod = timestamp->tv_sec%section_length; int section_mod = timestamp->tv_sec%section_length;
Debug( 3, "Section length (%d) Last Section Mod(%d), new section mod(%d)", section_length, last_section_mod, section_mod ); Debug( 3, "Section length (%d) Last Section Mod(%d), new section mod(%d)", section_length, last_section_mod, section_mod );
if ( section_mod < last_section_mod ) { if ( section_mod < last_section_mod ) {
//if ( state == IDLE || state == TAPE || event_close_mode == CLOSE_TIME ) { //if ( state == IDLE || state == TAPE || event_close_mode == CLOSE_TIME ) {
//if ( state == TAPE ) { //if ( state == TAPE ) {
//shared_data->state = state = IDLE; //shared_data->state = state = IDLE;
//Info( "%s: %03d - Closing event %d, section end", name, image_count, event->Id() ) //Info( "%s: %03d - Closing event %d, section end", name, image_count, event->Id() )
//} else { //} else {
Info( "%s: %03d - Closing event %d, section end forced ", name, image_count, event->Id() ); Info( "%s: %03d - Closing event %d, section end forced ", name, image_count, event->Id() );
//} //}
closeEvent(); closeEvent();
last_section_mod = 0; last_section_mod = 0;
//} else { //} else {
//Debug( 2, "Time to close event, but state (%d) is not IDLE or TAPE and event_close_mode is not CLOSE_TIME (%d)", state, event_close_mode ); //Debug( 2, "Time to close event, but state (%d) is not IDLE or TAPE and event_close_mode is not CLOSE_TIME (%d)", state, event_close_mode );
//} //}
} else { } else {
last_section_mod = section_mod; last_section_mod = section_mod;
} }
} }
} // end if section_length } // end if section_length
if ( !event ) if ( !event )
{ {
@ -4412,14 +4412,14 @@ void Monitor::SingleImageZip( int scale)
fwrite( img_buffer, img_buffer_size, 1, stdout ); fwrite( img_buffer, img_buffer_size, 1, stdout );
} }
unsigned int Monitor::Colours() const { return( camera->Colours() ); } unsigned int Monitor::Colours() const { return( camera->Colours() ); }
unsigned int Monitor::SubpixelOrder() const { return( camera->SubpixelOrder() ); } unsigned int Monitor::SubpixelOrder() const { return( camera->SubpixelOrder() ); }
int Monitor::PrimeCapture() { int Monitor::PrimeCapture() {
return( camera->PrimeCapture() ); return( camera->PrimeCapture() );
} }
int Monitor::PreCapture() { int Monitor::PreCapture() {
return( camera->PreCapture() ); return( camera->PreCapture() );
} }
int Monitor::PostCapture() { int Monitor::PostCapture() {
return( camera->PostCapture() ); return( camera->PostCapture() );
} }
Monitor::Orientation Monitor::getOrientation()const { return orientation; } Monitor::Orientation Monitor::getOrientation()const { return orientation; }

View File

@ -125,11 +125,11 @@ class Monitor
time_t last_write_time; time_t last_write_time;
uint64_t extrapad1; uint64_t extrapad1;
}; };
union { /* +72 */ union { /* +72 */
time_t last_read_time; time_t last_read_time;
uint64_t extrapad2; uint64_t extrapad2;
}; };
uint8_t control_state[256]; /* +80 */ uint8_t control_state[256]; /* +80 */
} SharedData; } SharedData;
@ -150,8 +150,8 @@ class Monitor
/* sizeof(Snapshot) expected to be 16 bytes on 32bit and 32 bytes on 64bit */ /* sizeof(Snapshot) expected to be 16 bytes on 32bit and 32 bytes on 64bit */
struct Snapshot struct Snapshot
{ {
struct timeval *timestamp; struct timeval *timestamp;
Image *image; Image *image;
void* padding; void* padding;
}; };
@ -170,27 +170,27 @@ class Monitor
class MonitorLink { class MonitorLink {
protected: protected:
unsigned int id; unsigned int id;
char name[64]; char name[64];
bool connected; bool connected;
time_t last_connect_time; time_t last_connect_time;
#if ZM_MEM_MAPPED #if ZM_MEM_MAPPED
int map_fd; int map_fd;
char mem_file[PATH_MAX]; char mem_file[PATH_MAX];
#else // ZM_MEM_MAPPED #else // ZM_MEM_MAPPED
int shm_id; int shm_id;
#endif // ZM_MEM_MAPPED #endif // ZM_MEM_MAPPED
off_t mem_size; off_t mem_size;
unsigned char *mem_ptr; unsigned char *mem_ptr;
volatile SharedData *shared_data; volatile SharedData *shared_data;
volatile TriggerData *trigger_data; volatile TriggerData *trigger_data;
volatile VideoStoreData *video_store_data; volatile VideoStoreData *video_store_data;
int last_state; int last_state;
int last_event; int last_event;
public: public:
@ -221,263 +221,263 @@ class Monitor
protected: protected:
// These are read from the DB and thereafter remain unchanged // These are read from the DB and thereafter remain unchanged
unsigned int id; unsigned int id;
char name[64]; char name[64];
unsigned int server_id; // Id of the Server object unsigned int server_id; // Id of the Server object
unsigned int storage_id; // Id of the Storage Object, which currently will just provide a path, but in future may do more. unsigned int storage_id; // Id of the Storage Object, which currently will just provide a path, but in future may do more.
Function function; // What the monitor is doing Function function; // What the monitor is doing
bool enabled; // Whether the monitor is enabled or asleep bool enabled; // Whether the monitor is enabled or asleep
unsigned int width; // Normally the same as the camera, but not if partly rotated unsigned int width; // Normally the same as the camera, but not if partly rotated
unsigned int height; // Normally the same as the camera, but not if partly rotated unsigned int height; // Normally the same as the camera, but not if partly rotated
bool v4l_multi_buffer; bool v4l_multi_buffer;
unsigned int v4l_captures_per_frame; unsigned int v4l_captures_per_frame;
Orientation orientation; // Whether the image has to be rotated at all Orientation orientation; // Whether the image has to be rotated at all
unsigned int deinterlacing; unsigned int deinterlacing;
int savejpegspref; int savejpegspref;
int videowriterpref; int videowriterpref;
std::string encoderparams; std::string encoderparams;
std::vector<EncoderParameter_t> encoderparamsvec; std::vector<EncoderParameter_t> encoderparamsvec;
bool record_audio; // Whether to store the audio that we receive bool record_audio; // Whether to store the audio that we receive
int brightness; // The statically saved brightness of the camera int brightness; // The statically saved brightness of the camera
int contrast; // The statically saved contrast of the camera int contrast; // The statically saved contrast of the camera
int hue; // The statically saved hue of the camera int hue; // The statically saved hue of the camera
int colour; // The statically saved colour of the camera int colour; // The statically saved colour of the camera
char event_prefix[64]; // The prefix applied to event names as they are created char event_prefix[64]; // The prefix applied to event names as they are created
char label_format[64]; // The format of the timestamp on the images char label_format[64]; // The format of the timestamp on the images
Coord label_coord; // The coordinates of the timestamp on the images Coord label_coord; // The coordinates of the timestamp on the images
int label_size; // Size of the timestamp on the images int label_size; // Size of the timestamp on the images
int image_buffer_count; // Size of circular image buffer, at least twice the size of the pre_event_count int image_buffer_count; // Size of circular image buffer, at least twice the size of the pre_event_count
int pre_event_buffer_count; // Size of dedicated circular pre event buffer used when analysis is not performed at capturing framerate, int pre_event_buffer_count; // Size of dedicated circular pre event buffer used when analysis is not performed at capturing framerate,
// value is pre_event_count + alarm_frame_count - 1 // value is pre_event_count + alarm_frame_count - 1
int warmup_count; // How many images to process before looking for events int warmup_count; // How many images to process before looking for events
int pre_event_count; // How many images to hold and prepend to an alarm event int pre_event_count; // How many images to hold and prepend to an alarm event
int post_event_count; // How many unalarmed images must occur before the alarm state is reset int post_event_count; // How many unalarmed images must occur before the alarm state is reset
int stream_replay_buffer; // How many frames to store to support DVR functions, IGNORED from this object, passed directly into zms now int stream_replay_buffer; // How many frames to store to support DVR functions, IGNORED from this object, passed directly into zms now
int section_length; // How long events should last in continuous modes int section_length; // How long events should last in continuous modes
bool adaptive_skip; // Whether to use the newer adaptive algorithm for this monitor bool adaptive_skip; // Whether to use the newer adaptive algorithm for this monitor
int frame_skip; // How many frames to skip in continuous modes int frame_skip; // How many frames to skip in continuous modes
int motion_frame_skip; // How many frames to skip in motion detection int motion_frame_skip; // How many frames to skip in motion detection
double analysis_fps; // Target framerate for video analysis double analysis_fps; // Target framerate for video analysis
unsigned int analysis_update_delay; // How long we wait before updating analysis parameters unsigned int analysis_update_delay; // How long we wait before updating analysis parameters
int capture_delay; // How long we wait between capture frames int capture_delay; // How long we wait between capture frames
int alarm_capture_delay; // How long we wait between capture frames when in alarm state int alarm_capture_delay; // How long we wait between capture frames when in alarm state
int alarm_frame_count; // How many alarm frames are required before an event is triggered int alarm_frame_count; // How many alarm frames are required before an event is triggered
int fps_report_interval; // How many images should be captured/processed between reporting the current FPS int fps_report_interval; // How many images should be captured/processed between reporting the current FPS
int ref_blend_perc; // Percentage of new image going into reference image. int ref_blend_perc; // Percentage of new image going into reference image.
int alarm_ref_blend_perc; // Percentage of new image going into reference image during alarm. int alarm_ref_blend_perc; // Percentage of new image going into reference image during alarm.
bool track_motion; // Whether this monitor tries to track detected motion bool track_motion; // Whether this monitor tries to track detected motion
Rgb signal_check_colour; // The colour that the camera will emit when no video signal detected Rgb signal_check_colour; // The colour that the camera will emit when no video signal detected
bool embed_exif; // Whether to embed Exif data into each image frame or not bool embed_exif; // Whether to embed Exif data into each image frame or not
double fps; double fps;
Image delta_image; Image delta_image;
Image ref_image; Image ref_image;
Image alarm_image; // Used in creating analysis images, will be initialized in Analysis Image alarm_image; // Used in creating analysis images, will be initialized in Analysis
Image write_image; // Used when creating snapshot images Image write_image; // Used when creating snapshot images
Purpose purpose; // What this monitor has been created to do Purpose purpose; // What this monitor has been created to do
int event_count; int event_count;
int image_count; int image_count;
int ready_count; int ready_count;
int first_alarm_count; int first_alarm_count;
int last_alarm_count; int last_alarm_count;
int buffer_count; int buffer_count;
int prealarm_count; int prealarm_count;
State state; State state;
time_t start_time; time_t start_time;
time_t last_fps_time; time_t last_fps_time;
time_t auto_resume_time; time_t auto_resume_time;
unsigned int last_motion_score; unsigned int last_motion_score;
EventCloseMode event_close_mode; EventCloseMode event_close_mode;
#if ZM_MEM_MAPPED #if ZM_MEM_MAPPED
int map_fd; int map_fd;
char mem_file[PATH_MAX]; char mem_file[PATH_MAX];
#else // ZM_MEM_MAPPED #else // ZM_MEM_MAPPED
int shm_id; int shm_id;
#endif // ZM_MEM_MAPPED #endif // ZM_MEM_MAPPED
off_t mem_size; off_t mem_size;
unsigned char *mem_ptr; unsigned char *mem_ptr;
Storage *storage; Storage *storage;
SharedData *shared_data; SharedData *shared_data;
TriggerData *trigger_data; TriggerData *trigger_data;
VideoStoreData *video_store_data; VideoStoreData *video_store_data;
Snapshot *image_buffer; Snapshot *image_buffer;
Snapshot next_buffer; /* Used by four field deinterlacing */ Snapshot next_buffer; /* Used by four field deinterlacing */
Snapshot *pre_event_buffer; Snapshot *pre_event_buffer;
Camera *camera; Camera *camera;
Event *event; Event *event;
int n_zones; int n_zones;
Zone **zones; Zone **zones;
struct timeval **timestamps; struct timeval **timestamps;
Image **images; Image **images;
const unsigned char *privacy_bitmask; const unsigned char *privacy_bitmask;
int n_linked_monitors; int n_linked_monitors;
MonitorLink **linked_monitors; MonitorLink **linked_monitors;
public: public:
Monitor( int p_id ); Monitor( int p_id );
// OurCheckAlarms seems to be unused. Check it on zm_monitor.cpp for more info. // OurCheckAlarms seems to be unused. Check it on zm_monitor.cpp for more info.
//bool OurCheckAlarms( Zone *zone, const Image *pImage ); //bool OurCheckAlarms( Zone *zone, const Image *pImage );
Monitor( Monitor(
int p_id, int p_id,
const char *p_name, const char *p_name,
unsigned int p_server_id, unsigned int p_server_id,
unsigned int p_storage_id, unsigned int p_storage_id,
int p_function, int p_function,
bool p_enabled, bool p_enabled,
const char *p_linked_monitors, const char *p_linked_monitors,
Camera *p_camera, Camera *p_camera,
int p_orientation, int p_orientation,
unsigned int p_deinterlacing, unsigned int p_deinterlacing,
int p_savejpegs, int p_savejpegs,
int p_videowriter, int p_videowriter,
std::string p_encoderparams, std::string p_encoderparams,
bool p_record_audio, bool p_record_audio,
const char *p_event_prefix, const char *p_event_prefix,
const char *p_label_format, const char *p_label_format,
const Coord &p_label_coord, const Coord &p_label_coord,
int label_size, int label_size,
int p_image_buffer_count, int p_image_buffer_count,
int p_warmup_count, int p_warmup_count,
int p_pre_event_count, int p_pre_event_count,
int p_post_event_count, int p_post_event_count,
int p_stream_replay_buffer, int p_stream_replay_buffer,
int p_alarm_frame_count, int p_alarm_frame_count,
int p_section_length, int p_section_length,
int p_frame_skip, int p_frame_skip,
int p_motion_frame_skip, int p_motion_frame_skip,
double p_analysis_fps, double p_analysis_fps,
unsigned int p_analysis_update_delay, unsigned int p_analysis_update_delay,
int p_capture_delay, int p_capture_delay,
int p_alarm_capture_delay, int p_alarm_capture_delay,
int p_fps_report_interval, int p_fps_report_interval,
int p_ref_blend_perc, int p_ref_blend_perc,
int p_alarm_ref_blend_perc, int p_alarm_ref_blend_perc,
bool p_track_motion, bool p_track_motion,
Rgb p_signal_check_colour, Rgb p_signal_check_colour,
bool p_embed_exif, bool p_embed_exif,
Purpose p_purpose, Purpose p_purpose,
int p_n_zones=0, int p_n_zones=0,
Zone *p_zones[]=0 Zone *p_zones[]=0
); );
~Monitor(); ~Monitor();
void AddZones( int p_n_zones, Zone *p_zones[] ); void AddZones( int p_n_zones, Zone *p_zones[] );
void AddPrivacyBitmask( Zone *p_zones[] ); void AddPrivacyBitmask( Zone *p_zones[] );
bool connect(); bool connect();
inline int ShmValid() const { inline int ShmValid() const {
return( shared_data->valid ); return( shared_data->valid );
} }
inline int Id() const { inline int Id() const {
return( id ); return( id );
} }
inline const char *Name() const { inline const char *Name() const {
return( name ); return( name );
} }
inline Storage *getStorage() { inline Storage *getStorage() {
if ( ! storage ) { if ( ! storage ) {
storage = new Storage( storage_id ); storage = new Storage( storage_id );
} }
return( storage ); return( storage );
} }
inline Function GetFunction() const { inline Function GetFunction() const {
return( function ); return( function );
} }
inline bool Enabled() { inline bool Enabled() {
if ( function <= MONITOR ) if ( function <= MONITOR )
return( false ); return( false );
return( enabled ); return( enabled );
} }
inline const char *EventPrefix() const { inline const char *EventPrefix() const {
return( event_prefix ); return( event_prefix );
} }
inline bool Ready() { inline bool Ready() {
if ( function <= MONITOR ) if ( function <= MONITOR )
return( false ); return( false );
return( image_count > ready_count ); return( image_count > ready_count );
} }
inline bool Active() { inline bool Active() {
if ( function <= MONITOR ) if ( function <= MONITOR )
return( false ); return( false );
return( enabled && shared_data->active ); return( enabled && shared_data->active );
} }
inline bool Exif() { inline bool Exif() {
return( embed_exif ); return( embed_exif );
} }
Orientation getOrientation()const; Orientation getOrientation()const;
unsigned int Width() const { return width; } unsigned int Width() const { return width; }
unsigned int Height() const { return height; } unsigned int Height() const { return height; }
unsigned int Colours() const; unsigned int Colours() const;
unsigned int SubpixelOrder() const; unsigned int SubpixelOrder() const;
int GetOptSaveJPEGs() const { return( savejpegspref ); } int GetOptSaveJPEGs() const { return( savejpegspref ); }
int GetOptVideoWriter() const { return( videowriterpref ); } int GetOptVideoWriter() const { return( videowriterpref ); }
const std::vector<EncoderParameter_t>* GetOptEncoderParams() const { return( &encoderparamsvec ); } const std::vector<EncoderParameter_t>* GetOptEncoderParams() const { return( &encoderparamsvec ); }
State GetState() const; State GetState() const;
int GetImage( int index=-1, int scale=100 ); int GetImage( int index=-1, int scale=100 );
struct timeval GetTimestamp( int index=-1 ) const; struct timeval GetTimestamp( int index=-1 ) const;
void UpdateAdaptiveSkip(); void UpdateAdaptiveSkip();
useconds_t GetAnalysisRate(); useconds_t GetAnalysisRate();
unsigned int GetAnalysisUpdateDelay() const { return( analysis_update_delay ); } unsigned int GetAnalysisUpdateDelay() const { return( analysis_update_delay ); }
int GetCaptureDelay() const { return( capture_delay ); } int GetCaptureDelay() const { return( capture_delay ); }
int GetAlarmCaptureDelay() const { return( alarm_capture_delay ); } int GetAlarmCaptureDelay() const { return( alarm_capture_delay ); }
unsigned int GetLastReadIndex() const; unsigned int GetLastReadIndex() const;
unsigned int GetLastWriteIndex() const; unsigned int GetLastWriteIndex() const;
unsigned int GetLastEvent() const; unsigned int GetLastEvent() const;
double GetFPS() const; double GetFPS() const;
void ForceAlarmOn( int force_score, const char *force_case, const char *force_text="" ); void ForceAlarmOn( int force_score, const char *force_case, const char *force_text="" );
void ForceAlarmOff(); void ForceAlarmOff();
void CancelForced(); void CancelForced();
TriggerState GetTriggerState() const { return( (TriggerState)(trigger_data?trigger_data->trigger_state:TRIGGER_CANCEL )); } TriggerState GetTriggerState() const { return( (TriggerState)(trigger_data?trigger_data->trigger_state:TRIGGER_CANCEL )); }
void actionReload(); void actionReload();
void actionEnable(); void actionEnable();
void actionDisable(); void actionDisable();
void actionSuspend(); void actionSuspend();
void actionResume(); void actionResume();
int actionBrightness( int p_brightness=-1 ); int actionBrightness( int p_brightness=-1 );
int actionHue( int p_hue=-1 ); int actionHue( int p_hue=-1 );
int actionColour( int p_colour=-1 ); int actionColour( int p_colour=-1 );
int actionContrast( int p_contrast=-1 ); int actionContrast( int p_contrast=-1 );
int PrimeCapture(); int PrimeCapture();
int PreCapture(); int PreCapture();
int Capture(); int Capture();
int PostCapture(); int PostCapture();
unsigned int DetectMotion( const Image &comp_image, Event::StringSet &zoneSet ); unsigned int DetectMotion( const Image &comp_image, Event::StringSet &zoneSet );
// DetectBlack seems to be unused. Check it on zm_monitor.cpp for more info. // DetectBlack seems to be unused. Check it on zm_monitor.cpp for more info.
//unsigned int DetectBlack( const Image &comp_image, Event::StringSet &zoneSet ); //unsigned int DetectBlack( const Image &comp_image, Event::StringSet &zoneSet );
bool CheckSignal( const Image *image ); bool CheckSignal( const Image *image );
bool Analyse(); bool Analyse();
void DumpImage( Image *dump_image ) const; void DumpImage( Image *dump_image ) const;
void TimestampImage( Image *ts_image, const struct timeval *ts_time ) const; void TimestampImage( Image *ts_image, const struct timeval *ts_time ) const;
bool closeEvent(); bool closeEvent();
void Reload(); void Reload();
void ReloadZones(); void ReloadZones();
void ReloadLinkedMonitors( const char * ); void ReloadLinkedMonitors( const char * );
bool DumpSettings( char *output, bool verbose ); bool DumpSettings( char *output, bool verbose );
void DumpZoneImage( const char *zone_string=0 ); void DumpZoneImage( const char *zone_string=0 );
#if ZM_HAS_V4L #if ZM_HAS_V4L
static int LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose purpose ); static int LoadLocalMonitors( const char *device, Monitor **&monitors, Purpose purpose );
@ -505,9 +505,9 @@ public:
class MonitorStream : public StreamBase { class MonitorStream : public StreamBase {
protected: protected:
typedef struct SwapImage { typedef struct SwapImage {
bool valid; bool valid;
struct timeval timestamp; struct timeval timestamp;
char file_name[PATH_MAX]; char file_name[PATH_MAX];
} SwapImage; } SwapImage;
private: private:

View File

@ -36,60 +36,60 @@
class RemoteCamera : public Camera class RemoteCamera : public Camera
{ {
protected: protected:
std::string protocol; std::string protocol;
std::string host; std::string host;
std::string port; std::string port;
std::string path; std::string path;
std::string auth; std::string auth;
std::string username; std::string username;
std::string password; std::string password;
std::string auth64; std::string auth64;
// Reworked authentication system // Reworked authentication system
// First try without authentication, even if we have a username and password // First try without authentication, even if we have a username and password
// on receiving a 401 response, select authentication method (basic or digest) // on receiving a 401 response, select authentication method (basic or digest)
// fill required fields and set needAuth // fill required fields and set needAuth
// subsequent requests can set the required authentication header. // subsequent requests can set the required authentication header.
bool mNeedAuth; bool mNeedAuth;
zm::Authenticator* mAuthenticator; zm::Authenticator* mAuthenticator;
protected: protected:
struct addrinfo *hp; struct addrinfo *hp;
public: public:
RemoteCamera( RemoteCamera(
int p_id, int p_id,
const std::string &p_proto, const std::string &p_proto,
const std::string &p_host, const std::string &p_host,
const std::string &p_port, const std::string &p_port,
const std::string &p_path, const std::string &p_path,
int p_width, int p_width,
int p_height, int p_height,
int p_colours, int p_colours,
int p_brightness, int p_brightness,
int p_contrast, int p_contrast,
int p_hue, int p_hue,
int p_colour, int p_colour,
bool p_capture, bool p_capture,
bool p_record_audio bool p_record_audio
); );
virtual ~RemoteCamera(); virtual ~RemoteCamera();
const std::string &Protocol() const { return( protocol ); } const std::string &Protocol() const { return( protocol ); }
const std::string &Host() const { return( host ); } const std::string &Host() const { return( host ); }
const std::string &Port() const { return( port ); } const std::string &Port() const { return( port ); }
const std::string &Path() const { return( path ); } const std::string &Path() const { return( path ); }
const std::string &Auth() const { return( auth ); } const std::string &Auth() const { return( auth ); }
const std::string &Username() const { return( username ); } const std::string &Username() const { return( username ); }
const std::string &Password() const { return( password ); } const std::string &Password() const { return( password ); }
virtual void Initialise(); virtual void Initialise();
virtual void Terminate() = 0; virtual void Terminate() = 0;
virtual int Connect() = 0; virtual int Connect() = 0;
virtual int Disconnect() = 0; virtual int Disconnect() = 0;
virtual int PreCapture() = 0; virtual int PreCapture() = 0;
virtual int Capture( Image &image ) = 0; virtual int Capture( Image &image ) = 0;
virtual int PostCapture() = 0; virtual int PostCapture() = 0;
virtual int CaptureAndRecord( Image &image, bool recording, char* event_directory )=0; virtual int CaptureAndRecord( Image &image, bool recording, char* event_directory )=0;
}; };
#endif // ZM_REMOTE_CAMERA_H #endif // ZM_REMOTE_CAMERA_H

File diff suppressed because it is too large Load Diff

View File

@ -33,32 +33,32 @@
class RemoteCameraHttp : public RemoteCamera class RemoteCameraHttp : public RemoteCamera
{ {
protected: protected:
std::string request; std::string request;
struct timeval timeout; struct timeval timeout;
//struct hostent *hp; //struct hostent *hp;
//struct sockaddr_in sa; //struct sockaddr_in sa;
int sd; int sd;
Buffer buffer; Buffer buffer;
enum { SINGLE_IMAGE, MULTI_IMAGE } mode; enum { SINGLE_IMAGE, MULTI_IMAGE } mode;
enum { UNDEF, JPEG, X_RGB, X_RGBZ } format; enum { UNDEF, JPEG, X_RGB, X_RGBZ } format;
enum { HEADER, HEADERCONT, SUBHEADER, SUBHEADERCONT, CONTENT } state; enum { HEADER, HEADERCONT, SUBHEADER, SUBHEADERCONT, CONTENT } state;
enum { SIMPLE, REGEXP } method; enum { SIMPLE, REGEXP } method;
public: public:
RemoteCameraHttp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ); RemoteCameraHttp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~RemoteCameraHttp(); ~RemoteCameraHttp();
void Initialise(); void Initialise();
void Terminate() { Disconnect(); } void Terminate() { Disconnect(); }
int Connect(); int Connect();
int Disconnect(); int Disconnect();
int SendRequest(); int SendRequest();
int ReadData( Buffer &buffer, int bytes_expected=0 ); int ReadData( Buffer &buffer, int bytes_expected=0 );
int GetResponse(); int GetResponse();
int PreCapture(); int PreCapture();
int Capture( Image &image ); int Capture( Image &image );
int PostCapture(); int PostCapture();
int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);}; int CaptureAndRecord( Image &image, bool recording, char* event_directory ) {return(0);};
}; };
#endif // ZM_REMOTE_CAMERA_HTTP_H #endif // ZM_REMOTE_CAMERA_HTTP_H

View File

@ -29,166 +29,166 @@
#include <sys/socket.h> #include <sys/socket.h>
RemoteCameraRtsp::RemoteCameraRtsp( int p_id, const std::string &p_method, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) : RemoteCameraRtsp::RemoteCameraRtsp( int p_id, const std::string &p_method, const std::string &p_host, const std::string &p_port, const std::string &p_path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ) :
RemoteCamera( p_id, "rtsp", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ), RemoteCamera( p_id, "rtsp", p_host, p_port, p_path, p_width, p_height, p_colours, p_brightness, p_contrast, p_hue, p_colour, p_capture, p_record_audio ),
rtsp_describe( p_rtsp_describe ), rtsp_describe( p_rtsp_describe ),
rtspThread( 0 ) rtspThread( 0 )
{ {
if ( p_method == "rtpUni" ) if ( p_method == "rtpUni" )
method = RtspThread::RTP_UNICAST; method = RtspThread::RTP_UNICAST;
else if ( p_method == "rtpMulti" ) else if ( p_method == "rtpMulti" )
method = RtspThread::RTP_MULTICAST; method = RtspThread::RTP_MULTICAST;
else if ( p_method == "rtpRtsp" ) else if ( p_method == "rtpRtsp" )
method = RtspThread::RTP_RTSP; method = RtspThread::RTP_RTSP;
else if ( p_method == "rtpRtspHttp" ) else if ( p_method == "rtpRtspHttp" )
method = RtspThread::RTP_RTSP_HTTP; method = RtspThread::RTP_RTSP_HTTP;
else else
Fatal( "Unrecognised method '%s' when creating RTSP camera %d", p_method.c_str(), id ); Fatal( "Unrecognised method '%s' when creating RTSP camera %d", p_method.c_str(), id );
if ( capture ) if ( capture )
{ {
Initialise(); Initialise();
} }
mFormatContext = NULL; mFormatContext = NULL;
mVideoStreamId = -1; mVideoStreamId = -1;
mAudioStreamId = -1; mAudioStreamId = -1;
mCodecContext = NULL; mCodecContext = NULL;
mCodec = NULL; mCodec = NULL;
mRawFrame = NULL; mRawFrame = NULL;
mFrame = NULL; mFrame = NULL;
frameCount = 0; frameCount = 0;
wasRecording = false; wasRecording = false;
startTime=0; startTime=0;
#if HAVE_LIBSWSCALE #if HAVE_LIBSWSCALE
mConvertContext = NULL; mConvertContext = NULL;
#endif #endif
/* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */ /* Has to be located inside the constructor so other components such as zma will receive correct colours and subpixel order */
if(colours == ZM_COLOUR_RGB32) { if(colours == ZM_COLOUR_RGB32) {
subpixelorder = ZM_SUBPIX_ORDER_RGBA; subpixelorder = ZM_SUBPIX_ORDER_RGBA;
imagePixFormat = AV_PIX_FMT_RGBA; imagePixFormat = AV_PIX_FMT_RGBA;
} else if(colours == ZM_COLOUR_RGB24) { } else if(colours == ZM_COLOUR_RGB24) {
subpixelorder = ZM_SUBPIX_ORDER_RGB; subpixelorder = ZM_SUBPIX_ORDER_RGB;
imagePixFormat = AV_PIX_FMT_RGB24; imagePixFormat = AV_PIX_FMT_RGB24;
} else if(colours == ZM_COLOUR_GRAY8) { } else if(colours == ZM_COLOUR_GRAY8) {
subpixelorder = ZM_SUBPIX_ORDER_NONE; subpixelorder = ZM_SUBPIX_ORDER_NONE;
imagePixFormat = AV_PIX_FMT_GRAY8; imagePixFormat = AV_PIX_FMT_GRAY8;
} else { } else {
Panic("Unexpected colours: %d",colours); Panic("Unexpected colours: %d",colours);
} }
} }
RemoteCameraRtsp::~RemoteCameraRtsp() RemoteCameraRtsp::~RemoteCameraRtsp()
{ {
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101) #if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
av_frame_free( &mFrame ); av_frame_free( &mFrame );
av_frame_free( &mRawFrame ); av_frame_free( &mRawFrame );
#else #else
av_freep( &mFrame ); av_freep( &mFrame );
av_freep( &mRawFrame ); av_freep( &mRawFrame );
#endif #endif
#if HAVE_LIBSWSCALE #if HAVE_LIBSWSCALE
if ( mConvertContext ) if ( mConvertContext )
{ {
sws_freeContext( mConvertContext ); sws_freeContext( mConvertContext );
mConvertContext = NULL; mConvertContext = NULL;
} }
#endif #endif
if ( mCodecContext ) if ( mCodecContext )
{ {
avcodec_close( mCodecContext ); avcodec_close( mCodecContext );
mCodecContext = NULL; // Freed by avformat_free_context in the destructor of RtspThread class mCodecContext = NULL; // Freed by avformat_free_context in the destructor of RtspThread class
} }
if ( capture ) if ( capture )
{ {
Terminate(); Terminate();
} }
} }
void RemoteCameraRtsp::Initialise() void RemoteCameraRtsp::Initialise()
{ {
RemoteCamera::Initialise(); RemoteCamera::Initialise();
int max_size = width*height*colours; int max_size = width*height*colours;
// This allocates a buffer able to hold a raw fframe, which is a little artbitrary. Might be nice to get some // This allocates a buffer able to hold a raw fframe, which is a little artbitrary. Might be nice to get some
// decent data on how large a buffer is really needed. I think in ffmpeg there are now some functions to do that. // decent data on how large a buffer is really needed. I think in ffmpeg there are now some functions to do that.
buffer.size( max_size ); buffer.size( max_size );
if ( logDebugging() ) if ( logDebugging() )
av_log_set_level( AV_LOG_DEBUG ); av_log_set_level( AV_LOG_DEBUG );
else else
av_log_set_level( AV_LOG_QUIET ); av_log_set_level( AV_LOG_QUIET );
av_register_all(); av_register_all();
Connect(); Connect();
} }
void RemoteCameraRtsp::Terminate() void RemoteCameraRtsp::Terminate()
{ {
Disconnect(); Disconnect();
} }
int RemoteCameraRtsp::Connect() int RemoteCameraRtsp::Connect()
{ {
rtspThread = new RtspThread( id, method, protocol, host, port, path, auth, rtsp_describe ); rtspThread = new RtspThread( id, method, protocol, host, port, path, auth, rtsp_describe );
rtspThread->start(); rtspThread->start();
return( 0 ); return( 0 );
} }
int RemoteCameraRtsp::Disconnect() int RemoteCameraRtsp::Disconnect()
{ {
if ( rtspThread ) if ( rtspThread )
{ {
rtspThread->stop(); rtspThread->stop();
rtspThread->join(); rtspThread->join();
delete rtspThread; delete rtspThread;
rtspThread = 0; rtspThread = 0;
} }
return( 0 ); return( 0 );
} }
int RemoteCameraRtsp::PrimeCapture() int RemoteCameraRtsp::PrimeCapture()
{ {
Debug( 2, "Waiting for sources" ); Debug( 2, "Waiting for sources" );
for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ ) for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ )
{ {
usleep( 100000 ); usleep( 100000 );
} }
if ( !rtspThread->hasSources() ) if ( !rtspThread->hasSources() )
Fatal( "No RTSP sources" ); Fatal( "No RTSP sources" );
Debug( 2, "Got sources" ); Debug( 2, "Got sources" );
mFormatContext = rtspThread->getFormatContext(); mFormatContext = rtspThread->getFormatContext();
// Find first video stream present // Find first video stream present
mVideoStreamId = -1; mVideoStreamId = -1;
mAudioStreamId = -1; mAudioStreamId = -1;
// Find the first video stream. // Find the first video stream.
for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ ) { for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ ) {
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0)) #if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO ) if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else #else
if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO ) if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif #endif
{ {
if ( mVideoStreamId == -1 ) { if ( mVideoStreamId == -1 ) {
mVideoStreamId = i; mVideoStreamId = i;
continue; continue;
} else { } else {
Debug(2, "Have another video stream." ); Debug(2, "Have another video stream." );
} }
} }
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0)) #if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO ) if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
#else #else
@ -203,39 +203,39 @@ int RemoteCameraRtsp::PrimeCapture()
} }
} }
if ( mVideoStreamId == -1 ) if ( mVideoStreamId == -1 )
Fatal( "Unable to locate video stream" ); Fatal( "Unable to locate video stream" );
if ( mAudioStreamId == -1 ) if ( mAudioStreamId == -1 )
Debug( 3, "Unable to locate audio stream" ); Debug( 3, "Unable to locate audio stream" );
// Get a pointer to the codec context for the video stream // Get a pointer to the codec context for the video stream
mCodecContext = mFormatContext->streams[mVideoStreamId]->codec; mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;
// Find the decoder for the video stream // Find the decoder for the video stream
mCodec = avcodec_find_decoder( mCodecContext->codec_id ); mCodec = avcodec_find_decoder( mCodecContext->codec_id );
if ( mCodec == NULL ) if ( mCodec == NULL )
Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id ); Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id );
// Open codec // Open codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0) #if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
if ( avcodec_open( mCodecContext, mCodec ) < 0 ) if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else #else
if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 ) if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif #endif
Panic( "Can't open codec" ); Panic( "Can't open codec" );
// Allocate space for the native video frame // Allocate space for the native video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101) #if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
mRawFrame = av_frame_alloc(); mRawFrame = av_frame_alloc();
#else #else
mRawFrame = avcodec_alloc_frame(); mRawFrame = avcodec_alloc_frame();
#endif #endif
// Allocate space for the converted video frame // Allocate space for the converted video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101) #if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
mFrame = av_frame_alloc(); mFrame = av_frame_alloc();
#else #else
mFrame = avcodec_alloc_frame(); mFrame = avcodec_alloc_frame();
#endif #endif
if(mRawFrame == NULL || mFrame == NULL) if(mRawFrame == NULL || mFrame == NULL)
@ -247,152 +247,152 @@ int RemoteCameraRtsp::PrimeCapture()
int pSize = avpicture_get_size( imagePixFormat, width, height ); int pSize = avpicture_get_size( imagePixFormat, width, height );
#endif #endif
if( (unsigned int)pSize != imagesize) { if( (unsigned int)pSize != imagesize) {
Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize); Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
} }
/* /*
#if HAVE_LIBSWSCALE #if HAVE_LIBSWSCALE
if(!sws_isSupportedInput(mCodecContext->pix_fmt)) { if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff)); Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
} }
if(!sws_isSupportedOutput(imagePixFormat)) { if(!sws_isSupportedOutput(imagePixFormat)) {
Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff)); Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
} }
#else // HAVE_LIBSWSCALE #else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" ); Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE #endif // HAVE_LIBSWSCALE
*/ */
return( 0 ); return( 0 );
} }
int RemoteCameraRtsp::PreCapture() { int RemoteCameraRtsp::PreCapture() {
if ( !rtspThread->isRunning() ) if ( !rtspThread->isRunning() )
return( -1 ); return( -1 );
if ( !rtspThread->hasSources() ) if ( !rtspThread->hasSources() )
{ {
Error( "Cannot precapture, no RTP sources" ); Error( "Cannot precapture, no RTP sources" );
return( -1 ); return( -1 );
} }
return( 0 ); return( 0 );
} }
int RemoteCameraRtsp::Capture( Image &image ) { int RemoteCameraRtsp::Capture( Image &image ) {
AVPacket packet; AVPacket packet;
uint8_t* directbuffer; uint8_t* directbuffer;
int frameComplete = false; int frameComplete = false;
/* Request a writeable buffer of the target image */ /* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder); directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if(directbuffer == NULL) { if(directbuffer == NULL) {
Error("Failed requesting writeable buffer for the captured image."); Error("Failed requesting writeable buffer for the captured image.");
return (-1); return (-1);
} }
while ( true ) { while ( true ) {
buffer.clear(); buffer.clear();
if ( !rtspThread->isRunning() ) if ( !rtspThread->isRunning() )
return (-1); return (-1);
if ( rtspThread->getFrame( buffer ) ) { if ( rtspThread->getFrame( buffer ) ) {
Debug( 3, "Read frame %d bytes", buffer.size() ); Debug( 3, "Read frame %d bytes", buffer.size() );
Debug( 4, "Address %p", buffer.head() ); Debug( 4, "Address %p", buffer.head() );
Hexdump( 4, buffer.head(), 16 ); Hexdump( 4, buffer.head(), 16 );
if ( !buffer.size() ) if ( !buffer.size() )
return( -1 ); return( -1 );
if(mCodecContext->codec_id == AV_CODEC_ID_H264) { if(mCodecContext->codec_id == AV_CODEC_ID_H264) {
// SPS and PPS frames should be saved and appended to IDR frames // SPS and PPS frames should be saved and appended to IDR frames
int nalType = (buffer.head()[3] & 0x1f); int nalType = (buffer.head()[3] & 0x1f);
// SPS The SPS NAL unit contains parameters that apply to a series of consecutive coded video pictures // SPS The SPS NAL unit contains parameters that apply to a series of consecutive coded video pictures
if(nalType == 7) if(nalType == 7)
{ {
lastSps = buffer; lastSps = buffer;
continue; continue;
} }
// PPS The PPS NAL unit contains parameters that apply to the decoding of one or more individual pictures inside a coded video sequence // PPS The PPS NAL unit contains parameters that apply to the decoding of one or more individual pictures inside a coded video sequence
else if(nalType == 8) else if(nalType == 8)
{ {
lastPps = buffer; lastPps = buffer;
continue; continue;
} }
// IDR // IDR
else if(nalType == 5) else if(nalType == 5)
{ {
buffer += lastSps; buffer += lastSps;
buffer += lastPps; buffer += lastPps;
} }
} else { } else {
Debug(3, "Not an h264 packet"); Debug(3, "Not an h264 packet");
} }
av_init_packet( &packet ); av_init_packet( &packet );
while ( !frameComplete && buffer.size() > 0 ) { while ( !frameComplete && buffer.size() > 0 ) {
packet.data = buffer.head(); packet.data = buffer.head();
packet.size = buffer.size(); packet.size = buffer.size();
// So I think this is the magic decode step. Result is a raw image? // So I think this is the magic decode step. Result is a raw image?
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0) #if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ); int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
#else #else
int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size ); int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
#endif #endif
if ( len < 0 ) { if ( len < 0 ) {
Error( "Error while decoding frame %d", frameCount ); Error( "Error while decoding frame %d", frameCount );
Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() ); Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
buffer.clear(); buffer.clear();
continue; continue;
} }
Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() ); Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
//if ( buffer.size() < 400 ) //if ( buffer.size() < 400 )
//Hexdump( 0, buffer.head(), buffer.size() ); //Hexdump( 0, buffer.head(), buffer.size() );
buffer -= len; buffer -= len;
} }
// At this point, we either have a frame or ran out of buffer. What happens if we run out of buffer? // At this point, we either have a frame or ran out of buffer. What happens if we run out of buffer?
if ( frameComplete ) { if ( frameComplete ) {
Debug( 3, "Got frame %d", frameCount ); Debug( 3, "Got frame %d", frameCount );
avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height ); avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height );
#if HAVE_LIBSWSCALE #if HAVE_LIBSWSCALE
if(mConvertContext == NULL) { if(mConvertContext == NULL) {
mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL ); mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
if(mConvertContext == NULL) if(mConvertContext == NULL)
Fatal( "Unable to create conversion context"); Fatal( "Unable to create conversion context");
} }
if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 ) if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount ); Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE #else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" ); Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE #endif // HAVE_LIBSWSCALE
frameCount++; frameCount++;
} /* frame complete */ } /* frame complete */
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100) #if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
av_packet_unref( &packet ); av_packet_unref( &packet );
#else #else
av_free_packet( &packet ); av_free_packet( &packet );
#endif #endif
} /* getFrame() */ } /* getFrame() */
if(frameComplete) if(frameComplete)
return (0); return (0);
} // end while true } // end while true
// can never get here. // can never get here.
return (0) ; return (0) ;
} }
//int RemoteCameraRtsp::ReadData(void *opaque, uint8_t *buf, int bufSize) { //int RemoteCameraRtsp::ReadData(void *opaque, uint8_t *buf, int bufSize) {
@ -408,84 +408,84 @@ int RemoteCameraRtsp::Capture( Image &image ) {
//Function to handle capture and store //Function to handle capture and store
int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* event_file ) { int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* event_file ) {
AVPacket packet; AVPacket packet;
uint8_t* directbuffer; uint8_t* directbuffer;
int frameComplete = false; int frameComplete = false;
/* Request a writeable buffer of the target image */ /* Request a writeable buffer of the target image */
directbuffer = image.WriteBuffer(width, height, colours, subpixelorder); directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
if(directbuffer == NULL) { if(directbuffer == NULL) {
Error("Failed requesting writeable buffer for the captured image."); Error("Failed requesting writeable buffer for the captured image.");
return (-1); return (-1);
} }
while ( true ) { while ( true ) {
buffer.clear(); buffer.clear();
if ( !rtspThread->isRunning() ) if ( !rtspThread->isRunning() )
return (-1); return (-1);
if ( rtspThread->getFrame( buffer ) ) { if ( rtspThread->getFrame( buffer ) ) {
Debug( 3, "Read frame %d bytes", buffer.size() ); Debug( 3, "Read frame %d bytes", buffer.size() );
Debug( 4, "Address %p", buffer.head() ); Debug( 4, "Address %p", buffer.head() );
Hexdump( 4, buffer.head(), 16 ); Hexdump( 4, buffer.head(), 16 );
if ( !buffer.size() ) if ( !buffer.size() )
return( -1 ); return( -1 );
if(mCodecContext->codec_id == AV_CODEC_ID_H264) { if(mCodecContext->codec_id == AV_CODEC_ID_H264) {
// SPS and PPS frames should be saved and appended to IDR frames // SPS and PPS frames should be saved and appended to IDR frames
int nalType = (buffer.head()[3] & 0x1f); int nalType = (buffer.head()[3] & 0x1f);
// SPS // SPS
if(nalType == 7) { if(nalType == 7) {
lastSps = buffer; lastSps = buffer;
continue; continue;
} }
// PPS // PPS
else if(nalType == 8) { else if(nalType == 8) {
lastPps = buffer; lastPps = buffer;
continue; continue;
} }
// IDR // IDR
else if(nalType == 5) { else if(nalType == 5) {
buffer += lastSps; buffer += lastSps;
buffer += lastPps; buffer += lastPps;
} }
} // end if H264, what about other codecs? } // end if H264, what about other codecs?
av_init_packet( &packet ); av_init_packet( &packet );
// Why are we checking for it being the video stream? Because it might be audio or something else. // Why are we checking for it being the video stream? Because it might be audio or something else.
// Um... we just initialized packet... we can't be testing for what it is yet.... // Um... we just initialized packet... we can't be testing for what it is yet....
if ( packet.stream_index == mVideoStreamId ) { if ( packet.stream_index == mVideoStreamId ) {
while ( !frameComplete && buffer.size() > 0 ) { while ( !frameComplete && buffer.size() > 0 ) {
packet.data = buffer.head(); packet.data = buffer.head();
packet.size = buffer.size(); packet.size = buffer.size();
// So this does the decode // So this does the decode
#if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0) #if LIBAVCODEC_VERSION_CHECK(52, 23, 0, 23, 0)
int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ); int len = avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet );
#else #else
int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size ); int len = avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size );
#endif #endif
if ( len < 0 ) { if ( len < 0 ) {
Error( "Error while decoding frame %d", frameCount ); Error( "Error while decoding frame %d", frameCount );
Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() ); Hexdump( Logger::ERROR, buffer.head(), buffer.size()>256?256:buffer.size() );
buffer.clear(); buffer.clear();
continue; continue;
} }
Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() ); Debug( 2, "Frame: %d - %d/%d", frameCount, len, buffer.size() );
//if ( buffer.size() < 400 ) //if ( buffer.size() < 400 )
//Hexdump( 0, buffer.head(), buffer.size() ); //Hexdump( 0, buffer.head(), buffer.size() );
buffer -= len; buffer -= len;
} // end while get & decode a frame } // end while get & decode a frame
if ( frameComplete ) { if ( frameComplete ) {
Debug( 3, "Got frame %d", frameCount ); Debug( 3, "Got frame %d", frameCount );
#if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0) #if LIBAVUTIL_VERSION_CHECK(54, 6, 0, 6, 0)
av_image_fill_arrays(mFrame->data, mFrame->linesize, av_image_fill_arrays(mFrame->data, mFrame->linesize,
@ -496,98 +496,98 @@ int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* even
#endif #endif
//Video recording //Video recording
if ( recording && !wasRecording ) { if ( recording && !wasRecording ) {
//Instantiate the video storage module //Instantiate the video storage module
videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() ); videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() );
wasRecording = true; wasRecording = true;
strcpy(oldDirectory, event_file); strcpy(oldDirectory, event_file);
} else if ( !recording && wasRecording && videoStore ) { } else if ( !recording && wasRecording && videoStore ) {
// Why are we deleting the videostore? Becase for soem reason we are no longer recording? How does that happen? // Why are we deleting the videostore? Becase for soem reason we are no longer recording? How does that happen?
Info("Deleting videoStore instance"); Info("Deleting videoStore instance");
delete videoStore; delete videoStore;
videoStore = NULL; videoStore = NULL;
} }
//The directory we are recording to is no longer tied to the current event. Need to re-init the videostore with the correct directory and start recording again //The directory we are recording to is no longer tied to the current event. Need to re-init the videostore with the correct directory and start recording again
if ( recording && wasRecording && (strcmp(oldDirectory, event_file)!=0) && (packet.flags & AV_PKT_FLAG_KEY) ) { if ( recording && wasRecording && (strcmp(oldDirectory, event_file)!=0) && (packet.flags & AV_PKT_FLAG_KEY) ) {
//don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?...if we store our key frame location with the event will that be enough? //don't open new videostore until we're on a key frame..would this require an offset adjustment for the event as a result?...if we store our key frame location with the event will that be enough?
Info("Re-starting video storage module"); Info("Re-starting video storage module");
if ( videoStore ) { if ( videoStore ) {
delete videoStore; delete videoStore;
videoStore = NULL; videoStore = NULL;
} }
videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() ); videoStore = new VideoStore((const char *)event_file, "mp4", mFormatContext->streams[mVideoStreamId],mAudioStreamId==-1?NULL:mFormatContext->streams[mAudioStreamId],startTime, this->getMonitor()->getOrientation() );
strcpy( oldDirectory, event_file ); strcpy( oldDirectory, event_file );
} }
if ( videoStore && recording ) { if ( videoStore && recording ) {
//Write the packet to our video store //Write the packet to our video store
int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt); int ret = videoStore->writeVideoFramePacket(&packet, mFormatContext->streams[mVideoStreamId]);//, &lastKeyframePkt);
if ( ret < 0 ) {//Less than zero and we skipped a frame if ( ret < 0 ) {//Less than zero and we skipped a frame
av_free_packet( &packet ); av_free_packet( &packet );
return 0; return 0;
} }
} }
#if HAVE_LIBSWSCALE #if HAVE_LIBSWSCALE
// Why are we re-scaling after writing out the packet? // Why are we re-scaling after writing out the packet?
if(mConvertContext == NULL) { if(mConvertContext == NULL) {
mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL ); mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, width, height, imagePixFormat, SWS_BICUBIC, NULL, NULL, NULL );
if(mConvertContext == NULL) if(mConvertContext == NULL)
Fatal( "Unable to create conversion context"); Fatal( "Unable to create conversion context");
} }
if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 ) if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0, mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount ); Fatal( "Unable to convert raw format %u to target format %u at frame %d", mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE #else // HAVE_LIBSWSCALE
Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" ); Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE #endif // HAVE_LIBSWSCALE
frameCount++; frameCount++;
} /* frame complete */ } /* frame complete */
} else if ( packet.stream_index == mAudioStreamId ) { } else if ( packet.stream_index == mAudioStreamId ) {
Debug( 4, "Got audio packet" ); Debug( 4, "Got audio packet" );
if ( videoStore && recording ) { if ( videoStore && recording ) {
if ( record_audio ) { if ( record_audio ) {
Debug( 4, "Storing Audio packet" ); Debug( 4, "Storing Audio packet" );
//Write the packet to our video store //Write the packet to our video store
int ret = videoStore->writeAudioFramePacket(&packet, mFormatContext->streams[packet.stream_index]); //FIXME no relevance of last key frame int ret = videoStore->writeAudioFramePacket(&packet, mFormatContext->streams[packet.stream_index]); //FIXME no relevance of last key frame
if ( ret < 0 ) { //Less than zero and we skipped a frame if ( ret < 0 ) { //Less than zero and we skipped a frame
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100) #if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
av_packet_unref( &packet ); av_packet_unref( &packet );
#else #else
av_free_packet( &packet ); av_free_packet( &packet );
#endif #endif
return 0; return 0;
} }
} else { } else {
Debug( 4, "Not storing audio" ); Debug( 4, "Not storing audio" );
} }
} }
} // end if video or audio packet } // end if video or audio packet
#if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100) #if LIBAVCODEC_VERSION_CHECK(57, 8, 0, 12, 100)
av_packet_unref( &packet ); av_packet_unref( &packet );
#else #else
av_free_packet( &packet ); av_free_packet( &packet );
#endif #endif
} /* getFrame() */ } /* getFrame() */
if(frameComplete) if(frameComplete)
return (0); return (0);
} // end while true } // end while true
// can never get here. // can never get here.
return (0) ; return (0) ;
} // int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* event_file ) } // int RemoteCameraRtsp::CaptureAndRecord( Image &image, bool recording, char* event_file )
int RemoteCameraRtsp::PostCapture() int RemoteCameraRtsp::PostCapture()
{ {
return( 0 ); return( 0 );
} }
#endif // HAVE_LIBAVFORMAT #endif // HAVE_LIBAVFORMAT

View File

@ -36,56 +36,56 @@
class RemoteCameraRtsp : public RemoteCamera class RemoteCameraRtsp : public RemoteCamera
{ {
protected: protected:
struct sockaddr_in rtsp_sa; struct sockaddr_in rtsp_sa;
struct sockaddr_in rtcp_sa; struct sockaddr_in rtcp_sa;
int rtsp_sd; int rtsp_sd;
int rtp_sd; int rtp_sd;
int rtcp_sd; int rtcp_sd;
bool rtsp_describe; bool rtsp_describe;
Buffer buffer; Buffer buffer;
Buffer lastSps; Buffer lastSps;
Buffer lastPps; Buffer lastPps;
RtspThread::RtspMethod method; RtspThread::RtspMethod method;
RtspThread *rtspThread; RtspThread *rtspThread;
int frameCount; int frameCount;
#if HAVE_LIBAVFORMAT #if HAVE_LIBAVFORMAT
AVFormatContext *mFormatContext; AVFormatContext *mFormatContext;
int mVideoStreamId; int mVideoStreamId;
int mAudioStreamId; int mAudioStreamId;
AVCodecContext *mCodecContext; AVCodecContext *mCodecContext;
AVCodec *mCodec; AVCodec *mCodec;
AVFrame *mRawFrame; AVFrame *mRawFrame;
AVFrame *mFrame; AVFrame *mFrame;
_AVPIXELFORMAT imagePixFormat; _AVPIXELFORMAT imagePixFormat;
#endif // HAVE_LIBAVFORMAT #endif // HAVE_LIBAVFORMAT
bool wasRecording; bool wasRecording;
VideoStore *videoStore; VideoStore *videoStore;
char oldDirectory[4096]; char oldDirectory[4096];
int64_t startTime; int64_t startTime;
#if HAVE_LIBSWSCALE #if HAVE_LIBSWSCALE
struct SwsContext *mConvertContext; struct SwsContext *mConvertContext;
#endif #endif
public: public:
RemoteCameraRtsp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio ); RemoteCameraRtsp( int p_id, const std::string &method, const std::string &host, const std::string &port, const std::string &path, int p_width, int p_height, bool p_rtsp_describe, int p_colours, int p_brightness, int p_contrast, int p_hue, int p_colour, bool p_capture, bool p_record_audio );
~RemoteCameraRtsp(); ~RemoteCameraRtsp();
void Initialise(); void Initialise();
void Terminate(); void Terminate();
int Connect(); int Connect();
int Disconnect(); int Disconnect();
int PrimeCapture(); int PrimeCapture();
int PreCapture(); int PreCapture();
int Capture( Image &image ); int Capture( Image &image );
int PostCapture(); int PostCapture();
int CaptureAndRecord( Image &image, bool recording, char* event_directory ); int CaptureAndRecord( Image &image, bool recording, char* event_directory );
}; };
#endif // ZM_REMOTE_CAMERA_RTSP_H #endif // ZM_REMOTE_CAMERA_RTSP_H

View File

@ -82,8 +82,11 @@ xhtmlHeaders(__FILE__, translate('Frame')." - ".$event['Id']." - ".$frame->Frame
<?php if ( in_array($event['VideoWriter'],array("1","2")) ) { ?> <?php if ( in_array($event['VideoWriter'],array("1","2")) ) { ?>
<img src="?view=image&eid=<?php echo $event['Id'] ?>&fid=<?php echo $frame->FrameId() ?>&scale=<?php echo $event['DefaultScale'] ?>" class="<?php echo $imageData['imageClass'] ?>" width="<?php echo reScale( $event['Width'], $event['DefaultScale'], $scale ) ?>" height="<?php echo reScale( $event['Height'], $event['DefaultScale'], $scale ) ?>" alt="<?php echo $frame->EventId()."-".$frame->FrameId() ?>"> <img src="?view=image&eid=<?php echo $event['Id'] ?>&fid=<?php echo $frame->FrameId() ?>&scale=<?php echo $event['DefaultScale'] ?>" class="<?php echo $imageData['imageClass'] ?>" width="<?php echo reScale( $event['Width'], $event['DefaultScale'], $scale ) ?>" height="<?php echo reScale( $event['Height'], $event['DefaultScale'], $scale ) ?>" alt="<?php echo $frame->EventId()."-".$frame->FrameId() ?>">
<?php } else { <?php } else {
if ( $imageData['hasAnalImage'] ) { ?><a href="?view=frame&amp;eid=<?php echo $event['Id'] ?>&amp;fid=<?php echo $frame->FrameId() ?>&amp;scale=<?php echo $scale ?>&amp;show=<?php echo $imageData['isAnalImage']?"capt":"anal" ?>"><?php } ?> if ( $imageData['hasAnalImage'] ) { ?>
<img src="<?php echo $frame->getImageSrc() ?>" width="<?php echo reScale( $event['Width'], $event['DefaultScale'], $scale ) ?>" height="<?php echo reScale( $event['Height'], $event['DefaultScale'], $scale ) ?>" alt="<?php echo $frame->EventId()."-".$frame->FrameId() ?>" class="<?php echo $imageData['imageClass'] ?>"/><?php if ( $imageData['hasAnalImage'] ) { ?></a><?php } ?> <a href="?view=frame&amp;eid=<?php echo $event['Id'] ?>&amp;fid=<?php echo $frame->FrameId() ?>&amp;scale=<?php echo $scale ?>&amp;show=<?php echo $imageData['isAnalImage']?"capt":"anal" ?>">
<?php } ?>
<img src="<?php echo $frame->getImageSrc() ?>" width="<?php echo reScale( $event['Width'], $event['DefaultScale'], $scale ) ?>" height="<?php echo reScale( $event['Height'], $event['DefaultScale'], $scale ) ?>" alt="<?php echo $frame->EventId()."-".$frame->FrameId() ?>" class="<?php echo $imageData['imageClass'] ?>"/>
<?php if ( $imageData['hasAnalImage'] ) { ?></a><?php } ?>
<?php } ?> <?php } ?>
</p> </p>
<p id="controls"> <p id="controls">