一,Face recognition problem log:
face_cascade.detectMultiScale threw an "empty" error. It turned out I had forgotten to load the cascade XML data with load().
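For the record, the load has to happen (and succeed) before the first detectMultiScale call. A minimal sketch of the check, assuming the XML file sits next to the executable and frame_gray is an already-prepared grayscale cv::Mat:
cv::CascadeClassifier face_cascade;
// an unloaded classifier is "empty", and detectMultiScale refuses to run on it
if (!face_cascade.load("haarcascade_frontalface_alt2.xml") || face_cascade.empty())
{
printf("--(!)Error loading face data\n");
return -1;
}
std::vector<cv::Rect> faces;
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | cv::CASCADE_SCALE_IMAGE, cv::Size(20, 20));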
二,Room for future optimization
Optimizations for the BB-Black ARM board
1. Video capture is laggy and not smooth.
The approach needs to change. Currently the board captures YUYV frames, compresses them to JPEG, and sends them to the client; the client decodes the JPEG, runs face detection, and then shows the result in a window. (A sketch of the per-frame encode step, with a quality knob, follows this list.)
2. The board can only capture at 320*240 resolution.
The driver's buffer needs to be modified. (A small probe for the negotiated resolution also follows this list.)
3. In the future this should detect whether anyone is in the camera's view and, if so, grab a snapshot, so it can serve as a daytime burglar monitor.
Right now it only detects faces, and only frontal ones; profile views are not recognized. (A snapshot-on-detection sketch follows this list.)
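For item 1, one knob worth noting is the JPEG quality parameter that cv::imencode accepts; a minimal sketch of the idea (frame is the captured cv::Mat, and 60 is just an example value, not something I have tuned):
// sketch: encode the frame with an explicit, lower JPEG quality
std::vector<uchar> jpg;
std::vector<int> params;
params.push_back(cv::IMWRITE_JPEG_QUALITY);
params.push_back(60); // example value
cv::imencode(".jpg", frame, jpg, params);
// jpg.size() is exactly what goes over the socket, so lower quality means less data per frame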
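For item 2, before touching the driver it is worth checking what resolution the capture backend actually negotiates, since VideoCapture falls back silently when a size is not supported. A small probe, using the same constants as the server code below:
// sketch: request 640*480 and read back what the driver really granted
cv::VideoCapture cap(0);
cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
printf("negotiated size: %dx%d\n",
(int)cap.get(CV_CAP_PROP_FRAME_WIDTH),
(int)cap.get(CV_CAP_PROP_FRAME_HEIGHT));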
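For item 3, a first cut could simply write a time-stamped snapshot whenever detectMultiScale returns at least one face; a minimal sketch (faces and frame are as in detectAndDisplay below, the filename scheme is just an example, and time() needs <time.h>):
// sketch: grab a snapshot as soon as at least one face is detected
if (!faces.empty())
{
std::string snapname = "intruder_" + int2string((int)time(NULL)) + ".jpg";
cv::imwrite(snapname, frame);
}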
Room for future optimization on the Win + Linux PC side
1. At 640*480 the client still runs into the 65535 buffer limit and needs optimizing. --- This part can be worked on together with the Linux PC first. (A sketch of decoding the full 4-byte length follows right below.)
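The server already sends the frame length as four big-endian bytes (msgLen[] in the ARM server code below), so one way to lift the 65535 limit is to assemble all four bytes on the client instead of only the low two; a sketch of the idea (buffer holds the 4-byte header already received):
/* sketch: decode the full 32-bit big-endian length */
int mylen = ((unsigned char)buffer[0] << 24) |
((unsigned char)buffer[1] << 16) |
((unsigned char)buffer[2] << 8) |
(unsigned char)buffer[3];
/* the receive buffer would then also need to grow beyond the current 65535 bytes */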
Note: I plan to pick video capture up again after October.
三,Pictures or it didn't happen
人脸监控.png
Snapshot captured by imwrite in the code
pic2.jpg
The BB-Black board in operation
BB-Black开发板连接摄像头.png
四,ARM server code
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <sstream>
#include <vector>
using namespace std;
#define MAXLINE 4096
using namespace cv;
string int2string(int value)
{
stringstream ss;
ss << value;
return ss.str();
}
int main(int argc, char** argv)
{
int listenfd, connfd;
struct sockaddr_in servaddr;
if( (listenfd = socket(AF_INET, SOCK_STREAM, 0)) == -1 )
{
printf("create socket error: %s(errno: %d)\n",strerror(errno),errno);
exit(0);
}
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
servaddr.sin_port = htons(8080);
int opt = 1;
if(setsockopt(listenfd,SOL_SOCKET,SO_REUSEADDR,(const void *)&opt,sizeof(opt)))
{
perror("setsockopt");
return -1;
}
if( bind(listenfd, (struct sockaddr*)&servaddr, sizeof(servaddr)) == -1){
printf("bind socket error: %s(errno: %d)\n",strerror(errno),errno);
exit(0);
}
if( listen(listenfd, 10) == -1){
printf("listen socket error: %s(errno: %d)\n",strerror(errno),errno);
exit(0);
}
/* ---main task process--- */
Mat frame;
//--- INITIALIZE VIDEOCAPTURE
VideoCapture cap;
vector<uchar> inImage; // JPEG-encoded bytes of the current frame
// open the default camera using default API
cap.open(0);
cap.set(CV_CAP_PROP_FRAME_HEIGHT,240);
cap.set(CV_CAP_PROP_FRAME_WIDTH,320);
if (!cap.isOpened())
{
cerr << "ERROR! Unable to open camera\n";
return -1;
}
printf("open camera success\n");
cap.read(frame);
printf("======waiting for client's apple request======\n");
if( (connfd = accept(listenfd, (struct sockaddr*)NULL, NULL)) == -1){
printf("accept socket error: %s(errno: %d)",strerror(errno),errno);
exit(0);
}
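/*
* Stop-and-wait handshake with the client:
* 1. wait until cok[0]==0x55 (client says it is ready for a frame);
*    cok starts as 0x55 so the very first frame is sent immediately;
* 2. send the JPEG size as 4 big-endian bytes;
* 3. the client answers 0x33 ("length received, send the data");
* 4. send the JPEG bytes, then wait for the next 0x55.
*/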
char cok[1]={0x55};
char cokstart[1]={0};
int sizelen=0;
int sizejpg=0;
for (;;)
{
// wait for a new frame from camera and store it into 'frame'
cap.read(frame);
// check if we succeeded
if (frame.empty()) {
cerr << "ERROR! blank frame grabbed\n";
break;
}
if (cok[0]==0x55){
cok[0]=0;
//printf("read one frame!\n");
imencode(".jpg",frame,inImage);
int datalen=inImage.size();
unsigned char *msgImage=new unsigned char[datalen];
unsigned char msgLen[4];
/* pack the JPEG length into 4 bytes, big-endian */
msgLen[0]=datalen >> 24;
msgLen[1]=datalen >> 16;
msgLen[2]=datalen >> 8;
msgLen[3]=datalen;
//printf("datalen=%x\n",datalen);
sizelen=send(connfd,msgLen,4,0);
//printf("sizelen:%x",sizelen);
/* copy the encoded JPEG into a plain byte buffer for send() */
for(int i=0;i<datalen;i++)
{
msgImage[i]=inImage[i];
}
recv(connfd,cokstart,1,0);
if(cokstart[0] == 0x33)
{
cokstart[0]=0x0;
//vector<uchar> vec;
//Mat img_decode;
//string filename="";
//cokstart[0]=0x0;
//for(int i=0;i<datalen;i++)
//{
// vec.push_back(msgImage[i]);
//}
//img_decode =imdecode(vec,CV_LOAD_IMAGE_COLOR);
//j++;
//imshow("serpic",img_decode);
//usleep(1000);
//filename="sevpic"+int2string(j)+".jpg";
//imwrite(filename,img_decode);
sizejpg=send(connfd,msgImage,datalen,0);
//printf("sizejpg:%x",sizejpg);
usleep(10000);
recv(connfd,cok,1,0);
}
delete [] msgImage; /* free the per-frame copy; without this every frame leaks datalen bytes */
}
}
close(listenfd);
return 0;
}
五,VS2017 client code
// client.cpp : Defines the entry point for the console application.
//
#include <winsock2.h>
#include <ws2tcpip.h>
#include <windows.h>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <sstream>
#include <vector>
#pragma comment(lib,"ws2_32.lib")
void detectAndDisplay(cv::Mat frame);
cv::CascadeClassifier face_cascade;
cv::CascadeClassifier eyes_cascade;
/* transfer int to string */
std::string int2string(int value)
{
std::stringstream ss;
ss << value;
return ss.str();
}
int main(int argc, char** argv)
{
SOCKADDR_IN servaddr;
WORD wVersionRequested;
WSADATA wsaData;
int err;
/** Global variables */
std::string face_cascade_name = "haarcascade_frontalface_alt2.xml";
//load the face cascade classifier XML file and check that the load succeeded
if (!face_cascade.load(face_cascade_name))
{
printf("--(!)Error loading face data\n");
return -1;
}
/** Global variables */
std::string eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
//load the eye cascade classifier XML file and check that the load succeeded
if (!eyes_cascade.load(eyes_cascade_name))
{
printf("--(!)Error loading eyes data\n");
return -1;
}
wVersionRequested = MAKEWORD(1, 1);
err = WSAStartup(wVersionRequested, &wsaData);
if (err != 0) {
return 0;
}
if (LOBYTE(wsaData.wVersion) != 1 ||
HIBYTE(wsaData.wVersion) != 1) {
WSACleanup();
return 0;
}
SOCKET sockfd = socket(AF_INET, SOCK_STREAM, 0);
memset(&servaddr, 0, sizeof(servaddr));
//servaddr.sin_addr.S_un.S_addr = inet_pton(AF_INET,"192.168.7.4",&servaddr.sin_addr);// server address
//servaddr.sin_addr.S_un.S_addr = inet_pton(AF_INET, "127.0.0.1", &servaddr.sin_addr);// server address (local test)
servaddr.sin_addr.S_un.S_addr = inet_addr("192.168.7.2");// address of the BB-Black server
servaddr.sin_family = AF_INET;
servaddr.sin_port = htons(8080);
connect(sockfd, (SOCKADDR*)&servaddr, sizeof(SOCKADDR));
#define BUF_SIZE 65535
char buffer[BUF_SIZE];
std::vector<uchar> vec;
cv::Mat img_decode;
//std::string filename = "";
int size = 0;
int mylen = 0;
int j = 0;
int getpic = 0;
char cokstart[1] = { 0x33 };
memset(buffer, 0, 4);
while (1)
{
size = recv(sockfd, buffer, 4, 0);
mylen = ((buffer[2] << 8)&(0xFF00)) | ((buffer[3])&(0xFF)); /* only the low 16 bits of the 4-byte length are used, hence the 65535 limit noted above */
if (mylen>1)
{
getpic = 1; // set the flag; if the server was never reached on the first pass, the code below is skipped
}
if (mylen>0) {
/*received length then send ack signal cokstart.*/
send(sockfd, cokstart, 1, 0);
}
/* receive one frame of jpg data */
while (mylen>0)
{
size = recv(sockfd, buffer, mylen, 0);
printf("size is %d\n",size);
//printf("mylen is %x\n",mylen);
/* append only the bytes actually received in this recv() call */
for (int i = 0; i < size; i++)
{
vec.push_back(buffer[i]);
}
if (mylen > size)
{
mylen = mylen - size;
}
else
{
mylen = 0;
}
}
if (getpic==1)
{
/* decode jpg data */
img_decode = cv::imdecode(vec, CV_LOAD_IMAGE_COLOR);
/* release vector then ready to receive next frame */
vec.clear();
//cout << "vector capacity:"<
std::vector(vec).swap(vec);
//cout << "vector capacity afterswap:"<
//-- 3. Apply the classifier to the frame
detectAndDisplay(img_decode);
/* display the jpg in windows */
//cv::namedWindow("detect_pic", cv::WINDOW_AUTOSIZE);
//if (!img_decode.empty()) {
// imshow("detect_pic", img_decode);
//}
/* reflash display window in every 33ms */
cvWaitKey(33);
/* save to file -- this function is optional */
//j++;
//filename = "pic" + int2string(j) + ".jpg";
//imwrite(filename, img_decode);
/* send ack signal cok to tell that I'm ready to handler next frame */
char cok[1] = { 0x55 };
send(sockfd, cok, 1, 0);
}
}
}
void detectAndDisplay(cv::Mat frame)
{
std::vector<:rect> faces;
cv::Mat frame_gray;
std::string filename = "";
int k=0;
cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
//-- Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | cv::CASCADE_SCALE_IMAGE, cv::Size(20, 20));
printf("the size of face %d",faces.size());
for (size_t i = 0; i < faces.size(); i++)
{
cv::Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
ellipse(frame, center, cv::Size(faces[i].width / 2, faces[i].height / 2), 0, 0, 360, cv::Scalar(255, 0, 255), 4, 8, 0);
cv::Mat faceROI = frame_gray(faces[i]);
std::vector<:rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
for (size_t j = 0; j < eyes.size(); j++)
{
cv::Point eye_center(faces[i].x + eyes[j].x + eyes[j].width / 2, faces[i].y + eyes[j].y + eyes[j].height / 2);
int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
circle(frame, eye_center, radius, cv::Scalar(255, 0, 0), 4, 8, 0);
k++;
filename = "pic" + int2string(k) + ".jpg";
cv::imwrite(filename, frame);
}
}
/* save to file -- this function is optional */
//-- Show what you got
cv::namedWindow("detect_pic", cv::WINDOW_AUTOSIZE);
if (!frame.empty()) {
imshow("detect_pic", frame);
}
}