esp32cam摄像+上位机opencv人脸识别

准备工作

esp32cam带底板,开发平台vscode+platformio,摄像头为ov2640,需要esp32连接热点建立服务器,python上位机访问网址,读取图像后opencv识别人脸.

ESP32代码

基于esp32cam例程修改,首先打开PIO home新建esp32cam工程,选择安信可开发板

修改例程,调整分辨率和图像质量以找到合适的帧率,JPEG 输出的帧率比 RGB565 高

#include "esp_camera.h"
#include <WiFi.h>
#include<stdio.h>
#include<WiFiClient.h>
#define CAMERA_MODEL_AI_THINKER // Has PSRAM
#include "camera_pins.h"
// NOTE(review): including a .cpp pulls the whole web-server translation unit
// into this sketch; unusual, but it matches the example's layout.
#include "app_httpd.cpp"
void startCameraServer();    // defined in app_httpd.cpp
void setupLedFlash(int pin); // defined in app_httpd.cpp
bool flag=false;             // NOTE(review): unused in the visible code — confirm before removing
WiFiClient client;           // NOTE(review): unused in the visible code — confirm before removing
// Initializes the camera, joins WiFi (with a fallback AP), and starts the
// HTTP control (port 80) and stream (port 81) servers.
void setup() {
  Serial.begin(115200);
  Serial.setDebugOutput(true);

  // Camera configuration for the AI-Thinker ESP32-CAM (OV2640).
  camera_config_t config;
  config.ledc_channel = LEDC_CHANNEL_0;
  config.ledc_timer = LEDC_TIMER_0;
  config.pin_d0 = Y2_GPIO_NUM;
  config.pin_d1 = Y3_GPIO_NUM;
  config.pin_d2 = Y4_GPIO_NUM;
  config.pin_d3 = Y5_GPIO_NUM;
  config.pin_d4 = Y6_GPIO_NUM;
  config.pin_d5 = Y7_GPIO_NUM;
  config.pin_d6 = Y8_GPIO_NUM;
  config.pin_d7 = Y9_GPIO_NUM;
  config.pin_xclk = XCLK_GPIO_NUM;
  config.pin_pclk = PCLK_GPIO_NUM;
  config.pin_vsync = VSYNC_GPIO_NUM;
  config.pin_href = HREF_GPIO_NUM;
  config.pin_sccb_sda = SIOD_GPIO_NUM;
  config.pin_sccb_scl = SIOC_GPIO_NUM;
  config.pin_pwdn = PWDN_GPIO_NUM;
  config.pin_reset = RESET_GPIO_NUM;
  config.xclk_freq_hz = 20000000;
  config.frame_size = FRAMESIZE_SVGA;   // 800x600 (the old comment claiming 2560x1440 was wrong)
  config.pixel_format = PIXFORMAT_JPEG; // JPEG streams at a higher frame rate than RGB565
  config.grab_mode = CAMERA_GRAB_WHEN_EMPTY;
  config.fb_location = CAMERA_FB_IN_PSRAM;
  config.jpeg_quality = 10;             // 0-63; lower number = higher quality
  config.fb_count = 1;

  // With PSRAM available, double-buffer and always grab the latest frame
  // for a smoother stream.
  if (config.pixel_format == PIXFORMAT_JPEG) {
    if (psramFound()) {
      config.jpeg_quality = 10;
      config.fb_count = 2;
      config.grab_mode = CAMERA_GRAB_LATEST;
    }
  }

  esp_err_t err = esp_camera_init(&config);
  if (err != ESP_OK) {
    Serial.printf("Camera init failed with error 0x%x", err);
    return;
  }

  sensor_t *s = esp_camera_sensor_get();
  // Initial OV2640 sensors are flipped vertically and colors are a bit saturated.
  if (s->id.PID == OV2640_PID) {
    s->set_vflip(s, 0);      // flip it back
    s->set_brightness(s, 1); // up the brightness just a bit
    s->set_saturation(s, 1); // bump the saturation slightly (old comment said "lower")
  }

// Setup LED flash if LED pin is defined in camera_pins.h
#if defined(LED_GPIO_NUM)
  setupLedFlash(LED_GPIO_NUM);
#endif

  // Station mode: join the primary access point (credentials redacted).
  WiFi.begin("****", "******");
  Serial.println("**** BEGIN CONNECTING");
  WiFi.setSleep(false);
  int count1 = 0;
  while (WiFi.status() != WL_CONNECTED && count1 < 5) { // ~2.5 s timeout
    delay(500);
    Serial.print(".");
    count1++;
  }

  // No DHCP lease means the first AP failed; fall back to the second one.
  if (WiFi.localIP().toString() == "0.0.0.0") {
    Serial.println("\n16303 CONNECT FALL TRY ****");
    WiFi.disconnect();
    WiFi.begin("******", "******");
    Serial.println("\n**** BEGIN CONNECT");
    WiFi.setSleep(false);
    count1 = 0;
    while (WiFi.status() != WL_CONNECTED && count1 < 5) {
      delay(500);
      Serial.print(".");
      count1++;
    }
  }

  // BUG FIX: the original line ended in untranslated prose
  // ("连接成功启动服务器" / "connected, start the server") that did not
  // compile; it is now a comment and the branch is properly braced.
  if (WiFi.status() == WL_CONNECTED) {
    Serial.println("WiFi connected");
  }

  startCameraServer();
  Serial.print("访问'http://");
  Serial.print(WiFi.localIP());
  Serial.println(":81/stream' to connect"); // MJPEG stream URL (port 81)

  Serial.print("主机IP:");
  Serial.println(WiFi.localIP());
}


// Intentionally empty: all work happens in the HTTP server tasks started
// from setup(); the Arduino loop has nothing to do.
void loop() {
 
}

随后进入web服务器函数文件,修改函数,我只对分辨率,闪光灯亮度做了请求回应

其中视频流绑定在81端口,控制选项在80端口

通过 http://设备IP/flash?var=led_intensity&val=? 和 http://设备IP/size?var=framesize&val=? 控制


// Turns the flash LED on at the globally configured led_duty (capped at
// CONFIG_LED_MAX_INTENSITY while streaming) or off (duty 0).
void enable_led(bool en)
{ // Turn LED On or Off
    int duty = en ? led_duty : 0;
    // While a stream is active, limit brightness to protect the LED / supply.
    if (en && isStreaming && (led_duty > CONFIG_LED_MAX_INTENSITY))
    {
        duty = CONFIG_LED_MAX_INTENSITY;
    }
    ledcWrite(LED_LEDC_CHANNEL, duty);
    //ledc_set_duty(CONFIG_LED_LEDC_SPEED_MODE, CONFIG_LED_LEDC_CHANNEL, duty);
    //ledc_update_duty(CONFIG_LED_LEDC_SPEED_MODE, CONFIG_LED_LEDC_CHANNEL);
    log_i("Set LED intensity to %d", duty);
}
#endif

// GET /flash?var=led_intensity&val=N — clamps N to [0,255], stores it as the
// LED duty (applying it immediately if a stream is running), and echoes the
// applied value back to the client.
static esp_err_t flash_handler(httpd_req_t *req)
{
    char *query = NULL;
    char var_name[32];
    char var_value[32];

    if (parse_get(req, &query) != ESP_OK) {
        return ESP_FAIL;
    }

    bool have_var = httpd_query_key_value(query, "var", var_name, sizeof(var_name)) == ESP_OK;
    bool have_val = httpd_query_key_value(query, "val", var_value, sizeof(var_value)) == ESP_OK;
    free(query);
    if (!have_var || !have_val) {
        httpd_resp_send_404(req);
        return ESP_FAIL;
    }

    // Clamp the requested intensity to the 8-bit PWM range.
    int intensity = atoi(var_value);
    if (intensity > 255) {
        intensity = 255;
    } else if (intensity < 0) {
        intensity = 0;
    }
    log_i("%s = %d", var_name, intensity);

    if (strcmp(var_name, "led_intensity") == 0) {
        led_duty = intensity;
        if (isStreaming) {
            enable_led(true);  // apply the new duty right away
        }
    }

    // Reply with the value actually applied (post-clamp).
    itoa(intensity, var_value, 10);
    return httpd_resp_send(req, var_value, strlen(var_value));
}

// GET /size?var=framesize&val=N — applies frame size index N to the camera
// sensor and echoes N back on success so the client can confirm the change.
static esp_err_t size_handler(httpd_req_t *req)
{
    char *buf = NULL;
    char op[32];
    char value[32];
    if (parse_get(req, &buf) != ESP_OK) {
        return ESP_FAIL;
    }
    if (httpd_query_key_value(buf, "var", op, sizeof(op)) != ESP_OK ||
        httpd_query_key_value(buf, "val", value, sizeof(value)) != ESP_OK) {
        free(buf);
        httpd_resp_send_404(req);
        return ESP_FAIL;
    }
    free(buf);

    int val = atoi(value);
    sensor_t *s = esp_camera_sensor_get();
    if (s == NULL) {
        // Camera driver not initialized; nothing to apply the setting to.
        httpd_resp_send_404(req);
        return ESP_FAIL;
    }
    if (!strcmp(op, "framesize")) {
        // BUG FIX: the result of set_framesize() was stored but never
        // checked, so an out-of-range value was echoed back as if applied.
        if (s->set_framesize(s, (framesize_t)val) != 0) {
            httpd_resp_send_404(req);
            return ESP_FAIL;
        }
        return httpd_resp_send(req, value, strlen(value));
    }
    // Unknown "var": reply with an empty 200 (unchanged legacy behavior).
    return httpd_resp_send(req, NULL, 0);
}
// Streams MJPEG frames over HTTP as multipart/x-mixed-replace chunks until
// a send fails (i.e. the client disconnects).
static esp_err_t stream_handler(httpd_req_t *req)
{
    camera_fb_t *fb = NULL;
    struct timeval _timestamp;
    esp_err_t res = ESP_OK;
    size_t _jpg_buf_len = 0;
    uint8_t *_jpg_buf = NULL;
    // BUG FIX: this was `char *part_buf[128]` — an array of 128 *pointers*
    // (512+ bytes on this target) used as a character buffer via casts.
    // A plain char array is what snprintf() actually needs.
    char part_buf[128];

    res = httpd_resp_set_type(req, _STREAM_CONTENT_TYPE);
    if (res != ESP_OK)
    {
        return res;
    }

    httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
    httpd_resp_set_hdr(req, "X-Framerate", "60");

#if CONFIG_LED_ILLUMINATOR_ENABLED
    isStreaming = true;
    enable_led(true);
#endif

    while (true)
    {
        fb = esp_camera_fb_get();
        count++; // frame counter declared elsewhere in this file
        if (!fb)
        {
            log_e("Camera capture failed");
            res = ESP_FAIL;
        }
        else
        {
            _timestamp.tv_sec = fb->timestamp.tv_sec;
            _timestamp.tv_usec = fb->timestamp.tv_usec;
            if (fb->format != PIXFORMAT_JPEG)
            {
                // Raw frame: convert to JPEG. The converted buffer is
                // heap-allocated and must be freed after sending.
                bool jpeg_converted = frame2jpg(fb, 80, &_jpg_buf, &_jpg_buf_len);
                esp_camera_fb_return(fb);
                fb = NULL;
                if (!jpeg_converted)
                {
                    log_e("JPEG compression failed");
                    res = ESP_FAIL;
                }
            }
            else
            {
                // Already JPEG: point straight into the driver's frame buffer.
                _jpg_buf_len = fb->len;
                _jpg_buf = fb->buf;
            }
        }
        // Send boundary, then the part header, then the JPEG payload.
        if (res == ESP_OK)
        {
            res = httpd_resp_send_chunk(req, _STREAM_BOUNDARY, strlen(_STREAM_BOUNDARY));
        }
        if (res == ESP_OK)
        {
            size_t hlen = snprintf(part_buf, sizeof(part_buf), _STREAM_PART, _jpg_buf_len, _timestamp.tv_sec, _timestamp.tv_usec);
            res = httpd_resp_send_chunk(req, part_buf, hlen);
        }
        if (res == ESP_OK)
        {
            res = httpd_resp_send_chunk(req, (const char *)_jpg_buf, _jpg_buf_len);
        }
        // Return the frame buffer to the driver, or free the converted JPEG.
        if (fb)
        {
            esp_camera_fb_return(fb);
            fb = NULL;
            _jpg_buf = NULL;
        }
        else if (_jpg_buf)
        {
            free(_jpg_buf);
            _jpg_buf = NULL;
        }
        if (res != ESP_OK)
        {
            log_e("Send frame failed");
            break;
        }
    }
#if CONFIG_LED_ILLUMINATOR_ENABLED
    isStreaming = false;
    enable_led(false);
#endif
    return res;
}


// Starts two HTTP servers: the control endpoints (/flash, /size) on the
// default port (80), and the MJPEG stream (/stream) on the next port (81)
// so streaming does not block control requests.
void startCameraServer()
{   
    httpd_config_t config = HTTPD_DEFAULT_CONFIG();
    config.max_uri_handlers = 16;
    // /stream — served by the second (stream) server instance.
    httpd_uri_t stream_uri = {
        .uri = "/stream",
        .method = HTTP_GET,
        .handler = stream_handler,
        .user_ctx = NULL
#ifdef CONFIG_HTTPD_WS_SUPPORT
        ,
        .is_websocket = true,
        .handle_ws_control_frames = false,
        .supported_subprotocol = NULL
#endif

    };
    // /flash — LED intensity control on the main server.
    httpd_uri_t flash_uri = {
        .uri = "/flash",
        .method = HTTP_GET,
        .handler = flash_handler,
        .user_ctx = NULL
#ifdef CONFIG_HTTPD_WS_SUPPORT
        ,
        .is_websocket = true,
        .handle_ws_control_frames = false,
        .supported_subprotocol = NULL
#endif
    };
    // /size — frame-size control on the main server.
    httpd_uri_t size_uri = {
        .uri = "/size",
        .method = HTTP_GET,
        .handler = size_handler,
        .user_ctx = NULL
#ifdef CONFIG_HTTPD_WS_SUPPORT
        ,
        .is_websocket = true,
        .handle_ws_control_frames = false,
        .supported_subprotocol = NULL
#endif

    };
    ra_filter_init(&ra_filter, 20);

#if CONFIG_ESP_FACE_RECOGNITION_ENABLED
    recognizer.set_partition(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, "fr");

    // load ids from flash partition
    recognizer.set_ids_from_flash();
#endif
    if (httpd_start(&camera_httpd, &config) == ESP_OK)
    {
        httpd_register_uri_handler(camera_httpd, &flash_uri);
        httpd_register_uri_handler(camera_httpd, &size_uri);
    }

    // Reuse the same config on the next port pair for the stream server.
    config.server_port += 1;
    config.ctrl_port += 1;
    log_i("Starting stream server on port: '%d'", config.server_port);
    if (httpd_start(&stream_httpd, &config) == ESP_OK)
    {
        httpd_register_uri_handler(stream_httpd, &stream_uri);
  
    }
        
}

// Attaches the flash-LED pin to a 5 kHz, 8-bit LEDC PWM channel so its
// brightness can be driven by enable_led(); logs and does nothing when the
// illuminator is disabled in the build config.
void setupLedFlash(int pin) 
{
    #if CONFIG_LED_ILLUMINATOR_ENABLED
    ledcSetup(LED_LEDC_CHANNEL, 5000, 8);
    ledcAttachPin(pin, LED_LEDC_CHANNEL);
    #else
    log_i("LED flash is disabled -> CONFIG_LED_ILLUMINATOR_ENABLED = 0");
    #endif
}

烧录进esp32,通过设置的wifi连接并访问视频流,此ip为局域网ip,只能在同一个网络中访问

esp32代码就到这里了

Python代码

import cv2
import numpy as np
import requests
url = 'http://******/stream'  # ESP32-CAM MJPEG stream (port 81, /stream)
cap = cv2.VideoCapture(url)
if not cap.isOpened():
    print("Failed to open video stream!")
    exit()
# Load the face detector: a ResNet-10 SSD Caffe model (a DNN model — the
# original note calling it a Haar cascade was inaccurate).
prototxt_path = "used/deploy.prototxt.txt"
model_path = "used/res10_300x300_ssd_iter_140000.caffemodel"
model = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
flash=0  # current LED intensity shown on screen (device clamps to 0-255)
frame=9  # current framesize index sent to the /size endpoint
# Turn the flash off at startup via the control endpoint (port 80).
requests.get("http://192.168.31.111/flash?var=led_intensity&val={}".format(flash))
def face_select(image): 
    """Detect faces in ``image`` with the global DNN model, draw bounding
    boxes and confidence labels, overlay the flash/framesize status, and
    display the annotated frame in the 'image' window.

    Reads the module-level globals ``model``, ``flash`` and ``frame``.
    """
    if image is None:
        # cap.read() can fail and yield None; skip instead of crashing.
        return
    h, w = image.shape[:2]
    # 300x300 is the fixed input size of the res10 SSD face model.
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300))
    model.setInput(blob)
    output = np.squeeze(model.forward())
    for i in range(0, output.shape[0]):
        confidence = output[i, 2]
        if confidence > 0.9:
            # Detections are normalized [x1, y1, x2, y2]; scale to pixels.
            box = output[i, 3:7] * np.array([w, h, w, h])
            start_x, start_y, end_x, end_y = box.astype(np.int64)
            cv2.rectangle(image, (start_x, start_y), (end_x, end_y), color=(255, 0, 0), thickness=2)
            # BUG FIX: this label was drawn twice in the original; once is enough.
            cv2.putText(image, f"{confidence*100:.2f}%", (start_x, start_y-5), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
    cv2.putText(image, f"flash:{flash:d}", (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
    cv2.putText(image, f"frame:{frame:d}", (5, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

    cv2.imshow('image',image)

# Main loop: show frames, and map keys to device controls —
#   q = quit, w/s = flash up/down, 1-9 = set framesize index.
while True:
    # BUG FIX: the read() success flag was ignored; a dropped stream yielded
    # image=None and crashed inside face_select.
    ok, image = cap.read()
    if not ok:
        print("Failed to read frame from stream, exiting.")
        break
    face_select(image)
    key = chr(cv2.waitKey(1) & 0xFF)
    # 按下 'q' 键退出循环
    if key == 'q':
        cv2.destroyAllWindows()
        break
    if key == 'w':
        flash = min(flash + 10, 255)  # mirror the firmware's 0-255 clamp
        res = requests.get("http://*******/flash?var=led_intensity&val={}".format(flash))
    if key == 's':
        flash = max(flash - 10, 0)
        res = requests.get("http://*******/flash?var=led_intensity&val={}".format(flash))
    if key in "123456789":
        frame = int(key)
        res = requests.get("http://*******/size?var=framesize&val={}".format(frame))
        frame = int(res.text)  # adopt the value the device actually applied

cap.release()

通过pip安装opencv库以及支持包,使用cv2获取网页视频流

当url为网址时获取视频流,为0时调用电脑摄像头

cap = cv2.VideoCapture(url)

随后加载人脸识别模型,使用训练好的模型,人脸识别相关可以参考利用OpenCV DNN模块进行深度学习:一个最好使用指导书_open cv dnn-CSDN博客

随后在output中获取到置信度,人脸位置

通过链接得知output数据结构为:

[[[[0.00000000e+00 1.00000000e+00 9.72869813e-01 2.06566155e-02 1.11088693e-01 2.40461200e-01 7.53399074e-01]]]]
索引1包含有所有类别标签,从1 到80.
索引2 包含有置信得分。虽然并不是概率,也代表了模型对于物体所属种类的置信度。
最后四个数值是归一化的边框坐标:前两个是左上角的 x、y,后两个是右下角的 x、y(代码中乘以图像宽高 [w, h, w, h] 后即得到 start_x, start_y, end_x, end_y),并非框的宽和高。

随后便能绘制出人脸框,并标注可信度

 cv2.rectangle(image, (start_x, start_y), (end_x, end_y), color=(255, 0, 0), thickness=2)
            
            cv2.putText(image, f"{confidence*100:.2f}%", (start_x, start_y-5), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

最后我们在左上角显示闪光灯和分辨率

Python代码到此为止

运行效果

opencv

代码打包

链接: https://pan.baidu.com/s/1mwNEbQs75UopD8wCHDrLmQ?pwd=7vjf 提取码: 7vjf 复制这段内容后打开百度网盘手机App,操作更方便哦

  • 28
    点赞
  • 20
    收藏
    觉得还不错? 一键收藏
  • 5
    评论
实现数字识别的整个过程可以分为以下几步: 1. 使用ESP32-CAM采集图像并通过WiFi将图像传输到计算机。 2. 在计算机上使用Python和OpenCV对图像进行处理,包括二值化、轮廓检测、字符分割等。 3. 对每个字符进行数字识别,可以使用深度学习算法(如卷积神经网络)或传统机器学习算法(如支持向量机)。 4. 将识别结果返回给ESP32-CAM,可以通过串口或WiFi等方式将结果传输回ESP32-CAM。 下面是一个简单的示例代码,演示如何使用ESP32-CAM、Python和OpenCV实现数字识别: ```python import cv2 import numpy as np import requests import json # ESP32-CAM的IP地址和端口号 ip = '192.168.1.100' port = '80' # 发送HTTP请求获取图像 url = 'http://' + ip + ':' + port + '/capture' response = requests.get(url) img_array = np.array(bytearray(response.content), dtype=np.uint8) img = cv2.imdecode(img_array, cv2.IMREAD_COLOR) # 将图像转换为灰度图像 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 对图像进行二值化处理 ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU) # 对图像进行膨胀操作,使字符区域更加连通 kernel = np.ones((3, 3), np.uint8) dilation = cv2.dilate(thresh, kernel, iterations=1) # 查找图像中的轮廓 contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # 对每个轮廓进行字符分割和数字识别 digits = [] for cnt in contours: x, y, w, h = cv2.boundingRect(cnt) if w > 10 and h > 10: roi = thresh[y:y+h, x:x+w] roi = cv2.resize(roi, (28, 28)) roi = roi.reshape((1, 28, 28, 1)).astype('float32') / 255.0 # 发送HTTP请求进行数字识别 data = json.dumps({'inputs': roi.tolist()}) headers = {'content-type': 'application/json'} url = 'http://' + ip + ':' + port + '/predict' response = requests.post(url, data=data, headers=headers) result = json.loads(response.text)['outputs'] digit = np.argmax(result) digits.append(digit) # 将识别结果返回给ESP32-CAM data = json.dumps({'digits': digits}) headers = {'content-type': 'application/json'} url = 'http://' + ip + ':' + port + '/result' response = requests.post(url, data=data, headers=headers) ``` 在这个示例中,我们通过发送HTTP请求获取ESP32-CAM采集的图像,并在计算机上使用OpenCV对图像进行处理。我们首先将图像转换为灰度图像,然后对图像进行二值化处理,使字符区域变为黑色,背景变为白色。接着对图像进行膨胀操作,使字符区域更加连通。然后查找图像中的轮廓,对每个轮廓进行字符分割和数字识别。我们使用Keras框架训练了一个卷积神经网络模型,用于数字识别。最后将识别结果通过HTTP请求返回给ESP32-CAM。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 5
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值