Water Quality Assessment Based on ESP32CAM

To get a cheap video stream to process, I put together a water quality assessment system a few days ago, using an ESP32CAM as the video source.

For the specifics of the water quality assessment, I followed this article:

基于水色图像的水质评价 (water quality evaluation based on water-color images): https://blog.csdn.net/qq_40006058/article/details/80572257

Since model training is involved, I considered two approaches. The first was opencv.js, but my JavaScript is nothing to write home about. So I went with the second approach instead: have OpenCV read the video stream directly, grab frames one by one, and feed them into the model for evaluation.
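
Condensed to its essence, that second approach is the minimal sketch below (the stream URL and model path are placeholders, and the img2vector() color-moment helper lives in first_step.py; all three are defined properly later in this post):

import cv2
import pickle
from first_step import img2vector  # color-moment feature helper, defined later

# Placeholder names: swap in your own stream URL and model path
cap = cv2.VideoCapture('http://<esp32cam-ip>/mjpeg/1')  # open the MJPEG stream
model = pickle.load(open('./save_model/clf.model', 'rb'))  # trained SVM

ret, frame = cap.read()                  # grab a single frame
if ret:
    cv2.imwrite('frame.jpg', frame)      # img2vector() reads from a file on disk
    features = img2vector('frame.jpg')   # 1x9 vector of color moments
    print(model.predict(features * 30))  # same x30 feature scaling as in training
cap.release()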

Here is the fairly simple video transmission rig I assembled. The ESP32CAM sits above the water surface, and since I was worried about lighting, I added a smart LED panel to illuminate the water.

The ESP32CAM sketch that uploads the video stream is as follows:

#define APP_CPU 1
#define PRO_CPU 0

#include "OV2640.h"
#include <WiFi.h>
#include <WebServer.h>
#include <WiFiClient.h>

#include <esp_bt.h>
#include <esp_wifi.h>
#include <esp_sleep.h>
#include <driver/rtc_io.h>


#define CAMERA_MODEL_AI_THINKER

#include "camera_pins.h"

#include "home_wifi_multi.h"

OV2640 cam;

WebServer server(80);

// ===== rtos task handles =========================
// Streaming is implemented with 3 tasks:
TaskHandle_t tMjpeg;   // handles client connections to the webserver
TaskHandle_t tCam;     // handles getting picture frames from the camera and storing them locally
TaskHandle_t tStream;  // actually streaming frames to all connected clients

// frameSync semaphore is used to prevent access to the streaming buffer while it is being replaced with the next frame
SemaphoreHandle_t frameSync = NULL;

// Queue stores currently connected clients to whom we are streaming
QueueHandle_t streamingClients;

// Target frame rate, in frames per second
const int FPS = 7;

// We will handle web client requests every 100 ms (10 Hz)
const int WSINTERVAL = 100;


// ======== Server Connection Handler Task ==========================
void mjpegCB(void* pvParameters) {
  TickType_t xLastWakeTime;
  const TickType_t xFrequency = pdMS_TO_TICKS(WSINTERVAL);

  // Creating frame synchronization semaphore and initializing it
  frameSync = xSemaphoreCreateBinary();
  xSemaphoreGive( frameSync );

  // Creating a queue to track all connected clients
  streamingClients = xQueueCreate( 10, sizeof(WiFiClient*) );

  //=== setup section  ==================

  //  Creating RTOS task for grabbing frames from the camera
  xTaskCreatePinnedToCore(
    camCB,        // callback
    "cam",        // name
    4096,         // stack size
    NULL,         // parameters
    2,            // priority
    &tCam,        // RTOS task handle
    APP_CPU);     // core

  //  Creating task to push the stream to all connected clients
  xTaskCreatePinnedToCore(
    streamCB,
    "strmCB",
    4 * 1024,
    NULL, //(void*) handler,
    2,
    &tStream,
    APP_CPU);

  //  Registering webserver handling routines
  server.on("/mjpeg/1", HTTP_GET, handleJPGSstream);
  server.on("/jpg", HTTP_GET, handleJPG);
  server.onNotFound(handleNotFound);

  //  Starting webserver
  server.begin();

  //=== loop() section  ===================
  xLastWakeTime = xTaskGetTickCount();
  for (;;) {
    server.handleClient();

    //  After every server client handling request, we let other tasks run and then pause
    taskYIELD();
    vTaskDelayUntil(&xLastWakeTime, xFrequency);
  }
}


// Commonly used variables:
volatile size_t camSize;    // size of the current frame, in bytes
volatile char* camBuf;      // pointer to the current frame


// ==== RTOS task to grab frames from the camera =========================
void camCB(void* pvParameters) {

  TickType_t xLastWakeTime;

  //  A running interval associated with currently desired frame rate
  const TickType_t xFrequency = pdMS_TO_TICKS(1000 / FPS);

  // Mutex for the critical section of switching the active frames around
  portMUX_TYPE xSemaphore = portMUX_INITIALIZER_UNLOCKED;

  //  Pointers to the 2 frames, their respective sizes and index of the current frame
  char* fbs[2] = { NULL, NULL };
  size_t fSize[2] = { 0, 0 };
  int ifb = 0;

  //=== loop() section  ===================
  xLastWakeTime = xTaskGetTickCount();

  for (;;) {

    //  Grab a frame from the camera and query its size
    cam.run();
    size_t s = cam.getSize();

    //  If frame size is more than we have previously allocated - request 133% of the current frame size
    if (s > fSize[ifb]) {
      fSize[ifb] = s * 4 / 3;
      fbs[ifb] = allocateMemory(fbs[ifb], fSize[ifb]);
    }

    //  Copy current frame into local buffer
    char* b = (char*) cam.getfb();
    memcpy(fbs[ifb], b, s);

    //  Let other tasks run and wait until the end of the current frame rate interval (if any time left)
    taskYIELD();
    vTaskDelayUntil(&xLastWakeTime, xFrequency);

    //  Only switch frames around if no frame is currently being streamed to a client
    //  Wait on a semaphore until client operation completes
    xSemaphoreTake( frameSync, portMAX_DELAY );

    //  Do not allow interrupts while switching the current frame
    portENTER_CRITICAL(&xSemaphore);
    camBuf = fbs[ifb];
    camSize = s;
    ifb++;
    ifb &= 1;  // this should produce 1, 0, 1, 0, 1 ... sequence
    portEXIT_CRITICAL(&xSemaphore);

    //  Let anyone waiting for a frame know that the frame is ready
    xSemaphoreGive( frameSync );

    //  Technically only needed once: let the streaming task know that we have at least one frame
    //  and it could start sending frames to the clients, if any
    xTaskNotifyGive( tStream );

    //  Immediately let other (streaming) tasks run
    taskYIELD();

    //  If streaming task has suspended itself (no active clients to stream to)
    //  there is no need to grab frames from the camera. We can save some juice
    //  by suspending the task
    if ( eTaskGetState( tStream ) == eSuspended ) {
      vTaskSuspend(NULL);  // passing NULL means "suspend yourself"
    }
  }
}


// ==== Memory allocator that takes advantage of PSRAM if present =======================
char* allocateMemory(char* aPtr, size_t aSize) {

  //  Since the current buffer is too small, free it
  if (aPtr != NULL) free(aPtr);


  size_t freeHeap = ESP.getFreeHeap();
  char* ptr = NULL;

  // If memory requested is more than 2/3 of the currently free heap, try PSRAM immediately
  if ( aSize > freeHeap * 2 / 3 ) {
    if ( psramFound() && ESP.getFreePsram() > aSize ) {
      ptr = (char*) ps_malloc(aSize);
    }
  }
  else {
    //  Enough free heap - let's try allocating fast RAM as a buffer
    ptr = (char*) malloc(aSize);

    //  If allocation on the heap failed, let's give PSRAM one more chance:
    if ( ptr == NULL && psramFound() && ESP.getFreePsram() > aSize) {
      ptr = (char*) ps_malloc(aSize);
    }
  }

  // Finally, if the memory pointer is NULL, we were not able to allocate any memory, and that is a terminal condition.
  if (ptr == NULL) {
    ESP.restart();
  }
  return ptr;
}


// ==== STREAMING ======================================================
const char HEADER[] = "HTTP/1.1 200 OK\r\n" \
                      "Access-Control-Allow-Origin: *\r\n" \
                      "Content-Type: multipart/x-mixed-replace; boundary=123456789000000000000987654321\r\n";
const char BOUNDARY[] = "\r\n--123456789000000000000987654321\r\n";
const char CTNTTYPE[] = "Content-Type: image/jpeg\r\nContent-Length: ";
const int hdrLen = strlen(HEADER);
const int bdrLen = strlen(BOUNDARY);
const int cntLen = strlen(CTNTTYPE);


// ==== Handle connection request from clients ===============================
void handleJPGSstream(void)
{
  //  Can only accommodate 10 clients. The limit is a default for WiFi connections
  if ( !uxQueueSpacesAvailable(streamingClients) ) return;


  //  Create a new WiFi Client object to keep track of this one
  WiFiClient* client = new WiFiClient();
  *client = server.client();

  //  Immediately send this client a header
  client->write(HEADER, hdrLen);
  client->write(BOUNDARY, bdrLen);

  // Push the client to the streaming queue
  xQueueSend(streamingClients, (void *) &client, 0);

  // Wake up streaming tasks, if they were previously suspended:
  if ( eTaskGetState( tCam ) == eSuspended ) vTaskResume( tCam );
  if ( eTaskGetState( tStream ) == eSuspended ) vTaskResume( tStream );
}


// ==== Actually stream content to all connected clients ========================
void streamCB(void * pvParameters) {
  char buf[16];
  TickType_t xLastWakeTime;
  TickType_t xFrequency;

  //  Wait until the first frame is captured and there is something to send
  //  to clients
  ulTaskNotifyTake( pdTRUE,          /* Clear the notification value before exiting. */
                    portMAX_DELAY ); /* Block indefinitely. */

  xLastWakeTime = xTaskGetTickCount();
  for (;;) {
    // By default, assume we are running at the target FPS
    xFrequency = pdMS_TO_TICKS(1000 / FPS);

    //  Only bother to send anything if there is someone watching
    UBaseType_t activeClients = uxQueueMessagesWaiting(streamingClients);
    if ( activeClients ) {
      // Adjust the period to the number of connected clients
      xFrequency /= activeClients;

      //  Since we are sending the same frame to everyone,
      //  pop a client from the front of the queue
      WiFiClient *client;
      xQueueReceive (streamingClients, (void*) &client, 0);

      //  Check if this client is still connected.

      if (!client->connected()) {
        //  delete this client reference if s/he has disconnected
        //  and don't put it back on the queue anymore. Bye!
        delete client;
      }
      else {

        //  Ok. This is an actively connected client.
        //  Let's grab a semaphore to prevent frame changes while we
        //  are serving this frame
        xSemaphoreTake( frameSync, portMAX_DELAY );

        client->write(CTNTTYPE, cntLen);
        sprintf(buf, "%u\r\n\r\n", (unsigned int) camSize);
        client->write(buf, strlen(buf));
        client->write((char*) camBuf, (size_t)camSize);
        client->write(BOUNDARY, bdrLen);

        // Since this client is still connected, push it to the end
        // of the queue for further processing
        xQueueSend(streamingClients, (void *) &client, 0);

        //  The frame has been served. Release the semaphore and let other tasks run.
        //  If there is a frame switch ready, it will happen now in between frames
        xSemaphoreGive( frameSync );
        taskYIELD();
      }
    }
    else {
      //  Since there are no connected clients, there is no reason to waste battery running
      vTaskSuspend(NULL);
    }
    //  Let other tasks run after serving every client
    taskYIELD();
    vTaskDelayUntil(&xLastWakeTime, xFrequency);
  }
}



const char JHEADER[] = "HTTP/1.1 200 OK\r\n" \
                       "Content-disposition: inline; filename=capture.jpg\r\n" \
                       "Content-type: image/jpeg\r\n\r\n";
const int jhdLen = strlen(JHEADER);

// ==== Serve up one JPEG frame =============================================
void handleJPG(void)
{
  WiFiClient client = server.client();

  if (!client.connected()) return;
  cam.run();
  client.write(JHEADER, jhdLen);
  client.write((char*)cam.getfb(), cam.getSize());
}


// ==== Handle invalid URL requests ============================================
void handleNotFound()
{
  String message = "Server is running!\n\n";
  message += "URI: ";
  message += server.uri();
  message += "\nMethod: ";
  message += (server.method() == HTTP_GET) ? "GET" : "POST";
  message += "\nArguments: ";
  message += server.args();
  message += "\n";
  server.send(200, "text/plain", message);
}



// ==== SETUP method ==================================================================
void setup()
{

  // Setup Serial connection:
  Serial.begin(115200);
  delay(1000); // wait for a second to let Serial connect


  // Configure the camera
  camera_config_t config;
  config.ledc_channel = LEDC_CHANNEL_0;
  config.ledc_timer = LEDC_TIMER_0;
  config.pin_d0 = Y2_GPIO_NUM;
  config.pin_d1 = Y3_GPIO_NUM;
  config.pin_d2 = Y4_GPIO_NUM;
  config.pin_d3 = Y5_GPIO_NUM;
  config.pin_d4 = Y6_GPIO_NUM;
  config.pin_d5 = Y7_GPIO_NUM;
  config.pin_d6 = Y8_GPIO_NUM;
  config.pin_d7 = Y9_GPIO_NUM;
  config.pin_xclk = XCLK_GPIO_NUM;
  config.pin_pclk = PCLK_GPIO_NUM;
  config.pin_vsync = VSYNC_GPIO_NUM;
  config.pin_href = HREF_GPIO_NUM;
  config.pin_sscb_sda = SIOD_GPIO_NUM;
  config.pin_sscb_scl = SIOC_GPIO_NUM;
  config.pin_pwdn = PWDN_GPIO_NUM;
  config.pin_reset = RESET_GPIO_NUM;
  config.xclk_freq_hz = 20000000;
  config.pixel_format = PIXFORMAT_JPEG;

  // Frame parameters: pick one
  //  config.frame_size = FRAMESIZE_UXGA;
  //  config.frame_size = FRAMESIZE_SVGA;
  //  config.frame_size = FRAMESIZE_QVGA;
  config.frame_size = FRAMESIZE_VGA;
  config.jpeg_quality = 12;
  config.fb_count = 2;

#if defined(CAMERA_MODEL_ESP_EYE)
  pinMode(13, INPUT_PULLUP);
  pinMode(14, INPUT_PULLUP);
#endif

  if (cam.init(config) != ESP_OK) {
    Serial.println("Error initializing the camera");
    delay(10000);
    ESP.restart();
  }


  //  Configure and connect to WiFi
  IPAddress ip;

  WiFi.mode(WIFI_STA);
  WiFi.begin("201", "1234abcd"); // WiFi SSID and password
  Serial.print("Connecting to WiFi");
  while (WiFi.status() != WL_CONNECTED)
  {
    delay(500);
    Serial.print(F("."));
  }
  ip = WiFi.localIP();
  Serial.println(F("WiFi connected"));
  Serial.println("");
  Serial.print("Stream Link: http://");
  Serial.print(ip);
  Serial.println("/mjpeg/1");


  // Start the main streaming RTOS task
  xTaskCreatePinnedToCore(
    mjpegCB,
    "mjpeg",
    4 * 1024,
    NULL,
    2,
    &tMjpeg,
    APP_CPU);
}


void loop() {
  vTaskDelay(1000);
}

This sketch is someone else's code that I found on GitHub. OK, next: connect the serial port, find our video stream's IP address, enter it in a browser, and we get our live video stream.
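
Opening the serial monitor, the output from the sketch above looks roughly like this (your IP address will differ; the one here matches the commented-out URL in the Python script further down):

Connecting to WiFi......WiFi connected

Stream Link: http://192.168.43.244/mjpeg/1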

Honestly, this way of getting a video stream is pretty poor: the frame rate is low, and it depends heavily on network conditions. Even though I added an external antenna, the signal was still intermittent.

Next, pull the video stream into OpenCV. I did the programming in PyCharm, but any other IDE should work just as well.

import cv2
#url = 'http://192.168.43.244/mjpeg/1'
url = 'your ip stream'

cap = cv2.VideoCapture(url)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

fourcc = cv2.VideoWriter_fourcc(*"mp4v")
out = cv2.VideoWriter('./video/test.mp4', fourcc, 20, (width, height))

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        out.write(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(25) & 0xFF == ord('q'):  # press Q to quit
            break
    else:
        continue

cap.release()
out.release()
cv2.destroyAllWindows()

Once the stream is opened, we start recording to mp4. Afterwards, we grab still frames from the locally saved test.mp4:

import cv2

# Use OpenCV to grab frames from the video at a fixed interval and save them as images

vc = cv2.VideoCapture('./video/test.mp4')  # open the video file
c = 0
d = 0
print("------------")
if vc.isOpened():  # check that the file opened correctly
    print("yes")
    rval, frame = vc.read()
else:
    rval = False
    print("false")

timeF = 100  # save one frame every timeF frames

while rval:  # loop over the video frames
    rval, frame = vc.read()
    if rval and c % timeF == 0:  # store one frame every timeF frames
        print("write...")
        cv2.imwrite(f'./video_cut/1_{d}.jpg', frame)  # save as an image
        d = d + 1
        print("success!")
    c = c + 1  # count every frame read
vc.release()
print("==================================")

The grabbed frames are saved locally.
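
As an aside, the two steps can be merged. Here is a minimal sketch (assuming the same stream URL as above) that saves stills directly from the live stream, skipping the intermediate mp4:

import cv2

url = 'your ip stream'  # same MJPEG URL as before

cap = cv2.VideoCapture(url)
saved = 0       # stills written so far
count = 0       # frames read so far
interval = 100  # save one frame out of every 100

while cap.isOpened() and saved < 20:
    ret, frame = cap.read()
    if not ret:
        break
    if count % interval == 0:
        cv2.imwrite(f'./video_cut/1_{saved}.jpg', frame)
        saved += 1
    count += 1

cap.release()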

Now for the processing step. The article I referenced extracts the first three color moments of each color channel as features, and evaluates the classifier with a confusion matrix.
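
Concretely, writing $p_{c,i}$ for the $N$ normalized pixel values of channel $c$ in the crop, the nine features are, per channel, the mean, the standard deviation, and the cube root of the third central moment:

$$\mu_c = \frac{1}{N}\sum_{i=1}^{N} p_{c,i},\qquad \sigma_c = \left(\frac{1}{N}\sum_{i=1}^{N}\left(p_{c,i}-\mu_c\right)^{2}\right)^{1/2},\qquad s_c = \left(\frac{1}{N}\sum_{i=1}^{N}\left(p_{c,i}-\mu_c\right)^{3}\right)^{1/3}$$

These are exactly the quantities the img2vector function below computes for the R, G and B channels.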

First of all, the open-source dataset provides the images used for the training and test sets.
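
For reference, the scripts below assume a flat directory of images whose file names encode class_index (the class count depends on the dataset you use), e.g.:

water_image/
    1_1.jpg    # class 1, image 1
    1_2.jpg    # class 1, image 2
    2_1.jpg    # class 2, image 1
    ...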

Next, we first process the open-source images. This step produces a table of features, a multi-dimensional matrix that makes the later training and evaluation easier:

# import libraries
import matplotlib.image as mpimg
import numpy as np
import os
import pandas as pd



# Color-moment feature extraction
def img2vector(filename):

    returnvect = np.zeros((1, 9))
    # a 1x9 array to hold the nine features

    fr = mpimg.imread(filename)
    # read the image with matplotlib

    l_max = fr.shape[0]//2+50   # rows: image center +/- 50 pixels
    l_min = fr.shape[0]//2-50
    w_max = fr.shape[1]//2+50   # columns: image center +/- 50 pixels
    w_min = fr.shape[1]//2-50
    water = fr[l_min:l_max, w_min:w_max, :].reshape(1, 10000, 3)  # 100x100 center crop, reshaped to 1x10000x3

    for i in range(3):
        this = water[:, :, i]/255
        returnvect[0, i] = np.mean(this)    # indices 0-2: first color moment (mean)
        returnvect[0, 3+i] = np.sqrt(np.mean(np.square(this-returnvect[0, i])))  # 3-5: second moment (standard deviation)
        returnvect[0, 6+i] = np.cbrt(np.mean(np.power(this-returnvect[0, i], 3)))  # 6-8: third moment (skewness)
    return returnvect





# Compute the features for every image
trainfilelist = os.listdir('./water_image')  # list the files in the directory
m = len(trainfilelist)                       # number of files
labels = np.zeros((1, m))  # 1 x m zero matrix for the class labels
train = np.zeros((1, m))   # 1 x m zero matrix for the image numbers
trainingMat = np.zeros((m, 9))  # m x 9 zero matrix for the features


for i in range(m):
    filenamestr = trainfilelist[i]            # current file name, e.g. 1_1.jpg
    filestr = filenamestr.split('.')[0]       # strip the extension
    classnumstr = int(filestr.split('_')[0])  # part before '_' is the class label
    picture_num = int(filestr.split('_')[1])  # part after '_' is the index within the class
    labels[0, i] = classnumstr
    train[0, i] = picture_num
    trainingMat[i, :] = img2vector('./water_image/%s' % filenamestr)  # build the feature matrix

# save
d = np.concatenate((labels.T, train.T, trainingMat), axis=1)  # join the arrays column-wise
dataframe = pd.DataFrame(d, columns=['Water kind','number', 'R_1', 'G_1', 'B_1', 'R_2', 'G_2', 'B_2', 'R_3', 'G_3', 'B_3'])
dataframe.to_csv('./data/moment.csv', encoding='utf-8', index=False)  # write to CSV


Then the frames we grabbed go through exactly the same processing:

import numpy as np
import os
import pandas as pd
import warnings
from first_step import img2vector  # reuse the feature extractor from the first script
warnings.filterwarnings("ignore")  # silence missing-label warnings



# Compute the features for every captured frame
trainfilelist = os.listdir('./video_cut')  # list the files in the directory
m = len(trainfilelist)                     # number of files
labels = np.zeros((1, m))  # 1 x m zero matrix for the class labels
train = np.zeros((1, m))   # 1 x m zero matrix for the image numbers
trainingMat = np.zeros((m, 9))  # m x 9 zero matrix for the features

for i in range(m):
    filenamestr = trainfilelist[i]            # current file name, e.g. 1_1.jpg
    filestr = filenamestr.split('.')[0]       # strip the extension
    classnumstr = int(filestr.split('_')[0])  # part before '_' is the class label
    picture_num = int(filestr.split('_')[1])  # part after '_' is the index within the class
    labels[0, i] = classnumstr
    train[0, i] = picture_num
    trainingMat[i, :] = img2vector('./video_cut/%s' % filenamestr)  # build the feature matrix

# save
d = np.concatenate((labels.T, train.T, trainingMat), axis=1)  # join the arrays column-wise
dataframe = pd.DataFrame(d, columns=['Water kind', 'number', 'R_1', 'G_1', 'B_1', 'R_2', 'G_2', 'B_2', 'R_3', 'G_3', 'B_3'])
dataframe.to_csv('./real_data/real_moment.csv', encoding='utf-8', index=False)  # write to CSV

Finally, split the data into training and test sets, train the model, and evaluate the water quality of the frames grabbed in real time. And we're done!


import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame
import numpy as np

def cm_plot(y, yp):
    from sklearn.metrics import confusion_matrix  # confusion matrix helper

    cm = confusion_matrix(y, yp)  # compute the confusion matrix

    plt.matshow(cm, cmap=plt.cm.Greens)  # plot it with the cm.Greens colormap; see the matplotlib docs for others
    plt.colorbar()  # color scale

    for i in range(len(cm)):  # write each count into its cell
        for j in range(len(cm)):
            # annotate takes xy as (column, row), so swap the indices
            plt.annotate(cm[i, j], xy=(j, i), horizontalalignment='center', verticalalignment='center')

    plt.ylabel('True label')  # axis labels
    plt.xlabel('Predicted label')
    return plt


inputfile = './data/moment.csv'
data = pd.read_csv(inputfile, encoding='utf-8')  # the file was written as utf-8 above
# Note: shuffle() can't be used here; take a random permutation of the rows instead
sampler = np.random.permutation(len(data))
d = data.take(sampler).values

data_train = d[:int(0.8*len(data)),:]  # first 80% as the training set
data_test = d[int(0.8*len(data)):,:]  # last 20% as the test set
print(data_train.shape)
print(data_test.shape)

# Build the support vector machine model
x_train = data_train[:, 2:]*30  # scale up the features
y_train = data_train[:,0].astype(int)
x_test = data_test[:, 2:]*30  # scale up the features
y_test = data_test[:,0].astype(int)
print(x_train.shape)
print(x_test.shape)
# Import the SVM class, then build and train the model
from sklearn import svm
model = svm.SVC()
model.fit(x_train, y_train)
import pickle
pickle.dump(model, open('./save_model/clf.model', 'wb'))

# model = pickle.load(open('svcmodel.model','rb'))
# Generate the confusion matrices
from sklearn import metrics
cm_train = metrics.confusion_matrix(y_train, model.predict(x_train))  # confusion matrix on the training samples
cm_test = metrics.confusion_matrix(y_test, model.predict(x_test))  # confusion matrix on the test samples
print(cm_train.shape)
print(cm_test.shape)
df1 = DataFrame(cm_train, index = range(1,5), columns=range(1,5))
df2 = DataFrame(cm_test, index = range(1,5), columns=range(1,5))
df1.to_excel('./train_data_xlxs/trainPre.xlsx')
df2.to_excel('./train_data_xlxs/testPre.xlsx')

print(model.score(x_train,y_train))  # training accuracy
print(model.score(x_test,y_test))  # test accuracy


cm_plot(y_train, model.predict(x_train)).show()  # cm_plot is our confusion-matrix plotting helper
cm_plot(y_test, model.predict(x_test)).show()


#------------------------------------------------------------------------------------------------------------------
# Now for the real data
inputfile1 = './real_data/real_moment.csv'
data1 = pd.read_csv(inputfile1, encoding='utf-8')  # written as utf-8 earlier

sampler = np.random.permutation(len(data1))
d = data1.take(sampler).values

data_train1 = d[:int(0.8*len(data1)),:]  # first 80%
data_test1 = d[int(0.8*len(data1)):,:]  # last 20%
print(data_train1.shape)
print(data_test1.shape)



x_train1 = data_train1[:, 2:] * 30  # scale up the features
y_train1 = data_train1[:, 0].astype(int)
x_test1 = data_test1[:, 2:] * 30  # scale up the features
y_test1 = data_test1[:, 0].astype(int)
print(x_train1.shape)
print(x_test1.shape)


cm_train1 = metrics.confusion_matrix(y_train1, model.predict(x_train1))
# df3 = DataFrame(cm_train1, index = range(1, 5), columns=range(1, 5))
# df3.to_excel('./real_data_xlxs/realPreTrain.xlsx')
# print(model.score(x_train1, y_train1))  # accuracy on this split
cm_plot(y_train1, model.predict(x_train1)).show()  # confusion matrix for the real "training" split

cm_test1 = metrics.confusion_matrix(y_test1, model.predict(x_test1))
# df4 = DataFrame(cm_test1, index = range(1, 5), columns=range(1, 5))
# df4.to_excel('./real_data_xlxs/realPreTest.xlsx')
# print(model.score(x_test1, y_test1))
cm_plot(y_test1, model.predict(x_test1)).show()  # confusion matrix for the real "test" split

print(model.score(x_train1, y_train1))  # accuracy on the real training split
print(model.score(x_test1, y_test1))  # accuracy on the real test split

In the end, you get confusion matrices like the ones plotted above.

I'll open-source the code on CSDN and GitHub. Thanks, everyone, for your support.

CSDN:

基于ESP32CAM的水质处理 (resource download): https://download.csdn.net/download/m0_57628341/85446035

GITHUB:

chenyuhan1997/ESP32CAM_water_quality_assessment: Perform water quality assessments by acquiring a live ESP32CAM video stream. It is split into two parts: the ESP32CAM firmware, and the Python/OpenCV processing. https://github.com/chenyuhan1997/ESP32CAM_water_quality_assessment
