Python multithreading: threading + queue + Lock

Basics

1: Multithreading basics.
2: Queue basics.
The above are the blog posts on multithreading fundamentals that I collected and found worthwhile, so I will not repeat the basics here. This post focuses on the practical implementation.

Project overview

This is a facial expression recognition project: every frame of the incoming image stream is analysed, and both the image stream and the processing results are exchanged over Mqtt:

  1. Mqtt (mosquitto): subscribes and publishes messages. The subscribed image-stream messages are the input to the expression analysis, and the analysis results are published back over Mqtt for other modules to consume; the graphical management tool Mqtt.fx is used for monitoring.
  2. Main multithreading module:
import threading
from queue import Queue
import paho.mqtt.client as mqtt
import numpy as np
import time
from threading import Lock
from tensorflow.keras.models import load_model
from centerface import CenterFace
import cv2
import json
url = ""


class calculate_emotion(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.detect_w = 640
        self.detect_h = 480
        self.label2class = {0: 'Anger', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sadness', 5: 'Surprise', 6: 'Neutral'}
        self.centerface = CenterFace()

    def detect_emotion(self, frame, dets):
        '''
        Emotion recognition for every detected face in a frame
        :param frame: BGR image frame
        :param dets: face detections, each as [x1, y1, x2, y2, confidence]
        :return: JSON string {"uid": ..., "faces": [{"face": box, "emotion_text": emo}, ...]}
        '''
        box_emo_list = []
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        all_face = []
        all_box = []
        all_confidence = []
        for det in dets:
            boxes, confidence = det[:4], det[4]
            x, y, m, n = int(boxes[0]), int(boxes[1]), int(boxes[2]), int(boxes[3])
            box = [int(boxes[0]), int(boxes[1]), int(boxes[2]), int(boxes[3])]
            if x >= 0 and y >= 0 and m - x >= 0 and n - y >= 0:
                all_box.append(box)
                all_confidence.append(confidence)
                face = gray[y:n, x:m]
                face = cv2.resize(face, (48, 48))
                face = face.astype(np.float32)
                face /= float(face.max())
                face = np.reshape(face.flatten(), (1, 48, 48, 1))
                if len(all_face) == 0:
                    all_face = face
                else:
                    all_face = np.concatenate([all_face, face], axis=0)
            else:
                continue
        if len(all_face) != 0:
            prediction = emo_model.predict(all_face)
            emo_ps = list(np.max(prediction, axis=1))
            prediction_result = np.argmax(prediction, axis=1)
            emos = [self.label2class.get(pre, 'Neutral') for pre in prediction_result]
            for box, emo, confidence, emo_p in zip(all_box, emos, all_confidence, emo_ps):
                if confidence <= 0.7:
                    continue
                # When the predicted probability is low, default the emotion to Neutral
                if emo_p < 0.6:
                    emo = 'Neutral'
                box_emo_list.append({"face": box, "emotion_text": emo})
        return json.dumps({"uid": "2021", "faces": box_emo_list})

    def run(self):
        # The demo reads from an offline video; for the real interface this becomes receiving images
        while alg_api_server.emotion_switch:
            if img_tmp_queue.qsize() > 0:
                lock.acquire()
                ret, frame = img_tmp_queue.get().read()
                lock.release()
                frame = cv2.resize(frame, (self.detect_w, self.detect_h))
                # Processing of a received image starts here
                # Multi-face detection
                dets, lms = self.centerface(frame, self.detect_h, self.detect_w, threshold=0.35)
                # Emotion recognition
                all_face_emotion = self.detect_emotion(frame, dets)
                alg_api_server.publish("all_face_emotion", all_face_emotion, 0, False)


class alg_api(threading.Thread):
    def __init__(self, mqtt_addr, id):
        threading.Thread.__init__(self)
        self.client = mqtt.Client()
        self.addr = mqtt_addr
        self.uid = id
        self.emotion_switch = True  # switch that turns frame processing on and off

    def run(self):
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        addr_tmp = self.addr[6:]
        ip_port = addr_tmp.split(":")
        self.client.connect(ip_port[0], int(ip_port[1]), 600)  # 600 is the keepalive interval in seconds
        self.client.subscribe([("faces_control", 2), ("2021_img", 0)])
        self.client.loop_forever()

    def publish(self, topic, msg, qos, retain):
        self.client.publish(topic, msg, qos, retain)

    def get_state(self):
        return state

    def set_state(self, value):
        global state
        state = value
        print("set state", state, value)

    def get_url(self):
        return url

    def on_connect(self, client, userdata, flags, rc):
        print("Connected with result code: " + str(rc))

    def on_message(self, client, userdata, msg):
        try:
            # If a start command is detected
            print(msg.topic + " " + str(msg.payload))
            if msg.topic == "faces_control" and json.loads(msg.payload)["state"] and json.loads(msg.payload)[
                "uid"] == self.uid:
                self.emotion_switch = True
                for i in range(1):
                    # calculate_emotion subclasses threading.Thread, so instantiate it and call start()
                    th1 = calculate_emotion()
                    th1.start()
            elif msg.topic == "faces_control" and json.loads(msg.payload)["state"] is False and \
                    json.loads(msg.payload)["uid"] == self.uid:
                self.emotion_switch = False
                img_tmp_queue.queue.clear()
            elif self.emotion_switch and msg.topic == "2021_img" and str(msg.payload.decode()).split(":", 1)[
                0] == self.uid:
                if len(str(msg.payload).split(":", 1)) == 2:
                    img_tmp_queue.put(str(msg.payload.decode()).split(":", 1)[1])
        except Exception as e:
            print("%s", e)


if __name__ == '__main__':
    emo_model = load_model('models/emotion.h5', compile=False)
    alg_api_server = alg_api("tcp://127.0.0.1:1883", "2021")
    alg_api_server.setDaemon(True)  # Mark as a daemon thread so the child thread does not hang forever if the main thread fails, i.e. when the main thread exits the child thread exits too; must be set before start()
    alg_api_server.start()
    img_tmp_queue = Queue()
    lock = threading.Lock()
    img_tmp = cv2.VideoCapture('1.mp4')
    while True:
        lock.acquire()
        img_tmp_queue.put(img_tmp)
        lock.release()

Usage walkthrough

1. Import the threading module and define two thread classes

class calculate_emotion(threading.Thread):
class alg_api(threading.Thread):
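
A minimal, self-contained sketch of this subclassing pattern (class and variable names here are illustrative, not from the project): override run(), then create an instance and call start(), which executes run() in a new thread.

import threading

class worker(threading.Thread):
    def __init__(self, tag):
        threading.Thread.__init__(self)
        self.tag = tag

    def run(self):
        # run() executes in the new thread once start() is called
        print("worker %s running in %s" % (self.tag, threading.current_thread().name))

if __name__ == '__main__':
    w = worker("demo")
    w.start()  # spawns the thread; calling run() directly would stay in the main thread
    w.join()   # wait for the worker to finish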

2. Set the daemon flag, so that a problem in the main thread does not leave the child thread hanging forever; i.e. when the main thread exits, the child thread exits too. This must be done before start().

 emo_model = load_model('models/emotion.h5', compile=False)
 alg_api_server = alg_api("tcp://127.0.0.1:1883", "2021")
 alg_api_server.setDaemon(True)  
 alg_api_server.start()
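
A small standalone sketch (function and variable names are made up) of what the daemon flag does: a daemon thread is terminated when the main thread exits, whereas a non-daemon thread would keep the process alive. On current Python, thread.daemon = True is the preferred spelling of setDaemon(True), and it must likewise be set before start().

import threading
import time

def background_loop():
    # stands in for something long-running like client.loop_forever()
    while True:
        time.sleep(1)

if __name__ == '__main__':
    t = threading.Thread(target=background_loop)
    t.daemon = True  # same effect as t.setDaemon(True); must come before start()
    t.start()
    time.sleep(2)
    print("main thread exiting; the daemon thread dies with it")
    # Without daemon=True this script would never exit, because
    # background_loop() loops forever.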

3. Define a queue; by default its capacity is unbounded.

img_tmp_queue = Queue()
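
A quick sketch of the difference between the default unbounded queue and a bounded one (the maxsize value is only for illustration): put() on a full bounded queue blocks, or raises queue.Full when called with block=False, which is one way to keep a producer loop like the one above from growing the queue without limit.

from queue import Queue, Full

unbounded = Queue()         # default maxsize=0: put() never blocks
bounded = Queue(maxsize=2)  # holds at most two items

bounded.put("frame-1")
bounded.put("frame-2")
try:
    bounded.put("frame-3", block=False)  # queue is full -> raises queue.Full
except Full:
    print("queue full, dropping frame")

print(unbounded.qsize(), bounded.qsize())  # prints: 0 2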

4. Define a thread lock and acquire it on every access to the queue to prevent dirty data, while endlessly pushing image-stream data into the queue.

lock = threading.Lock()
img_tmp = cv2.VideoCapture('1.mp4')
while True:
    lock.acquire()
    img_tmp_queue.put(img_tmp)
    lock.release()
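
Note that Queue is already thread-safe for a single put() or get(); the explicit lock here mainly protects the combined "check qsize, then get" sequence used in calculate_emotion.run(). A minimal sketch of the same idea, with illustrative names, using the with lock: form, which releases the lock automatically even if an exception is raised:

import threading
from queue import Queue

lock = threading.Lock()
img_queue = Queue()

def consume_one():
    # Guard the "check size, then get" pair so another consumer cannot
    # drain the queue between the two calls.
    with lock:  # acquired here, released automatically on exit
        if img_queue.qsize() > 0:
            return img_queue.get()
    return None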

5. Here one thread is started for testing. If data is still in the queue when the switch is turned off, the queue is cleared to wait for the next invocation.

if msg.topic == "faces_control" and json.loads(msg.payload)["state"] and json.loads(msg.payload)["uid"] == self.uid:
    self.emotion_switch = True
    for i in range(1):
        th1 = calculate_emotion()
        th1.start()
elif msg.topic == "faces_control" and json.loads(msg.payload)["state"] is False and json.loads(msg.payload)["uid"] == self.uid:
    self.emotion_switch = False
    img_tmp_queue.queue.clear()
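
Because calculate_emotion subclasses threading.Thread, the worker is started by instantiating it and calling start(); wrapping the class in a plain Thread(target=...) would only construct the object and never run its run() method. A reduced sketch of the start/stop handling, assuming the classes and globals from the listing above:

# Start: instantiate the Thread subclass and call start()
worker = calculate_emotion()
worker.start()

# Stop: flip the switch so the worker's while-loop exits, then drop any
# frames still waiting. Queue has no public clear(), so one cautious way
# is to hold its internal mutex while clearing the underlying deque.
alg_api_server.emotion_switch = False
with img_tmp_queue.mutex:
    img_tmp_queue.queue.clear()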

Summary

The above is the multithreading + queue pattern I used in a real project. Because calling the model from inside multiple threads caused other problems, I eventually abandoned this approach... (kept here for future reference)
