srsLTE Source Code Study: The block_queue.h Blocking Queue Class

This article examines block_queue.h, a general-purpose blocking queue implementation from srsLTE. The queue can behave as either a bounded or an unbounded blocking queue, and both the push and pop operations come in blocking and non-blocking variants. The implementation is built on the mutex and condition variables provided by the POSIX threads library. The full listing follows, with a short usage sketch after it.

Table of Contents

block_queue.h  


block_queue.h  

Path: lib/include/srslte/common/block_queue.h (snapshot dated 2/28/2019)

/**
 *
 * \section COPYRIGHT
 *
 * Copyright 2013-2015 Software Radio Systems Limited
 *
 * \section LICENSE
 *
 * This file is part of the srsUE library.
 *
 * srsUE is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of
 * the License, or (at your option) any later version.
 *
 * srsUE is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * A copy of the GNU Affero General Public License can be found in
 * the LICENSE file in the top-level directory of this distribution
 * and at http://www.gnu.org/licenses/.
 *
 */


/******************************************************************************
 *  File:         block_queue.h
 *  Description:  General-purpose blocking queue. It can behave as a bounded or
 *                unbounded blocking queue and allows blocking and non-blocking
 *                operations in both push and pop
 *****************************************************************************/


#ifndef SRSLTE_BLOCK_QUEUE_H
#define SRSLTE_BLOCK_QUEUE_H

#include <queue>
#include <memory>
#include <utility>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>

namespace srslte {

template<typename myobj>
class block_queue {

public:

  // Callback interface invoked with the mutex held inside the pop/push
  // methods (an example implementation is sketched after the listing)
  class call_mutexed_itf {
  public:
    virtual void popping(myobj obj) = 0;
    virtual void pushing(myobj obj) = 0;
  };

  block_queue(int capacity = -1) {
    pthread_mutex_init(&mutex, NULL);
    pthread_cond_init(&cv_empty, NULL);
    pthread_cond_init(&cv_full, NULL);
    this->capacity = capacity;
    mutexed_callback = NULL;
    enable = true;
    num_threads = 0;
  }
  ~block_queue() {
    // Disable the queue and wake up any threads blocked in push or pop.
    // Broadcast (rather than signal) so that every waiter is released,
    // not just one of them.
    pthread_mutex_lock(&mutex);
    enable = false;
    pthread_cond_broadcast(&cv_full);
    pthread_cond_broadcast(&cv_empty);
    pthread_mutex_unlock(&mutex);

    // Wait for threads blocked in push/pop to exit
    while(num_threads>0) {
      usleep(100);
    }

    // Once they have exited, destroy the condition variables and mutex
    pthread_mutex_lock(&mutex);
    pthread_cond_destroy(&cv_full);
    pthread_cond_destroy(&cv_empty);
    pthread_mutex_unlock(&mutex);
    pthread_mutex_destroy(&mutex);
  }
  void set_mutexed_itf(call_mutexed_itf *itf) {
    mutexed_callback = itf;
  }
  void resize(int new_capacity) {
    capacity = new_capacity;
  }

  void push(const myobj& value) {
    push_(value, true);
  }

  bool try_push(const myobj& value) {
    return push_(value, false);
  }

  bool try_pop(myobj *value) {
    return pop_(value, false);
  }

  myobj wait_pop() { // blocking pop
    myobj value = myobj();
    pop_(&value, true);
    return value;
  }

  bool empty() { // queue is empty?
    pthread_mutex_lock(&mutex);
    bool ret = q.empty();
    pthread_mutex_unlock(&mutex);
    return ret;
  }

  void clear() { // remove all items
    // pop_() tolerates a NULL value pointer, so the popped items are
    // simply discarded
    myobj *item = NULL;
    while (try_pop(item));
  }

  myobj front() { // Note: reads the queue without taking the mutex
    return q.front();
  }

  size_t size() { // Note: reads the queue without taking the mutex
    return q.size();
  }

private:

  bool pop_(myobj *value, bool block) {
    if (!enable) {
      return false;
    }
    pthread_mutex_lock(&mutex);
    num_threads++;
    bool ret = false;
    if (q.empty() && !block) {
      goto exit;
    }
    while (q.empty() && enable) {
      pthread_cond_wait(&cv_empty, &mutex);
    }
    if (!enable) {
      goto exit;
    }
    if (value) {
      *value = q.front();
    }
    q.pop();
    ret = true;
    if (mutexed_callback && value) { // guard NULL pointer (e.g. from clear())
      mutexed_callback->popping(*value);
    }
    pthread_cond_signal(&cv_full);
  exit:
    num_threads--;
    pthread_mutex_unlock(&mutex);
    return ret;
  }

  bool push_(const myobj& value, bool block) {
    if (!enable) {
      return false;
    }
    pthread_mutex_lock(&mutex);
    num_threads++;
    bool ret = false;
    if (capacity > 0) {
      if (block) {
        while(q.size() >= (uint32_t) capacity && enable) {
          pthread_cond_wait(&cv_full, &mutex);
        }
        if (!enable) {
          goto exit;
        }
      } else if (q.size() >= (uint32_t) capacity) {
        goto exit;
      }
    }
    q.push(value);
    ret = true;
    if (mutexed_callback) {
      mutexed_callback->pushing(value);
    }
    pthread_cond_signal(&cv_empty);
  exit:
    num_threads--;
    pthread_mutex_unlock(&mutex);
    return ret;
  }

  std::queue<myobj> q; 
  pthread_mutex_t mutex;
  pthread_cond_t  cv_empty;
  pthread_cond_t  cv_full;
  call_mutexed_itf *mutexed_callback;
  int capacity;
  bool enable;
  uint32_t num_threads;
};

}

#endif // SRSLTE_BLOCK_QUEUE_H
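
To make the API concrete, here is a minimal producer/consumer sketch built on the class above. The capacity of 8 and the item counts are arbitrary illustrative choices; the include path follows the file location noted earlier.

#include <pthread.h>
#include <cstdio>
#include "srslte/common/block_queue.h"

// Bounded queue holding at most 8 integers. With the default capacity of -1
// the queue is unbounded and push() never blocks.
static srslte::block_queue<int> queue(8);

static void* producer(void*) {
  for (int i = 0; i < 16; i++) {
    queue.push(i); // blocks while 8 items are already queued
  }
  return NULL;
}

static void* consumer(void*) {
  for (int i = 0; i < 16; i++) {
    int v = queue.wait_pop(); // blocks while the queue is empty
    printf("popped %d\n", v);
  }
  return NULL;
}

int main() {
  pthread_t tp, tc;
  pthread_create(&tp, NULL, producer, NULL);
  pthread_create(&tc, NULL, consumer, NULL);
  pthread_join(tp, NULL);
  pthread_join(tc, NULL);
  return 0;
}

try_push() and try_pop() provide the non-blocking variants: they return false immediately instead of waiting when the queue is full or empty, respectively.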
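
The call_mutexed_itf interface lets the user run bookkeeping code while the queue mutex is still held, so the extra state stays consistent with the queue contents. The sketch below is hypothetical (the packet type and byte_counter class are not part of srsLTE); it keeps a byte count that is updated atomically with each push/pop:

// Hypothetical payload type carrying a length field
struct packet {
  uint32_t len;
};

// popping()/pushing() run inside pop_()/push_() with the mutex held,
// so total_bytes always matches the bytes currently queued
class byte_counter : public srslte::block_queue<packet>::call_mutexed_itf {
public:
  byte_counter() : total_bytes(0) {}
  void pushing(packet p) { total_bytes += p.len; }
  void popping(packet p) { total_bytes -= p.len; }
  uint32_t total_bytes;
};

// Usage:
//   srslte::block_queue<packet> q;
//   byte_counter counter;
//   q.set_mutexed_itf(&counter);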
