Implementing an N:M Producer-Consumer Pattern with C++11

In the producer-consumer pattern, N threads produce data while M threads consume it, and the two roles communicate through a shared in-memory buffer.

1. Memory buffer

The buffer is a template class that provides synchronized reads and writes on top of a std::list. Because the compiler must see a template's implementation at the point of instantiation, the declaration and the definition are kept together in a single file, normally the header. The code is as follows:

SyncQueue.h

#pragma once


#include <list>
#include <mutex>
#include <thread>
#include <condition_variable>
#include <iostream>

template <typename T>
class SyncQueue
{
public:
	SyncQueue(int maxSize);

	void Put(const T& x);

	void Put(T&& x);

	void Take(std::list<T>& list);

	void Take(T& t);

	void Stop(void);

	bool Empty(void);

	bool Full(void);

	size_t Size(void);

private:
	bool NotFull(void) const;

	bool NotEmpty(void) const;

	template <typename F> 
	void Add(F&& x);

private:
	std::list<T> m_queue;                  // underlying buffer
	std::mutex m_mutex;                    // protects m_queue and m_needStop
	std::condition_variable m_notEmpty;    // signaled when data becomes available
	std::condition_variable m_notFull;     // signaled when space becomes available
	unsigned int m_maxSize;                // maximum number of buffered elements
	bool m_needStop;                       // set by Stop() to wake and release all waiters
};




template<typename T>
inline SyncQueue<T>::SyncQueue(int maxSize) : m_maxSize(maxSize), m_needStop(false)
{
}

template<typename T>
inline void SyncQueue<T>::Put(const T & x)
{
	Add(x);
}

template<typename T>
inline void SyncQueue<T>::Put(T && x)
{
	Add(std::forward<T>(x));
}

template<typename T>
inline void SyncQueue<T>::Take(std::list<T>& list)
{
	std::unique_lock<std::mutex> locker(m_mutex);
	m_notEmpty.wait(locker, [this]() {return m_needStop || NotEmpty(); });
	if (m_needStop)
	{
		return;
	}
	list = std::move(m_queue);
	m_notFull.notify_one();
	return;
}

template<typename T>
inline void SyncQueue<T>::Take(T& t)
{
	std::unique_lock<std::mutex> locker(m_mutex);
	m_notEmpty.wait(locker, [this]() {return m_needStop || NotEmpty(); });
	if (m_needStop)
	{
		return;
	}
	t = m_queue.front();
	m_queue.pop_front();
	m_notFull.notify_one();
	return;
}

template<typename T>
inline void SyncQueue<T>::Stop(void)
{
	{
		std::lock_guard<std::mutex> locker(m_mutex);
		m_needStop = true;
	}
	m_notEmpty.notify_all();
	m_notFull.notify_all();
	return;
}

template<typename T>
inline bool SyncQueue<T>::Empty(void)
{
	std::lock_guard<std::mutex> locker(m_mutex);
	return m_queue.empty();
}

template<typename T>
inline bool SyncQueue<T>::Full(void)
{
	std::lock_guard<std::mutex> locker(m_mutex);
	return m_queue.size() == m_maxSize;
}

template<typename T>
inline size_t SyncQueue<T>::Size(void)
{
	std::lock_guard<std::mutex> locker(m_mutex);
	return m_queue.size();
}

template<typename T>
inline bool SyncQueue<T>::NotFull(void) const
{
	bool full = m_queue.size() >= m_maxSize;
	if (full)
	{
		std::cout << "缓冲区溢出,需等待,异步线程ID:" << std::this_thread::get_id() << std::endl;
	}
	return !full;
}

template<typename T>
inline bool SyncQueue<T>::NotEmpty(void) const
{
	bool empty = m_queue.empty();
	if (empty)
	{
		std::cout << "缓冲区为空,需等待,异步线程ID:" << std::this_thread::get_id() << std::endl;
	}
	return !empty;
}

template<typename T>
template<typename F>
inline void SyncQueue<T>::Add(F && x)
{
	std::unique_lock<std::mutex> locker(m_mutex);
	m_notFull.wait(locker, [this]() {return m_needStop || NotFull(); });
	if (m_needStop)
	{
		return;
	}
	m_queue.push_back(std::forward<F>(x));
	m_notEmpty.notify_one();
	return;
}
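
Before wiring SyncQueue into a thread pool, it can be exercised on its own. The following is a minimal standalone sketch (an illustration added here, not part of the original project) that runs two producers and two consumers over a SyncQueue<std::string>; the deliberately small capacity makes the "buffer full" wait path easy to trigger:

// Standalone usage sketch of SyncQueue (not part of the original project).
#include "SyncQueue.h"

#include <string>
#include <vector>

int main()
{
	SyncQueue<std::string> queue(4);	// small capacity so producers occasionally block

	std::vector<std::thread> producers;
	std::vector<std::thread> consumers;

	// N = 2 producers, each putting five messages
	for (int p = 0; p < 2; ++p)
	{
		producers.emplace_back([&queue, p]() {
			for (int i = 0; i < 5; ++i)
			{
				queue.Put("producer " + std::to_string(p) + " item " + std::to_string(i));
			}
		});
	}

	// M = 2 consumers, each taking five messages (10 in total, matching production)
	for (int c = 0; c < 2; ++c)
	{
		consumers.emplace_back([&queue]() {
			for (int i = 0; i < 5; ++i)
			{
				std::string msg;
				queue.Take(msg);
				std::cout << std::this_thread::get_id() << " got: " << msg << std::endl;
			}
		});
	}

	for (auto& t : producers) { t.join(); }
	for (auto& t : consumers) { t.join(); }

	queue.Stop();
	return 0;
}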

2. Thread pool

The consumers use a thread pool that automatically takes data from the buffer queue and delivers it to the application layer through a callback.

ThreadPool.h

#pragma once

#include "SyncQueue.h"

#include <functional>
#include <memory>
#include <atomic>
#include <string>
#include <chrono>




// using Task = std::function<void()>;
using Task = std::string;

class ThreadPool
{
public:
	ThreadPool(int maxTaskCount = 100);

	~ThreadPool(void);

	void Start(const std::function<void(Task&)>& fr, int numThreads = std::thread::hardware_concurrency());

	void Stop(void);

	void AddTask(Task&& task);

	void AddTask(const Task& task);

private:
	
	void RunThread();

	void StopThreadGroup();

private:
	std::list<std::shared_ptr<std::thread>> m_threadGroup;   // worker (consumer) threads
	std::function<void(Task&)> m_callbackTask{ nullptr };    // application-layer callback
	SyncQueue<Task> m_queue;                                  // shared buffer between producers and consumers
	std::atomic<bool> m_runing{ false };                      // running flag for the worker loop
	std::once_flag m_flagStart;                               // Start() may only run once
	std::once_flag m_flagStop;                                // Stop() may only run once
};

ThreadPool.cpp

#include "ThreadPool.h"

ThreadPool::ThreadPool(int maxTaskCount):m_queue(maxTaskCount)
{
	return;
}

ThreadPool::~ThreadPool()
{
	Stop();
	return;
}

void ThreadPool::Start(const std::function<void(Task&)>& fr, int numThreads)
{
	std::call_once(m_flagStart, [this, fr, numThreads]() {
		m_callbackTask = fr;
		m_runing = true;
		for (int i = 0; i < numThreads; i++)
		{
			m_threadGroup.push_back(std::make_shared<std::thread>(&ThreadPool::RunThread, this));
		}
		return; 
	});
	return;
}

void ThreadPool::Stop()
{
	std::call_once(m_flagStop, [this]() { return StopThreadGroup(); });
	return;
}

void ThreadPool::AddTask(Task && task)
{
	m_queue.Put(std::forward<Task>(task));
	return;
}

void ThreadPool::AddTask(const Task & task)
{
	m_queue.Put(task);
	return;
}

void ThreadPool::RunThread()
{
	while (m_runing)
	{
		std::list<Task> list;
		m_queue.Take(list);

		for (auto& task : list)
		{
			if (!m_runing)
			{
				return;
			}
			// task(); 
			if (nullptr != m_callbackTask)
			{
				m_callbackTask(task);
			}
			

			//std::cout << std::this_thread::get_id() << "=============get task: " << task.strData << std::endl;
		}

		std::this_thread::sleep_for(std::chrono::seconds(1));	// pause briefly before pulling the next batch
	}
	return;
}

void ThreadPool::StopThreadGroup()
{
	m_queue.Stop();
	m_runing = false;
	for (auto& thread : m_threadGroup)
	{
		if (thread)
		{
			thread->join();
		}
	}
	m_threadGroup.clear();
	return;
}
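
The commented-out `// using Task = std::function<void()>;` alias in ThreadPool.h and the `// task();` line in RunThread hint at a variant in which the task itself is a callable rather than a string. Below is a minimal sketch of that variant, assuming the alias in ThreadPool.h is switched back to the std::function form:

// Sketch only: assumes "using Task = std::function<void()>;" (the commented-out alias in ThreadPool.h).
#include "ThreadPool.h"

#include <iostream>

void TestCallableTasks()
{
	ThreadPool pool;

	// The callback simply invokes the stored callable on a pool (consumer) thread.
	pool.Start([](Task& task) { task(); });

	for (int i = 0; i < 10; ++i)
	{
		pool.AddTask([i]() {
			std::cout << "task " << i << " executed on thread " << std::this_thread::get_id() << std::endl;
		});
	}

	std::this_thread::sleep_for(std::chrono::seconds(2));
	pool.Stop();
}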

3. Test

main.cpp

#include "ThreadPool.h"


#include <iostream>

std::atomic<int> taskCount{ 0 };

void getTask(Task& task)
{
	std::cout << std::this_thread::get_id() << "=============get task: " << task << std::endl;
}

void TestThreadPool()
{
	std::function<void(Task&)> fr = std::bind(getTask, std::placeholders::_1);

	ThreadPool pool;

	pool.Start(fr);

	std::thread thd1([&pool]() {
		for (size_t i = 0; i < 100; i++)
		{
			auto thdId = std::this_thread::get_id();
			// pool.AddTask([thdId]() {std::cout << "task " << ++taskCount << " from producer thread 1, thread ID: " << thdId << std::endl; });

			Task task = std::to_string(++taskCount);
			pool.AddTask(task);
			std::cout << "set task: " << task << std::endl;
		}
	});

	std::thread thd2([&pool]() {
		for (size_t i = 0; i < 10; i++)
		{
			auto thdId = std::this_thread::get_id();
			// pool.AddTask([thdId]() {std::cout << "producer thread 2, thread ID: " << thdId << std::endl; });

			Task task = std::to_string(++taskCount);
			pool.AddTask(task);
			std::cout << "set task: " << task << std::endl;
		}
	});

	std::this_thread::sleep_for(std::chrono::seconds(2));
	getchar();
	pool.Stop();

	thd1.join();
	thd2.join();
	return;
}

void func()
{
	int count{ 0 };
	while (count < 10)
	{
		std::this_thread::sleep_for(std::chrono::seconds(1));
		std::cout << "thread is runing" << std::endl;
		count++;
	}
	return;
}

int main()
{
	TestThreadPool();

	return 0;
}

Test result: the producer threads print "set task: ..." lines while the pool's consumer threads print interleaved "get task: ..." lines, each tagged with the consuming thread's ID.
