C++ Delegates

I was recently looking at C# delegates and wondered whether C++ has anything similar. After a bit of searching it turns out it does. I read through quite a few blog posts, organized my own take on the idea, and wrote it down here as a note.

What Delegates Are For

To understand any piece of knowledge, the most important thing is to understand what it is for. So what are delegates for? The material I found online offers all kinds of answers; my own understanding boils down to the following four points:
  1. Implementing the strategy pattern, isolating variation
  2. Implementing the observer pattern, i.e. the multicast delegate
  3. Decoupling, somewhat like programming against interfaces (dependency inversion)
  4. Implementing asynchronous tasks.
These are only applications. As for concrete implementations, I have so far implemented only a synchronous delegate (covering the first three points) and an asynchronous delegate (for asynchronous tasks).
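For intuition: a delegate is basically a callable bound to a receiver, much like in C#. As a rough illustration only (not part of the implementation in this post), the closest modern C++ building block is std::function; the little sketch below shows the strategy/decoupling idea from points 1 and 3:

#include <functional>
#include <iostream>
#include <string>

// The report writer depends only on a callable "formatting strategy",
// not on any concrete formatter class.
class ReportWriter
{
public:
	std::function<std::string(int)> formatter;   // the "delegate" slot

	void Write(int value)
	{
		if (formatter)                            // only call if a strategy was assigned
			std::cout << formatter(value) << std::endl;
	}
};

int main()
{
	ReportWriter writer;
	writer.formatter = [](int v) { return "value = " + std::to_string(v); };
	writer.Write(42);   // prints: value = 42
}

The rest of this post sticks to the classic member-function-pointer approach, which is how a C#-style delegate is usually emulated in pre-C++11 code.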

Synchronous Delegates

Plainly put, a synchronous delegate is a wrapper around a (member) function pointer, and a multicast delegate adds a simple observer pattern on top of it. The code below sketches the idea:

// Base class: acts both as the invocation interface and as a simple
// multicast container (observer list) of other delegates.
class IDelegate
{
private:
	std::vector<IDelegate *> mList;   // registered delegates (observers)

public:
	virtual ~IDelegate() {}

	// Invoked on a single target; overridden by concrete delegates.
	virtual void Invoke(WPARAM wParam, LPARAM lParam) {}

	// Register another delegate, C#-style "+=".
	IDelegate& operator += (IDelegate & iDelegate)
	{
		mList.push_back(&iDelegate);
		return *this;
	}

	// Notify every registered delegate.
	virtual void Signal(WPARAM wParam, LPARAM lParam)
	{
		for (auto ptr = mList.begin(); ptr != mList.end(); ++ptr)
		{
			(*ptr)->Invoke(wParam, lParam);
		}
	}
};

// Concrete delegate: binds an object instance and a member function of T.
template<typename T>
class CDelegate : public IDelegate
{
	typedef void(T::* PFunc)(WPARAM wParam, LPARAM lParam);
private:
	T * m_pThis;      // bound object
	PFunc m_pFunc;    // bound member function

public:
	CDelegate(T *pThis, PFunc pFunc)
	{
		m_pThis = pThis;
		m_pFunc = pFunc;
	}

	// Call the bound member function on the bound object.
	virtual void Invoke(WPARAM wParam, LPARAM lParam)
	{
		(m_pThis->*m_pFunc)(wParam, lParam);
	}
};

The test code is as follows:
//
//   TEST DELEGATE
//

class CA
{
public: 
	void Calc(WPARAM wParam, LPARAM lParam)
	{
		int count = 0;
		for (int i = 0; i < 1000; ++i)
		{
			count += i;
			Sleep(1);
		}
		TRACE(_T("Thread[%d] invoke wParam = %d, lParam = %d"), GetCurrentThreadId(), wParam, count);
	}
};

class CB
{	
public:

	System::Concurrency::IDelegate mDelegate;

	void Notify()
	{
		mDelegate.Signal(30202, 2020101);
	}
};

int _tmain(int argc, _TCHAR* argv[])
{
	TRACE(_T("Thread[%d] start main"), GetCurrentThreadId());


	//
	//  Synchronize delegate
	//
	CA ca1; 
	System::Concurrency::CDelegate<CA> caDelegate1(&ca1, &CA::Calc);

	CA ca2; 
	System::Concurrency::CDelegate<CA> caDelegate2(&ca2, &CA::Calc);

	CB cb;
	cb.mDelegate += caDelegate1;
	cb.mDelegate += caDelegate2;
	cb.Notify();
}
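As a side note, on a C++11 compiler the same multicast idea can be written with std::function instead of the IDelegate/CDelegate pair. The sketch below is only a comparison and is not used in the rest of this post:

#include <functional>
#include <vector>
#include <windows.h>   // WPARAM / LPARAM

// Multicast delegate over std::function: "+=" registers a callback, Signal notifies all.
class MulticastDelegate
{
	std::vector<std::function<void(WPARAM, LPARAM)>> mList;

public:
	MulticastDelegate& operator += (std::function<void(WPARAM, LPARAM)> f)
	{
		mList.push_back(f);
		return *this;
	}

	void Signal(WPARAM wParam, LPARAM lParam)
	{
		for (auto &f : mList)
			f(wParam, lParam);
	}
};

// Usage, mirroring the test above:
//   CA ca1, ca2;
//   MulticastDelegate d;
//   d += [&ca1](WPARAM w, LPARAM l) { ca1.Calc(w, l); };
//   d += [&ca2](WPARAM w, LPARAM l) { ca2.Calc(w, l); };
//   d.Signal(30202, 2020101);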

Asynchronous Delegates

The rest of this post focuses on the asynchronous delegate. To be honest, I keep wondering whether I have the concept slightly wrong, since it looks a lot like a task in C#; anyway, as long as it works and solves the problem, that is good enough. An asynchronous delegate dispatches a task (a function call) to a worker thread for execution, so the calling thread is never blocked. A common case in our projects is saving a small amount of data to a file: spinning up a dedicated thread for it feels wasteful, but writing the file on the calling thread hurts its performance because the writes are frequent. After weighing the options, an asynchronous delegate turns out to be a very good fit. Here is my implementation:

1. Implement the asynchronous task interface:
class ITask
{
public:
	WPARAM wParam;   // task arguments
	LPARAM lParam;
	HANDLE hEvent;   // optional completion event; owned by the caller

public:
	ITask()
	{
		wParam = 0;
		lParam = 0;
		hEvent = NULL;
	}

	// Virtual destructor: the thread pool deletes tasks through ITask*.
	virtual ~ITask() { }

	virtual void Execute() = 0;
	virtual void Complete() { }

	// Signal the completion event, if one was supplied.
	virtual void Signal()
	{
		if (hEvent != NULL)
		{
			SetEvent(hEvent);
		}
	}
};

2. Implement the asynchronous delegate interface; tasks are packaged and dispatched through the asynchronous delegate.
// A task that calls a bound member function of T when executed.
template<typename T> class CAsyncTask : public ITask
{
public:
	typedef void (T::* PFunc)(WPARAM wParam, LPARAM lParam);

	T *pThis;     // bound object
	PFunc pFunc;  // bound member function of class T

	CAsyncTask()
	{
		pThis = NULL;
		pFunc = NULL;
	}

	virtual void Execute()
	{
		if (pThis != NULL && pFunc != NULL)
		{
			(pThis->*pFunc)(wParam, lParam);
		}

		// The thread pool also calls Signal(); setting the event here as well is harmless.
		if (hEvent != NULL)
		{
			SetEvent(hEvent);
		}
	}
};

template<typename T>
class CAsyncDelegate
{
	typedef void(T::* PFunc)(WPARAM wParam, LPARAM lParam);
private:
	T * m_pThis;
	PFunc m_pFunc;
	HANDLE m_hEvent;     // manual-reset completion event, one per delegate
	bool m_bComplete;
public:
	CAsyncDelegate(T *pThis, PFunc pFunc)
	{
		m_pThis = pThis;
		m_pFunc = pFunc;

		// Unnamed event: a named event would be shared by every instance in the process.
		m_hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
		m_bComplete = true;
	}

	~CAsyncDelegate()
	{
		if (m_hEvent != NULL)
		{
			CloseHandle(m_hEvent);
			m_hEvent = NULL;
		}
	}

	// Wrap the bound call in a task and hand it to the thread pool.
	void BeginInvoke(WPARAM wParam, LPARAM lParam)
	{
		CAsyncTask<T> *task = new CAsyncTask<T>();
		task->pThis = m_pThis;
		task->pFunc = m_pFunc;
		task->wParam = wParam;
		task->lParam = lParam;
		task->hEvent = m_hEvent;

		if (System::Concurrency::ThreadPool.SubmitTask(task))
		{
			m_bComplete = false;
		}
		else
		{
			delete task;   // submission failed; the pool will not free the task
		}
	}

	// Block until the task completes (or the timeout expires).
	void EndInvoke(DWORD dwTimeout = INFINITE)
	{
		if (!m_bComplete)
		{
			DWORD dw = WaitForSingleObject(m_hEvent, dwTimeout);
			if (dw == WAIT_OBJECT_0)
			{
				ResetEvent(m_hEvent);
				m_bComplete = true;
			}
		}
	}

	bool IsCompleted() const
	{
		return m_bComplete;
	}
};
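To tie this back to the file-saving example mentioned at the start of this section, usage might look like the sketch below. CDataStore and SaveToFile are made-up names for illustration, and it assumes ThreadPool.Create has already been called (as it is in the test code further down); only CAsyncDelegate itself comes from the code above:

// Hypothetical caller-side sketch: offload small, frequent file writes.
class CDataStore
{
public:
	// Signature matches the delegate: void (T::*)(WPARAM, LPARAM)
	void SaveToFile(WPARAM wParam, LPARAM lParam)
	{
		// ... write the small data block identified by wParam/lParam to disk ...
	}
};

void Example()
{
	CDataStore store;   // must outlive the asynchronous call
	System::Concurrency::CAsyncDelegate<CDataStore> saveDelegate(&store, &CDataStore::SaveToFile);

	saveDelegate.BeginInvoke(1, 0);   // returns immediately; the write runs on a pool thread

	// ... keep doing other work on the calling thread ...

	saveDelegate.EndInvoke();         // wait for the write to finish before store goes away
}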


3. Implement the worker threads. The workers use the Windows thread pool. I once considered writing my own thread pool, but then thought: Windows already provides such a good facility, why not use it? As for the individual thread pool functions, MSDN explains them better than I could, so I will not repeat that here.

ThreadPool.h
class CThreadPool
{
public:
	CThreadPool();
	~CThreadPool();

	/* Create the thread pool and specify the minimum and maximum number of
	 * threads it may create and use. */
	bool Create(const int nMinThreads, const int nMaxThreads);

	/* Close the thread pool and clean up its resources. */
	void Close();

	/* Submit a task to the thread pool. */
	bool SubmitTask(ITask *pTask);

	int GetTasks() const;

protected:

	struct tWorkContext
	{
		CThreadPool *pThis;
		ITask *pTask;
	};

	/* Work callback executed on a thread pool thread. */
	static VOID CALLBACK _WorkCallBack(PTP_CALLBACK_INSTANCE, PVOID, PTP_WORK);

	void incTasks();
	void decTasks();

private:
	PTP_POOL  m_threadPool;
	PTP_CLEANUP_GROUP m_cleanupGroup;
	TP_CALLBACK_ENVIRON m_callbackEnv;
	bool m_isThreadPoolWorking;
	int m_nTasks;  // Number of tasks currently executing or still pending (not synchronized).
};

extern CThreadPool ThreadPool;

ThreadPool.cpp
CThreadPool ThreadPool;

CThreadPool::CThreadPool()
{
	m_threadPool = NULL;
	m_cleanupGroup = NULL;
	m_isThreadPoolWorking = false;
	m_nTasks = 0;
}

CThreadPool::~CThreadPool()
{

}

bool CThreadPool::Create(const int nMinThreads, const int nMaxThreads)
{
	ASSERT(nMinThreads > 0);
	ASSERT(nMaxThreads > 1);
	ASSERT(nMaxThreads > nMinThreads);

	// Local tracing helper; named so it does not collide with the FAILED() macro from winerror.h.
	#define TRACE_FAILED(step) TRACE(_T("%s failed error=%d"), step, GetLastError())

	BOOL bSucceed = TRUE;

	// Initializes a callback environment. 
	// The thread pool must associate itself with this callback environment.
	InitializeThreadpoolEnvironment(&m_callbackEnv);

	m_threadPool = CreateThreadpool(NULL);
	if (m_threadPool == NULL)
	{
		FAILED(_T("create thread pool"));
		bSucceed = FALSE;
	}

	if (bSucceed)
	{
		bSucceed = SetThreadpoolThreadMinimum(m_threadPool, (DWORD)nMinThreads);
		SetThreadpoolThreadMaximum(m_threadPool, (DWORD)nMaxThreads);
		if (!bSucceed) 
			FAILED(_T("SetThreadpoolThreadMinimum"));
	}

	// Associate the callback environment with thread pool.
	if (bSucceed)
	{
		SetThreadpoolCallbackPool(&m_callbackEnv, m_threadPool);
	}

	// Create a new cleanup group and associate it with the thread pool (callback environment).
	m_cleanupGroup = CreateThreadpoolCleanupGroup();
	if (m_cleanupGroup != NULL)
	{
		SetThreadpoolCallbackCleanupGroup(&m_callbackEnv, m_cleanupGroup, NULL);
	}
	else
	{
		bSucceed = FALSE;
		FAILED(_T("CreateThreadpoolCleanupGroup"));
	}

	// If initialization fails, clean up the resources.
	if (!bSucceed)
	{
		if (m_cleanupGroup != NULL)
		{
			CloseThreadpoolCleanupGroup(m_cleanupGroup);
			m_cleanupGroup = NULL;
		}

		if (m_threadPool != NULL)
		{
			CloseThreadpool(m_threadPool);
			m_threadPool = NULL;
		}
	}

	#undef TRACE_FAILED

	m_isThreadPoolWorking = (bSucceed == TRUE);
	
	return m_isThreadPoolWorking;
}

void CThreadPool::Close()
{
	m_isThreadPoolWorking = false;

	if (m_cleanupGroup != NULL)
	{
		// Release the members of the specified cleanup group, then close the group itself.
		// @ fCancelPendingCallbacks:
		//   - FALSE: waits for all outstanding callbacks to complete.
		//   - TRUE : cancels outstanding callbacks that have not yet started. In other words,
		//            it blocks until the callbacks currently executing finish, but callbacks
		//            that are still waiting to run are cancelled.
		//
		// After CloseThreadpoolCleanupGroupMembers returns, the individual work objects must
		// not be released again, because the cleanup group has already released them.
		//
		CloseThreadpoolCleanupGroupMembers(m_cleanupGroup, FALSE/*fCancelPendingCallbacks */, NULL);
		CloseThreadpoolCleanupGroup(m_cleanupGroup);
		m_cleanupGroup = NULL;
	}

	if (m_threadPool != NULL)
	{
		CloseThreadpool(m_threadPool);
		m_threadPool = NULL;
	}

	DestroyThreadpoolEnvironment(&m_callbackEnv);
}

bool CThreadPool::SubmitTask(ITask *pTask)
{
	ASSERT(pTask != NULL);

	if (!m_isThreadPoolWorking)
	{
		return false;
	}

	tWorkContext *pWorkContext = new tWorkContext();
	pWorkContext->pThis = this;
	pWorkContext->pTask = pTask;

	PTP_WORK pWork = CreateThreadpoolWork(_WorkCallBack, (PVOID)pWorkContext, &m_callbackEnv);
	if (pWork != NULL)
	{
		SubmitThreadpoolWork(pWork);
		incTasks();
	}
	else
	{
		TRACE(_T("CreateThreadpoolWork failed. error=%d"), GetLastError());
		delete pWorkContext;   // the callback will never run, so free the context here
	}

	return (pWork != NULL);
}

/*static*/ 
VOID CALLBACK CThreadPool::_WorkCallBack(PTP_CALLBACK_INSTANCE Instance, PVOID context, PTP_WORK work)
{
	tWorkContext *pWorkContext = static_cast<tWorkContext *>(context);
	if (pWorkContext != NULL)
	{
		if (pWorkContext->pTask != NULL)
		{
			pWorkContext->pTask->Execute();
			pWorkContext->pTask->Complete();
			pWorkContext->pTask->Signal();

			// Release the task object here, but do not close the event handle:
			// the event belongs to the caller, so the caller is responsible for
			// closing it.
			delete pWorkContext->pTask; 
		}

		pWorkContext->pThis->decTasks();

		delete pWorkContext;
	}
}

int CThreadPool::GetTasks() const
{
	return m_nTasks;
}

void CThreadPool::incTasks()
{
	++m_nTasks;
}

void CThreadPool::decTasks()
{
	--m_nTasks;
}
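As the closing note at the end of this post points out, m_nTasks is not protected against concurrent access (incTasks runs on the submitting thread while decTasks runs on pool threads). One possible fix, sketched here only as an assumption and not part of the original code, is to use the Windows Interlocked functions and declare the counter as volatile LONG:

// Sketch: assumes the member declaration in ThreadPool.h is changed to
//     volatile LONG m_nTasks;
// and that these bodies replace the three counter methods above.
void CThreadPool::incTasks()
{
	InterlockedIncrement(&m_nTasks);
}

void CThreadPool::decTasks()
{
	InterlockedDecrement(&m_nTasks);
}

int CThreadPool::GetTasks() const
{
	// Reading an aligned LONG is atomic on Windows; the cast keeps the public signature.
	return (int)m_nTasks;
}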

Test code:
	//
	//  Asynchronize delegate...
	//
	System::Concurrency::ThreadPool.Create(2, 20);

	std::vector<System::Concurrency::CAsyncDelegate<CA> *> waitList;

	// The target object must outlive the asynchronous calls, so it is declared
	// before the loop; declared inside the loop, it would be destroyed before
	// the worker threads get to invoke it.
	CA caAsync;
	for (int i = 0; i < 500; ++i)
	{
		System::Concurrency::CAsyncDelegate<CA> *asyncDelegate = new System::Concurrency::CAsyncDelegate<CA>(&caAsync, &CA::Calc);
		asyncDelegate->BeginInvoke(i, i);
		waitList.push_back(asyncDelegate);
	}

	TRACE(_T("pushed all list, now, waiting..."));

	for (auto ptr = waitList.begin(); ptr != waitList.end(); ptr++)
	{
		if (!(*ptr)->IsCompleted())
		{
			(*ptr)->EndInvoke();
		//	TRACE(_T("one more done."));
		}
	}

	for (auto ptr = waitList.begin(); ptr != waitList.end(); ptr++)
	{
		delete (*ptr);
	}
	waitList.clear();

	System::Concurrency::ThreadPool.Close();



OK, that is roughly all there is to delegates. Keep in mind, though, that this is only test code and ignores quite a few issues. For example, it does not handle multithreaded synchronization: variables such as m_nTasks inside the thread pool are not synchronized at all, so the code still needs more polishing before it can be used in a real project!

