// Background: in a C++ server, some requests take noticeably long to handle;
// in a single thread they would block the next request, which is exactly what
// we want to avoid. So the work must run in parallel, and multithreading is
// the first choice — but creating a brand-new thread per request means that,
// under heavy load, the constant thread creation/destruction overhead becomes
// significant. Hence this server uses a fixed pool of worker threads instead.
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <iostream>
#include <vector>
#include <event.h>
#include <pthread.h>
#include <unistd.h>
#include <deque>
using namespace std;
// Global libevent base shared by all event registrations in this file.
struct event_base* base;
// Scratch buffer holding the most recently received client message, handed
// to on_write via the event arg.
// NOTE(review): shared by ALL connections — a later recv can overwrite a
// message before its write event fires; confirm whether that is acceptable.
char buffer[1024];
// Count of task objects currently alive (incremented in on_write under the
// mutex, decremented in ~task).
int iCount = 0;
// One unit of work for the thread pool: the client socket fd plus the bytes
// received from it. Tasks form the singly linked queue headed by
// readhead/readtail; workers pop from the head and delete the node.
struct task {
    task() : fd(0), next(NULL) {
        cout << "create task ---------------->" << endl;
        memset(data, 0, sizeof(data));
    }
    // Destructor decrements the global live-task counter; callers are
    // expected to delete tasks while holding the queue mutex.
    ~task() {
        cout << "release the data in task ====> " << fd << endl;
        iCount -= 1;
    }
    int fd;               // client socket the reply is sent to
    char data[1024];      // NUL-terminated message payload
    struct task* next;    // next node in the pending queue
};
// Protects the task queue (readhead/readtail) and iCount.
pthread_mutex_t mutex;
// Signaled whenever a task is appended to the queue; workers wait on it.
pthread_cond_t cond1;
// NOTE(review): new_task and writehead appear unused in this file — confirm
// before removing.
struct task* new_task = NULL;
struct task *readhead = NULL, *readtail = NULL, *writehead = NULL;
// Per-connection bookkeeping: the accepted socket fd plus the peer's
// address and port, recorded by onAccept.
struct sConnectInfo {
    sConnectInfo() : iSockfd(0), iRemotePort(0) {
        memset(cRemoteIp, 0, sizeof(cRemoteIp));
    }
    int iSockfd;          // accepted client socket
    int iRemotePort;      // peer port in host byte order
    char cRemoteIp[32];   // peer IPv4 address as dotted-quad text
};
// Every accepted connection's fd/ip/port.
// NOTE(review): entries are never removed when a client disconnects — the
// vector grows monotonically; confirm whether that is intentional.
vector<sConnectInfo> kvConInfo;
// Pairing of a socket fd with a message buffer.
// NOTE(review): this struct appears unused in this file — confirm before
// removing (name also looks like a typo for "sThreadInfo").
struct sTheadInfo {
    sTheadInfo() : iSockfd(0) {
        memset(cBuffer, 0, sizeof(cBuffer));
    }
    int iSockfd;
    char cBuffer[1024];
};
// NOTE(review): fd and memCount appear unused in this file — confirm before
// removing.
int fd = -1;
int memCount = 0;
// Worker-thread entry point: loops forever popping tasks from the shared
// queue and echoing "<data> --> <fd>" back to the client. `ptr` is unused.
void *thread(void *ptr)
{
    (void)ptr;
    while (1) {
        pthread_mutex_lock(&mutex);
        // Wait until the queue is non-empty; the while-loop (not if) guards
        // against spurious wakeups.
        while (readhead == NULL)
            pthread_cond_wait(&cond1, &mutex);
        struct task* tmp = readhead;
        readhead = readhead->next;

        cout << tmp->data << endl;
        cout << tmp->fd << endl;
        cout << "iCount == " << iCount << endl;

        // Copy everything we need out of the task while still holding the
        // lock, then release it BEFORE the (potentially slow) send()/sleep().
        // The original held the mutex across the send and the sleeps, which
        // serialized all five workers into effectively one.
        int tmpFd = tmp->fd;
        char data[1024];
        // snprintf, not sprintf: tmp->data can be up to 1023 bytes, so the
        // " --> <fd>" suffix would overflow a plain sprintf into data[1024].
        snprintf(data, sizeof(data), "%s --> %d", tmp->data, tmp->fd);
        // ~task decrements the shared iCount, so delete while locked.
        delete tmp;
        pthread_mutex_unlock(&mutex);

        int ret = send(tmpFd, data, strlen(data), 0);
        if (ret < 0) {
            cout << "send msg to client error ====================>" << endl;
        }
        cout << "this thread will to work ======> thread id = " << pthread_self() << endl;
        // Simulated long-running jobs for specific fds (test hooks); they now
        // run outside the mutex so other workers keep draining the queue.
        if (tmpFd == 45) {
            cout << "this thread will sleep ===> " << tmpFd << "^^^^^^^^^^^^^^^^ thread id " << pthread_self() << endl;
            sleep(20);
            cout << "this task has done fd = " << tmpFd << endl;
        }
        if (tmpFd == 52) {
            cout << "this thread will sleep ===> " << tmpFd << "^^^^^^^^^^^^^^^^ thread id " << pthread_self() << endl;
            sleep(40);
            cout << "this task has done fd = " << tmpFd << endl;
        }
    }
    return 0;
}
// Write-event callback: packages the received message (passed via `arg`,
// which points at the global buffer) into a task and appends it to the
// worker queue, then wakes one worker.
void on_write(int sock, short event, void* arg)
{
    char* buffer = (char*)arg;
    if (buffer == NULL)
        return;
    pthread_mutex_lock(&mutex);
    struct task* pkTask = new task();
    pkTask->fd = sock;
    // Bounded copy that always NUL-terminates. The original
    // memcpy(data, buffer, strlen(buffer)) could overflow data[1024] and
    // left the payload unterminated when it exactly filled the array.
    snprintf(pkTask->data, sizeof(pkTask->data), "%s", buffer);
    pkTask->next = NULL;
    iCount += 1;
    if (readhead == NULL)
    {
        readhead = pkTask;
        readtail = pkTask;
    }
    else
    {
        readtail->next = pkTask;
        readtail = pkTask;
    }
    // Exactly one task was queued, so waking a single worker suffices;
    // broadcast woke all five only for four to re-sleep immediately.
    pthread_cond_signal(&cond1);
    pthread_mutex_unlock(&mutex);
}
// Read-event callback: receives data from a client. On EOF/error it tears
// down the (persistent) read event and closes the socket; otherwise it
// stages the message in the global buffer and schedules a one-shot write
// event that hands the message to the worker queue.
void onRead(int iCliFd, short iEvent, void *arg)
{
    int iLen;
    char buf[1500];
    // Leave room for the terminator: recv'ing a full 1500 bytes and then
    // writing buf[iLen] = 0 was an off-by-one write past the array.
    iLen = recv(iCliFd, buf, sizeof(buf) - 1, 0);
    if (iLen <= 0) {
        cout << "Client Close" << endl;
        // Connection closed (== 0) or errored (< 0): the event passed itself
        // as arg from onAccept, so remove and free it here.
        struct event *pEvRead = (struct event*)arg;
        event_del(pEvRead);
        delete pEvRead;
        close(iCliFd);
        return;
    }
    buf[iLen] = 0;
    cout << "Client Info:" << buf << endl;
    // NOTE(review): pkEventWrite is never freed after the one-shot write
    // event fires, and the single global `buffer` is shared by all clients —
    // a second recv can clobber it before on_write runs. Both need a
    // per-event heap buffer freed in on_write to fix properly.
    struct event* pkEventWrite = new event;
    // Bounded copy: buf holds up to 1499 bytes but the global buffer is only
    // 1024; the original memcpy of strlen(buf) bytes could overflow it.
    snprintf(buffer, sizeof(buffer), "%s", buf);
    event_set(pkEventWrite, iCliFd, EV_WRITE, on_write, buffer);
    event_base_set(base, pkEventWrite);
    event_add(pkEventWrite, NULL);
}
// Accept callback: accepts the pending connection, records its peer
// address/port in kvConInfo, and registers a persistent read event for it.
void onAccept(int iSvrFd, short iEvent, void *arg)
{
    struct sockaddr_in sCliAddr;
    socklen_t iSinSize = sizeof(sCliAddr);
    int iCliFd = accept(iSvrFd, (struct sockaddr*)&sCliAddr, &iSinSize);
    // accept() can fail (client reset before we got here, fd exhaustion,
    // ...); the original used -1 as a valid fd from here on.
    if (iCliFd < 0) {
        perror("accept");
        return;
    }
    cout << "=============> " << iSvrFd << endl;
    char remote[INET_ADDRSTRLEN];
    // Convert the peer address once and reuse the result; the original
    // called inet_ntop twice and streamed the first result without a NULL
    // check.
    const char* pkTmp = inet_ntop(AF_INET, &sCliAddr.sin_addr, remote, INET_ADDRSTRLEN);
    cout << ntohs(sCliAddr.sin_port) << " " << (pkTmp ? pkTmp : "(unknown)") << endl;
    sConnectInfo kInfo;
    kInfo.iSockfd = iCliFd;
    kInfo.iRemotePort = ntohs(sCliAddr.sin_port);
    if (pkTmp != NULL) {
        // Bounded copy that guarantees NUL termination (the original memcpy
        // of strlen bytes relied on the constructor's memset for the NUL).
        snprintf(kInfo.cRemoteIp, sizeof(kInfo.cRemoteIp), "%s", pkTmp);
    }
    kvConInfo.push_back(kInfo);
    cout << "kvConInfo.size = " << kvConInfo.size() << endl;
    // Register a persistent read event (EV_PERSIST keeps it armed after each
    // trigger). The event passes itself as arg so onRead can delete it on
    // disconnect.
    struct event *pEvRead = new event;
    event_set(pEvRead, iCliFd, EV_READ|EV_PERSIST, onRead, pEvRead);
    event_base_set(base, pEvRead);
    event_add(pEvRead, NULL);
}
// Entry point: spins up a pool of five worker threads, then runs a libevent
// loop accepting TCP connections on 192.168.67.130:12347. Returns nonzero if
// the listening socket cannot be set up.
int main()
{
    pthread_t tid1, tid2, tid3, tid4, tid5;
    pthread_mutex_init(&mutex, NULL);
    pthread_cond_init(&cond1, NULL);
    // Fixed worker pool that drains the task queue filled by on_write().
    pthread_create(&tid1, NULL, thread, NULL);
    pthread_create(&tid2, NULL, thread, NULL);
    pthread_create(&tid3, NULL, thread, NULL);
    pthread_create(&tid4, NULL, thread, NULL);
    pthread_create(&tid5, NULL, thread, NULL);

    int iSvrFd;
    struct sockaddr_in sSvrAddr;
    memset(&sSvrAddr, 0, sizeof(sSvrAddr));
    sSvrAddr.sin_family = AF_INET;
    // NOTE(review): hard-coded bind address — use htonl(INADDR_ANY) to
    // listen on all interfaces; confirm against the deployment environment.
    sSvrAddr.sin_addr.s_addr = inet_addr("192.168.67.130");
    sSvrAddr.sin_port = htons(12347);
    // Create the listening TCP socket on port 12347 (the original comment
    // claimed 8888, which didn't match the code).
    iSvrFd = socket(AF_INET, SOCK_STREAM, 0);
    if (iSvrFd < 0) {
        perror("socket");
        return 1;
    }
    int reuse = 1;
    setsockopt(iSvrFd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
    // The original ignored bind/listen failures and then dispatched events
    // on a dead socket; fail fast with the errno reason instead.
    if (bind(iSvrFd, (struct sockaddr*)&sSvrAddr, sizeof(sSvrAddr)) < 0) {
        perror("bind");
        return 1;
    }
    if (listen(iSvrFd, 10) < 0) {
        perror("listen");
        return 1;
    }

    // Initialize the global event base used by all callbacks.
    base = event_base_new();
    cout << "================>dddd" << endl;
    struct event evListen;
    // Persistent read event on the listening socket => accept callback.
    event_set(&evListen, iSvrFd, EV_READ|EV_PERSIST, onAccept, NULL);
    event_base_set(base, &evListen);
    event_add(&evListen, NULL);
    // Blocks here dispatching events until the loop exits.
    event_base_dispatch(base);
    return 0;
}