libevent testing

10. The fd passed to bufferevent_socket_new can be created by yourself, bound to a specific IP, or tied to a particular network interface. With a user-supplied fd, bufferevent_socket_connect behaves as blocking by default; use evutil_make_socket_nonblocking to switch it to non-blocking. A return value of 0 can already be treated as success, with no need to wait for BEV_EVENT_CONNECTED: in my tests the connection sometimes succeeded without that event ever being delivered.
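A sketch of that setup (error checks omitted; the two IP addresses are placeholders, and read_cb/event_cb stand for whatever callbacks you already use):

#include <event2/bufferevent.h>
#include <event2/event.h>
#include <event2/util.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <string.h>

/* Sketch only: create the fd ourselves, bind it to a chosen local IP,
 * make it non-blocking, then hand it to bufferevent_socket_new. */
static struct bufferevent *make_client_bev(struct event_base *base,
                                           bufferevent_data_cb read_cb,
                                           bufferevent_event_cb event_cb)
{
    evutil_socket_t fd = socket(AF_INET, SOCK_STREAM, 0);

    struct sockaddr_in local;
    memset(&local, 0, sizeof(local));
    local.sin_family = AF_INET;
    local.sin_port = 0;                          /* let the kernel pick the local port */
    inet_aton("192.168.6.10", &local.sin_addr);  /* bind to a specific local IP */
    bind(fd, (struct sockaddr *)&local, sizeof(local));

    evutil_make_socket_nonblocking(fd);          /* make the connect non-blocking */

    struct bufferevent *bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
    bufferevent_setcb(bev, read_cb, NULL, event_cb, NULL);
    bufferevent_enable(bev, EV_READ);

    struct sockaddr_in srv;
    memset(&srv, 0, sizeof(srv));
    srv.sin_family = AF_INET;
    srv.sin_port = htons(9999);
    inet_aton("192.168.6.48", &srv.sin_addr);
    if (bufferevent_socket_connect(bev, (struct sockaddr *)&srv, sizeof(srv)) == 0) {
        /* 0 already counts as success here; per the note above,
         * don't rely on BEV_EVENT_CONNECTED always arriving. */
        return bev;
    }
    bufferevent_free(bev);
    return NULL;
}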

1. Build a libevent server for clients to connect to:

Based on the advanced server code from the reference: Libevent使用例子,从简单到复杂_luotuo44的博客-CSDN博客_libevent

Changes: the backlog passed to evconnlistener_new_bind is set to 1000; if a large number of clients connect at the same time, this value needs to be raised. If you want to see the connection details (how to enable port reuse, non-blocking mode, etc.), the basic example at that link is the better reference.
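As a rough sketch, the manual setup with port reuse and non-blocking mode would replace the evconnlistener_new_bind call in main() below with something like this (not taken from the linked example; error checks omitted):

evutil_socket_t lfd = socket(AF_INET, SOCK_STREAM, 0);
evutil_make_socket_nonblocking(lfd);          /* non-blocking listening socket */
evutil_make_listen_socket_reuseable(lfd);     /* same effect as LEV_OPT_REUSEABLE */

struct sockaddr_in sin;
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_port = htons(9999);
bind(lfd, (struct sockaddr*)&sin, sizeof(sin));

/* with backlog > 0, evconnlistener_new calls listen(lfd, 1000) itself */
evconnlistener *listener = evconnlistener_new(base, listener_cb, base,
                                              LEV_OPT_CLOSE_ON_FREE, 1000, lfd);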

The client program is also available at the link above, so I won't paste it here.

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <event.h>
#include <event2/listener.h>
#include <event2/bufferevent.h>
#include <arpa/inet.h>
//#include <thread.h>
  
  
void listener_cb(evconnlistener *listener, evutil_socket_t fd,  
                 struct sockaddr *sock, int socklen, void *arg);  
  
void socket_read_cb(bufferevent *bev, void *arg);  
void socket_event_cb(bufferevent *bev, short events, void *arg);  
  
int main()  
{  
    //evthread_use_pthreads();//enable threads  
    //system("ulimit -n 20000");

    struct sockaddr_in sin;  
    memset(&sin, 0, sizeof(struct sockaddr_in));  
    sin.sin_family = AF_INET;  
    sin.sin_port = htons(9999);  
  
    event_base *base = event_base_new();  
    evconnlistener *listener  
            = evconnlistener_new_bind(base, listener_cb, base,  
                                      LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_FREE,  
                                      1000, (struct sockaddr*)&sin,  
                                      sizeof(struct sockaddr_in));  
  
    event_base_dispatch(base);  
  
    evconnlistener_free(listener);  
    event_base_free(base);  
  
    return 0;  
}  
  
  
//A new client has connected to the server.
//By the time this callback runs, libevent has already accept()ed the client;
//its file descriptor is fd.
void listener_cb(evconnlistener *listener, evutil_socket_t fd,  
                 struct sockaddr *sock, int socklen, void *arg)  
{  
    printf("accept a client %d.ip=%s->%d\n", fd, inet_ntoa(((sockaddr_in *)sock)->sin_addr), ntohs(((sockaddr_in *)sock)->sin_port));
  
    event_base *base = (event_base*)arg;  
  
    //Allocate a bufferevent for this client
    bufferevent *bev =  bufferevent_socket_new(base, fd,  
                                               BEV_OPT_CLOSE_ON_FREE);  
  
    bufferevent_setcb(bev, socket_read_cb, NULL, socket_event_cb, NULL);  
    bufferevent_enable(bev, EV_READ | EV_PERSIST);  
}  
  
  
void socket_read_cb(bufferevent *bev, void *arg)  
{  
    char msg[4096];  
  
    size_t len = bufferevent_read(bev, msg, sizeof(msg)-1 ); 
    
    int clientConfd = bufferevent_getfd(bev);
    struct sockaddr_in cliSock;
    socklen_t tmpslen = sizeof(cliSock);
    getpeername(clientConfd, (sockaddr *)(&cliSock), &tmpslen);
    msg[len] = '\0';  
    printf("%s,%d:server read the data %s\n", inet_ntoa(cliSock.sin_addr), ntohs(cliSock.sin_port), msg);
  
    char reply[] = "I have read your data";
    bufferevent_write(bev, reply, strlen(reply) );  
}  
  
  
void socket_event_cb(bufferevent *bev, short events, void *arg)  
{  
    if (events & BEV_EVENT_EOF)  
        printf("connection closed\n");  
    else if (events & BEV_EVENT_ERROR)  
        printf("some other error\n");  
  
    //This automatically closes the socket and frees the read/write buffers
    bufferevent_free(bev);  
}  

2. Write a Python script that opens many connections to the server at the same time, for debugging:

from socket import *
import time
import os
import math
import sys
import datetime
addr = ('192.168.6.48',9999)

clientTestNumber = 10000

def printdbg(pstr = ""):
    dt_ms = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    print("[%s][%s:%s:%s]%s"%(dt_ms, sys._getframe().f_back.f_code.co_filename, 
        sys._getframe().f_back.f_code.co_name, sys._getframe().f_back.f_lineno, pstr))

class timepy:
    def __init__(self):
        self.begintime = time.time()
    def timenow(self):
        return time.time()
    def begin(self):
        self.begintime = time.time()
    def interv(self):
        return time.time() - self.begintime
    def timenowstr(self):
        tmptnow = time.time()
        # print("%s.%s"%(tmptnow.strftime("%Y-%m-%d %H:%M:%S"), (".%3f"%(math.modf(tmptnow))))
        return time.strftime("%Y-%m-%d %H:%M:", time.localtime(tmptnow)) + ("%.3f"%(tmptnow % 60))

sockList = []
#os.popen('ulimit -n 20000')
def testconnectServer():
    for i in range(clientTestNumber):
        try:
            #print("connet index = %d"%(i))
            tmpTcpCliSock = socket(AF_INET,SOCK_STREAM)
            tmpTcpCliSock.connect(addr)
            #print(data)
            sockList.append(tmpTcpCliSock)
        except Exception as e:
            print("err->%d:%s"%(i, e))
            break
def tSendDataToSerser():
    for i in range(len(sockList)):
        try:
            strbytes = bytes("abcdef", encoding = "utf8")
            sockList[i].send(strbytes)
            data = sockList[i].recv(2048)
        except Exception as e:
            print("err->%d:%s"%(i, e))
def freeConnectServer():
    for node in sockList:
        try:
            node.close()
        except Exception as e:
            print(e)
printdbg(str(clientTestNumber))
tmptimec = timepy()
#print(tmptimec.timenowstr())
testconnectServer()
print('connect finish:%f'%(tmptimec.interv()))

tmptimec.begin()
tSendDataToSerser()
print('send finish:%f'%(tmptimec.interv()))

tmptimec.begin()
freeConnectServer()
print('free finish:%f'%(tmptimec.interv()))

# while True:
#     time.sleep(1)
sys.exit()

Test results:

2000 connections, listen backlog 1000:
$ python conTestClient.py 
2020-12-16 15:22:53.855
connect finish:0.039051
send finish:0.381684
free finish:0.010715
20000 connections, listen backlog 1000:
$ python conTestClient.py 
2020-12-16 15:34:53.056
connect finish:18.168898
send finish:3.826404
free finish:0.148340
20000 connections, listen backlog 5000:
$ python conTestClient.py 
2020-12-16 15:41:32.881
connect finish:17.182906
send finish:3.559645
free finish:0.157570
Could it be that the server's accept path has some hard limit? That guess is probably wrong; the following two tests lead me to this conclusion.
10000 connections, listen backlog 5000:
$ python conTestClient.py 
2020-12-16 15:47:54.656
connect finish:0.196735
send finish:1.842052
free finish:0.079848
Running the script on Windows and on Linux at the same time (each 10000 connections, listen backlog 5000) gives the following results:
Linux result:
$ python conTestClient.py 
[2020-12-16 16:09:06.178233][conTestClient.py:<module>:57]10000
connect finish:0.164447
send finish:3.635609
free finish:0.081261

Windows result:
================= RESTART: D:\lzwork\pythondir\serverconnect.py ================
[2020-12-16 16:08:26.664964][D:\lzwork\pythondir\serverconnect.py:<module>:57]10000
connect finish:5.184148
send finish:9.729989
free finish:0.236367
This shows it is not a libevent problem: the more connections the script holds, the slower it gets (probably because fewer local ports remain available, so finding one takes longer).
libevent handled tens of thousands of simultaneous connections and their data exchange! With a 30-second heartbeat, several hundred thousand long-lived client connections at the same time should also be fine (as long as not many clients are actively sending large amounts of data at the same moment).



Everything here is handled in a single thread; moving the send/receive work to multiple threads should multiply the throughput. Something to improve later. The goal: a single machine serving one million clients?

Improve the test Python program to issue requests from multiple threads:

from socket import *
import time
import os
import math
import sys
import datetime
import threading
addr = ('192.168.6.48',9999)

oneThreadConnectNumber = 5000
threadNumberNeed = 5
clientTestNumber = oneThreadConnectNumber * threadNumberNeed

def printdbg(pstr = ""):
    dt_ms = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    print("[%s][%s:%s:%s]%s"%(dt_ms, sys._getframe().f_back.f_code.co_filename, 
        sys._getframe().f_back.f_code.co_name, sys._getframe().f_back.f_lineno, pstr))

class timepy:
    def __init__(self):
        self.begintime = time.time()
    def timenow(self):
        return time.time()
    def begin(self):
        self.begintime = time.time()
    def interv(self):
        return time.time() - self.begintime
    def timenowstr(self):
        tmptnow = time.time()
        # print("%s.%s"%(tmptnow.strftime("%Y-%m-%d %H:%M:%S"), (".%3f"%(math.modf(tmptnow))))
        return time.strftime("%Y-%m-%d %H:%M:", time.localtime(tmptnow)) + ("%.3f"%(tmptnow % 60))

sockListLock = threading.Lock()
sockList = []
#os.popen('ulimit -n 20000')
def testconnectServer():
    tmpSockListBuf = []
    for i in range(oneThreadConnectNumber):
        try:
            #print("connet index = %d"%(i))
            tmpTcpCliSock = socket(AF_INET,SOCK_STREAM)
            tmpTcpCliSock.connect(addr)
            #print(data)
            tmpSockListBuf.append(tmpTcpCliSock)
        except Exception as e:
            print("err->%d:%s"%(i, e))
            break
    sockListLock.acquire()
    sockList.append(tmpSockListBuf)
    sockListLock.release()
def tSendDataToSerser(threadindex):
    for i in range(len(sockList[threadindex])):
        try:
            strbytes = bytes("abcdef", encoding = "utf8")
            sockList[threadindex][i].send(strbytes)
            data = sockList[threadindex][i].recv(2048)
        except Exception as e:
            print("err->%d:%s"%(i, e))
def freeConnectServer(threadindex):
    for node in sockList[threadindex]:
        try:
            node.close()
        except Exception as e:
            print(e)

runthreadlist = []
tmptimec = timepy()
printdbg('total number : %s;thread num:%d'%(str(clientTestNumber), threadNumberNeed))
for i in range(threadNumberNeed):
    runthreadlist.append(threading.Thread(target = testconnectServer))
for thnode in runthreadlist:
    thnode.start()
for thnode in runthreadlist:
    thnode.join()
print('connect finish:%f'%(tmptimec.interv()))

tmptimec.begin()
runthreadlist.clear()
for i in range(threadNumberNeed):
    runthreadlist.append(threading.Thread(target = tSendDataToSerser, args=(i,)))
for thnode in runthreadlist:
    thnode.start()
for thnode in runthreadlist:
    thnode.join()
print('send finish:%f'%(tmptimec.interv()))

tmptimec.begin()
runthreadlist.clear()
for i in range(threadNumberNeed):
    runthreadlist.append(threading.Thread(target = freeConnectServer, args=(i,)))
for thnode in runthreadlist:
    thnode.start()
for thnode in runthreadlist:
    thnode.join()
print('free finish:%f'%(tmptimec.interv()))
# while True:
#     time.sleep(1)
sys.exit()

Result:

[2020-12-16 17:46:07.874744][conTestClient.py:<module>:68]total number : 25000;thread num:5
connect finish:12.087830
send finish:1.750825
free finish:0.318889
This test used 25,000 connections. Sending and receiving are 2 to 3 times faster than before. The server handled over 14,000 client messages per second (25000 / 1.75 s); admittedly this is a fairly ideal setup with small payloads.
The long connect time is caused by the script; the server-side share of connection setup is tiny.

The following problems may come up during testing:

a. Fixing the socket error "too many open files"

The default per-process file-descriptor limit on Linux is 1024; ulimit -n 100000 fixes it (see the setrlimit sketch after the link below).

https://blog.csdn.net/qq_18298439/article/details/83896777
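The limit can also be raised from inside the program with setrlimit, which works where the commented-out system("ulimit -n 20000") in the server code above does not (that only changes the limit of the child shell). A sketch:

#include <sys/resource.h>
#include <stdio.h>

/* Sketch: raise the per-process fd limit at startup, e.g. raise_fd_limit(100000) in main(). */
static void raise_fd_limit(rlim_t want)
{
    struct rlimit rl;
    if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
        perror("getrlimit");
        return;
    }
    if (rl.rlim_cur < want) {
        rl.rlim_cur = want;
        if (rl.rlim_max < want)
            rl.rlim_max = want;   /* raising the hard limit needs root / CAP_SYS_RESOURCE */
        if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
            perror("setrlimit");
    }
}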

b. Causes and fixes for "Cannot assign requested address" (reposted)

Linux has at most 65535 ports, and in practice the error appeared once I reached roughly 28,000 connections, which matches the default ephemeral port range (net.ipv4.ip_local_port_range is 32768-60999 by default, about 28k usable client ports).

After a connection to the server is released, the local port stays in TIME_WAIT for a short while before it can be reused; some sources say about one minute.

Cannot assign requested address出现的原因及解决方案(转) _fzhqcjc的博客-CSDN博客

c. Converting between str and bytes in Python

Python's socket send() takes bytes, so strings have to be converted first.

python str与bytes之间的转换 - zqifa - 博客园

d. For configuring heartbeats, see: bufferevent 设置超时_樱桃诗翡雨的博客-CSDN博客

Summary: with the write event disabled and the write callback set to NULL, does calling the write function reset the timer? Testing against a server that never replies shows that bufferevent_write on the client does not reset the timer.

If only the read event is enabled, the timer is reset every time the read callback fires. So the client sends data to the server every 20 seconds and the server passively replies, which triggers a read event; with the timeout set to 62 seconds, three consecutive lost packets are treated as a broken link.

1. Set the timer by calling bufferevent_set_timeouts.

2. When the timer fires, the read/write events are cleared.

3. When a read event fires, the corresponding timer is reset, i.e. it starts counting from zero again.

Since I want to test link-failure detection with the timer, here is the client program, based on the advanced client from the reference: Libevent使用例子,从简单到复杂_luotuo44的博客-CSDN博客_libevent

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include <unistd.h>

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#include <event.h>
#include <event2/bufferevent.h>
#include <event2/buffer.h>
#include <event2/util.h>
#include <logBaseWork.h>
#include <stddef.h>
#include <execinfo.h>
#include <signal.h>

int tcp_connect_server(const char *server_ip, int port);
void cmd_msg_cb(int fd, short events, void *arg);
void server_msg_cb(struct bufferevent *bev, void *arg);
void event_cb(struct bufferevent *bev, short event, void *arg);

int main(int argc, char **argv)
{
    if (argc < 3)
    {
        //The two arguments are the server IP address and the port number
        printf("please input 2 parameters\n");
        return -1;
    }
    struct event_base *base = event_base_new();

    struct bufferevent *bev = bufferevent_socket_new(base, -1,
                                                     BEV_OPT_CLOSE_ON_FREE);

    //Watch the terminal (stdin) for input
    struct event *ev_cmd = event_new(base, STDIN_FILENO,
                                     EV_READ | EV_PERSIST,
                                     cmd_msg_cb, (void *)bev);

    event_add(ev_cmd, NULL);

    struct sockaddr_in server_addr;

    memset(&server_addr, 0, sizeof(server_addr));

    server_addr.sin_family = AF_INET;
    server_addr.sin_port = htons(atoi(argv[2]));
    inet_aton(argv[1], &server_addr.sin_addr);

    bufferevent_socket_connect(bev, (struct sockaddr *)&server_addr,
                               sizeof(server_addr));

    bufferevent_setcb(bev, server_msg_cb, NULL, event_cb, (void *)ev_cmd);
    bufferevent_enable(bev, EV_READ | EV_PERSIST);
    struct timeval tv = {10, 0};
    bufferevent_set_timeouts(bev, &tv, NULL);

    //event_base_dispatch returns once there are no more events.
    //The bufferevent connection and the ev_cmd stdin event were registered above;
    //once bufferevent_free releases the connection and event_free releases the stdin event, this call returns.
    //Note: the timeout from bufferevent_set_timeouts belongs to the bufferevent, so it is gone after bufferevent_free.
    event_base_dispatch(base);
    //The server program calls this as well to release the base
    event_base_free(base);
    printf("finished \n");
    return 0;
}

void cmd_msg_cb(int fd, short events, void *arg)
{
    char msg[1024];

    int ret = read(fd, msg, sizeof(msg));
    if (ret < 0)
    {
        perror("read fail ");
        exit(1);
    }

    struct bufferevent *bev = (struct bufferevent *)arg;

    //Forward the terminal input to the server
    bufferevent_write(bev, msg, ret);
    logwdbg("send data:%s", msg);
}

void server_msg_cb(struct bufferevent *bev, void *arg)
{
    char msg[1024];

    size_t len = bufferevent_read(bev, msg, sizeof(msg) - 1);
    msg[len] = '\0';
    logwdbg("recv %s from server\n", msg);
    //printf("recv %s from server\n", msg);
}

void event_cb(struct bufferevent *bev, short event, void *arg)
{
    logwdbg("event = %d\n", event);
    //BEV_EVENT_READING: the event relates to the read side; it accompanies the other flags
    //BEV_EVENT_EOF: triggered when the server actively closes the connection
    //BEV_EVENT_ERROR: triggered when connecting to the server fails
    //BEV_EVENT_TIMEOUT: set via bufferevent_set_timeouts; fires when no read/write event has reset the timer, i.e. no traffic with the server for too long; used to detect a dead link
    //BEV_EVENT_CONNECTED: the connection to the server succeeded
    if (event & BEV_EVENT_CONNECTED)
    {
        printf("the client has connected to server\n");
        return;
    }

    //This automatically closes the socket and frees the read/write buffers
    bufferevent_free(bev);

    struct event *ev = (struct event *)arg;
    event_free(ev);
}

e. Configuring watermarks with bufferevent_setwatermark; see: libevent笔记5:水位watermarks - 孙敏铭 - 博客园

In other words, the read callback only fires once the received data exceeds a certain length.
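A sketch, reusing the callbacks from the server code above (the 8/4096 values are arbitrary examples): the read low-watermark delays the callback until at least that many bytes are buffered, and the high-watermark makes libevent stop reading once that much input is pending.

/* only invoke socket_read_cb once >= 8 bytes have arrived;
 * stop pulling data from the socket once 4096 bytes are buffered */
bufferevent_setwatermark(bev, EV_READ, 8, 4096);
bufferevent_setcb(bev, socket_read_cb, NULL, socket_event_cb, NULL);
bufferevent_enable(bev, EV_READ);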

f. To use libevent from multiple threads, call evthread_use_pthreads() before any other libevent initialization.

In a multithreaded program, if evthread_use_windows_threads / evthread_use_pthreads is never called, event_base_dispatch blocks forever: even event_base_loopbreak or event_base_loopexit cannot make it leave the event loop.
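A minimal sketch of the required ordering (not from my project; link with -levent -levent_pthreads): with evthread_use_pthreads() called before event_base_new(), event_base_loopbreak from another thread wakes the loop; without it, the dispatch thread never notices.

#include <event2/event.h>
#include <event2/thread.h>
#include <pthread.h>
#include <unistd.h>

static struct event_base *g_base;

static void tick_cb(evutil_socket_t fd, short what, void *arg)
{
    /* dummy persistent timer, only here to keep the loop busy */
}

static void *loop_thread(void *arg)
{
    event_base_dispatch(g_base);   /* returns once loopbreak is honored */
    return NULL;
}

int main(void)
{
    evthread_use_pthreads();       /* must come before event_base_new() */
    g_base = event_base_new();

    struct timeval tv = {1, 0};
    struct event *tick = event_new(g_base, -1, EV_PERSIST, tick_cb, NULL);
    event_add(tick, &tv);

    pthread_t th;
    pthread_create(&th, NULL, loop_thread, NULL);

    sleep(5);                      /* ...real work would happen here... */

    event_base_loopbreak(g_base);  /* wakes the dispatch in the other thread */
    pthread_join(th, NULL);
    event_free(tick);
    event_base_free(g_base);
    return 0;
}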

g. Crashes in use: with libevent as a client, reconnecting every 20 seconds while the server is offline and with a 2-minute timeout configured, bufferevent_set_timeouts crashed after a number of reconnect attempts.

1. event_base_loop crashed when multiple threads kept reconnecting and disconnecting. See: 【Libevent】多线程使用bufferevent,解决多线程调用bufferevent_write方法后消息发送不出去的问题_阿卡基YUAN的博客-CSDN博客_bev_opt_threadsafe

evthread_use_pthreads();//enable threading support before event_base_new!

libeventRootBase = event_base_new();

bufferevent *bev = bufferevent_socket_new(libeventRootBase, fd, BEV_OPT_CLOSE_ON_FREE | BEV_OPT_THREADSAFE);//create the connection socket with BEV_OPT_THREADSAFE!

BEV_OPT_THREADSAFE automatically allocates locks for the bufferevent, so it can safely be used from multiple threads.

2. Do not call bufferevent_free inside the event callback event_cb, or it may crash: after the callback returns, the bufferevent may still be used somewhere else. Instead, mark the bufferevent in event_cb and free it later from the other thread (a sketch of this deferred-free pattern follows).
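A sketch of that deferred-free idea (the struct and field names here are made up for illustration, not from my real code): the event callback only disables and marks the bufferevent, and a worker thread does the actual bufferevent_free later.

struct clientCtx {
    struct bufferevent *bev;
    int needFree;                      /* set by the event callback, consumed by the worker */
};

static void mark_dead_cb(struct bufferevent *bev, short events, void *arg)
{
    struct clientCtx *ctx = (struct clientCtx *)arg;
    if (events & (BEV_EVENT_EOF | BEV_EVENT_ERROR | BEV_EVENT_TIMEOUT)) {
        bufferevent_disable(bev, EV_READ | EV_WRITE);   /* stop further callbacks */
        ctx->needFree = 1;                              /* do NOT free here */
    }
}

/* called periodically from the worker/maintenance thread */
static void reap_dead_connections(struct clientCtx *list, int n)
{
    for (int i = 0; i < n; i++) {
        if (list[i].needFree && list[i].bev) {
            bufferevent_free(list[i].bev);              /* freed outside the libevent callback */
            list[i].bev = NULL;
            list[i].needFree = 0;
        }
    }
}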

3. Crash in bufferevent_set_timeouts; see: libevent设置超时后取消超时(bufferevent_set_timeouts取消超时无效问题)_glen30的博客-CSDN博客

In my case the cause was ordering: bufferevent_set_timeouts has to be called before bufferevent_enable enables the read event.

bufferevent_setcb(hcpcWorkList[i].bev, server_msg_cb, NULL, event_cb, (void *)&hcpcWorkList[i]);

//bufferevent_enable(hcpcWorkList[i].bev, EV_READ | EV_PERSIST);

struct timeval tv = {120, 0};

bufferevent_set_timeouts(hcpcWorkList[i].bev, &tv, NULL);//calling this after bufferevent_enable caused the crash; move it before bufferevent_enable

bufferevent_enable(hcpcWorkList[i].bev, EV_READ | EV_PERSIST);

Looking at the source of bufferevent_set_timeouts, passing NULL for a timeout should clear the corresponding read or write timeout, but in my tests passing NULL had no effect: the timeout event still fired.

The workaround is to call bufferevent_set_timeouts inside the timeout event handler to cancel the timeout, and then re-enable the read event.
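Roughly, that looks like this inside the event callback (a sketch with a made-up callback name; my actual code frees the bufferevent and reconnects instead, as in the listing below):

static void timeout_only_event_cb(struct bufferevent *bev, short event, void *arg)
{
    if (event & BEV_EVENT_TIMEOUT) {
        bufferevent_set_timeouts(bev, NULL, NULL);  /* cancel the timeout from inside its handler */
        bufferevent_enable(bev, EV_READ);           /* re-enable reading, which the timeout disabled */
        return;
    }
    /* EOF / ERROR / CONNECTED handled as usual */
}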

void freeConnectBuffEvent(parseHostClientClass *hcinf)
{
    if(!hcinf)
    {
        return;
    }
    if(hcinf->bev)
    {
        bufferevent_set_timeouts(hcinf->bev, NULL, NULL);
        bufferevent_enable (hcinf->bev, EV_READ);

        bufferevent_free(hcinf->bev);
        hcinf->bev = NULL;
    }
}

void initHostClientInform()
{
    for(int i = 0; i < HOST_CLIENT_MAX_NUMBER; i++)
    {
        hcpcWorkList[i].servIp = clientBackIplist[i];
        hcpcWorkList[i].servPort = clientBackPortlist[i];
        hcpcWorkList[i].bev = NULL;
    }
}

static void clientTimeout_cb(int fd, short event, void *arg)
{
    //logwdbg("run clientTimeout_cb\n");
}

void libClientEventWork()
{
    evthread_use_pthreads();
    base = event_base_new();
    struct event basetimeout;
    if(base == NULL)
    {
        logwerr("err");
        return;
    }
    event_assign(&basetimeout, base, -1, EV_PERSIST, clientTimeout_cb, (void*) &basetimeout);
    struct timeval tv;
    tv.tv_sec = 20;
    tv.tv_usec = 0;
    event_add(&basetimeout, &tv);
    event_base_dispatch(base);
    event_base_free(base);
    logwdbg("finished");
}

void libClientCommunicatWork()
{
    int tmpFlag, i;
    std::string tmpStr;
    initHostClientInform();
    backClientRunflag = 1;
    timerc respontTask, hb_reconTime;
    while(backClientRunflag)
    {
        usleep(10 * 1000);
        msgWrkCls::instance()->rcvmsg(tmpStr, msgTypeIdHostCli);
        if(respontTask.intms() > 5000 || tmpStr == "1")
        {
            respontTask.begin();
        }
        
        if(hb_reconTime.ints() < 20)
        {
            continue;
        }
        hb_reconTime.begin();
        for(i = 0; i < HOST_CLIENT_MAX_NUMBER; i++)
        {        
            if(!(hcpcWorkList[i].bev != NULL && hcpcWorkList[i].servIp != "" && hcpcWorkList[i].servPort != ""))
            {
                continue;
            }
            logwdbg("%p,%s,%s", hcpcWorkList[i].bev, hcpcWorkList[i].servIp.c_str(), hcpcWorkList[i].servPort.c_str());
            hcpcWorkList[i].sendHeartbeatToServer();
            // hcpcWorkList[i].sendOnlineDevsToServer();
            // hcpcWorkList[i].getTableInformCom(&tabHostWrkRegister);
            // hcpcWorkList[i].getTableInformCom(&tabHostWrkParm);
        }
        // logwdbg("-->");
        for(i = 0; i < HOST_CLIENT_MAX_NUMBER; i++)
        {
            if(!(hcpcWorkList[i].bev == NULL && hcpcWorkList[i].servIp != "" && hcpcWorkList[i].servPort != ""))
            {
                continue;
            }
            hcpcWorkList[i].bev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);//note: BEV_OPT_THREADSAFE is not added here
            if(hcpcWorkList[i].bev == NULL)
            {
                logwerr("err");
                continue;
            }
            struct sockaddr_in server_addr;
            memset(&server_addr, 0, sizeof(server_addr));
            server_addr.sin_family = AF_INET;
            server_addr.sin_port = htons(atoi(hcpcWorkList[i].servPort.c_str()));
            inet_aton(hcpcWorkList[i].servIp.c_str(), &server_addr.sin_addr);
            tmpFlag = bufferevent_socket_connect(hcpcWorkList[i].bev, (struct sockaddr *)&server_addr,
                                    sizeof(server_addr));
            logwdbg("%s:%s,%d", hcpcWorkList[i].servIp.c_str(), hcpcWorkList[i].servPort.c_str(), tmpFlag);
            if(tmpFlag < 0)
            {
                logwerr("err");
                continue;
            }
            hcpcWorkList[i].initParseClass();
            bufferevent_setcb(hcpcWorkList[i].bev, server_msg_cb, NULL, event_cb, (void *)&hcpcWorkList[i]);
            bufferevent_enable(hcpcWorkList[i].bev, EV_READ | EV_PERSIST);
            struct timeval tv = {120, 0};
            bufferevent_set_timeouts(hcpcWorkList[i].bev, &tv, NULL);
        }
    }

}

void beginHostClientWork()
{
    hostClientThead = std::thread(libClientEventWork);
    hostClientConnectThead = std::thread(libClientCommunicatWork);
}

void exitHostClientWork()
{
    backClientRunflag = 0;
    // for(int i = 0; i < HOST_CLIENT_MAX_NUMBER; i++)
    // {
    //     freeConnectBuffEvent(&clientBackList[i]);
    // }
    if(base)
    {
        event_base_loopbreak(base);
        event_base_loopexit(base, NULL);
    }
    hostClientConnectThead.join();
    hostClientThead.join();
}

void server_msg_cb(struct bufferevent *bev, void *arg)
{
    static char msg[16 * 1024];
    size_t len = bufferevent_read(bev, msg, sizeof(msg));
    if(len < sizeof(msg))
    {
        msg[len] = '\0';
    }
    logwdbg("recv[%ld]", len);
    // logwdbg("recv[%ld]:%s", len, msg);
    parseHostClientClass *tmpClient = (parseHostClientClass *)arg;
    if(!tmpClient)
    {
        logwerr("err:%p", tmpClient);
        return;
    }
    tmpClient->parseData(msg, len);
}

void event_cb(struct bufferevent *bev, short event, void *arg)
{
    logwdbg("event = 0x%x\n", event);
    parseHostClientClass *tmpClient = (parseHostClientClass *)arg;
    if(!tmpClient)
    {
        return;
    }
    //BEV_EVENT_READING: the event relates to the read side; it accompanies the other flags
    //BEV_EVENT_EOF: triggered when the server actively closes the connection
    //BEV_EVENT_ERROR: triggered when connecting to the server fails
    //BEV_EVENT_TIMEOUT: set via bufferevent_set_timeouts; fires when no read/write event has reset the timer, i.e. no traffic with the server for too long; used to detect a dead link
    //BEV_EVENT_CONNECTED: the connection to the server succeeded
    if ((event & BEV_EVENT_EOF) || (event & BEV_EVENT_ERROR))
    {
        //This automatically closes the socket and frees the read/write buffers
        // logwdbg("%p", tmpClient->bev);
        freeConnectBuffEvent(tmpClient);
    }
    else if (event & BEV_EVENT_CONNECTED)
    {
        tmpClient->sendLoginToServer();
        return;
    }
    else if (event & BEV_EVENT_TIMEOUT)
    {
        freeConnectBuffEvent(tmpClient);
        return;
    }
}

h. event_base_loopbreak and event_base_loopexit failing to get event_base_dispatch to return; adding evthread_use_pthreads before event_base_new did not help either. My approach: before exiting, free all connections with bufferevent_free and release the listener with evconnlistener_free; after that, event_base_loopbreak does make the loop exit. Calling event_base_free right after event_base_loopexit crashed.
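A sketch of that shutdown order (the list and count parameters are placeholders for however the connections are actually tracked):

void shutdownServer(struct event_base *base, struct evconnlistener *listener,
                    struct bufferevent **bevList, int bevCount)
{
    for (int i = 0; i < bevCount; i++) {      /* 1. drop every client connection */
        if (bevList[i]) {
            bufferevent_free(bevList[i]);
            bevList[i] = NULL;
        }
    }
    if (listener)                             /* 2. stop accepting new clients */
        evconnlistener_free(listener);

    event_base_loopbreak(base);               /* 3. now event_base_dispatch can return */
    /* event_base_free(base) runs after event_base_dispatch returns, in the loop thread */
}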
