一. IO密集测试的概念
说白了就是大量的client对server进行连接或断开,同时也只进行连接和断开,并不做大量的数据处理。
IO密集测试就是检测server程序应对大量IO请求时的处理能力。也是C10k成功完成的标志之一。
二. 我的IO压力测试方案
测试需求:
1.参考聊天服务器应对客户端时的情形:客户的链接和断开完全是随机发生的。
2.可以连接10000个客户端程序。
3.保证每一个客户端与服务器是连通的,即服务器可以接收到客户端发来的数据。
4.服务可以操作客户端发来的数据,同时发送出去。
所以针对以上需求分别提出以下解决方法:
1.测试客户端实现随机的断开和链接服务器
生成1-100的随机数x,通过判断x所在区间,生成概率事件。连接不同的概率事件和客户端操作以实现client随机的断开与连接server。
例:系统随机生成一个数x,x落在区间[0-50)让客户端执行连接服务器操作,如果落在[50-100)让客户端执行断开服务器操作。
2.测试client可以随机断开和连接server的同时可以达到10000的连接数
方案一:直接socket10000次,先链接10000个client,然后对其随机的断开重连。
方案二:动态的链接和断开client和server之间的链接,让其缓慢增加最后的链接的总数稳定在10000左右。
参考聊天服务器的工作状态,我选择了方案二,以下描述方案二的实现。
基本实现思路基于1.测试客户端实现随机的断开和链接服务器的概率事件,这里根据已建立的连接数动态地改变连接和断开的概率,如图:
纵轴为概率值,横轴为已建立的连接总数。
在实现时为了减少计算量我定义了阈值和对应的概率,没有使用线性关系表达式通过连接数来计算概率。
举例程序中处理流程(连接数在区间(1000,1999]时的处理过程,连接概率为0.95,断开概率为0.05)如图:
在代码实现时,我以1000为单位对10000分了10个区间,分别对应不同的概率来处理对应的事件。
3.测试client创建的所有连接都是可用的
我的测试方法是,向所有socket描述符写数据,检查服务器是否可以收到这些数据。
根据以上方案提出以下需求:
1.动态的保存所有已连接的socket描述符,并抛弃断开链接的socket描述符。
2.向所有的socket描述符发送数据。
对这两个问题的解决:
1).动态的保存或丢弃socket描述符
这里我使用了c++的vector存放socket建立的描述符,因为它比数组好用。具体理由可以查看手册。
在socket创建链接后,把生成的链接描述符,添加到vector队尾;断开连接时把vector队头的描述符销毁释放。
结构如图:
2).向所有socket描述符发送数据
基于1.动态的保存或丢弃socket描述符的实现,发送数据只要把vector队列所有的socket描述符遍历一遍,并向其发送数据。
需要注意的是,读到最后一个时的处理;以及遍历Vector的速度一定要大于连接断开和连接的速度(因为队列是动态的,如果断开和连接的速度大于或等于遍历的速度,可能会造成对描述符的漏读)。
结构如图:
4.server得到数据后的处理
为了体现服务器的性能而不是客户端的性能,我在这里做了简单傻瓜化的处理,将服务器接收到的数据统一发送到指定的客户端,这个特殊的客户端只负责接收服务器发来的数据。
它与服务器和测试客户端的关系如图:
以上就是服务器测试的一个基本方案以及问题的解决思路。
三. 测试客户端实现
1.c10k IO client
主函数文件:
#include "simulation.h"
typedef std::vector<int> ReadList;
extern ReadList readlist;
/*
 * Writer thread: sweep the global readlist forever and send a greeting
 * to every currently-connected socket descriptor.
 *
 * Fixes vs. the original:
 *  - readlist.size() is a size_t but was printed with %d (undefined
 *    behavior on LP64); now printed with %zu via snprintf (bounded).
 *  - write() sent the whole 50-byte array, trailing garbage included;
 *    now only the formatted string (plus its NUL) is sent.
 *
 * NOTE(review): readlist is mutated concurrently by main() /
 * probabilitySelect() with no lock — that data race is outside this
 * block; a mutex shared with the producer would be needed to fix it.
 */
void *writeData_pthread(void * arg)
{
    int sockfd;
    size_t i = 0;
    char buf[80];
    ssize_t n = 0;
    while (1)
    {
        if (i < readlist.size())
        {
            sockfd = readlist[i];
            i++;
            snprintf(buf, sizeof(buf),
                     "Hallo Sever I am Client NO. %d , szie: %zu",
                     sockfd, readlist.size());
            // send only the used portion of the buffer, incl. the NUL
            n = write(sockfd, buf, strlen(buf) + 1);
            if (n < 0)
            {
                std::cout << "\033[32m" << "error : sockfd:" << sockfd << "\033[0m" << std::endl;
            }
            usleep(1000);
        }else{
            i = 0;  // reached the end of the list: start a new sweep
        }
    }
    return NULL;  // unreachable; silences -Wreturn-type
}
/*
 * Driver loop of the stress client: forever, pick the probability tier
 * for the current connection count and run one connect-or-close event,
 * logging progress every 100 operations.
 *
 * Fix: rand() (used by randomData via probabilityEvent) was never
 * seeded, so every run produced the identical "random" event sequence;
 * srand(time(NULL)) restores the intended randomness.
 */
int main()
{
    int i = 0;
    int connectNum = 0;
    pthread_t wid;
    srand(time(NULL));  // seed the RNG once per run
    pthread_create(&wid, NULL, writeData_pthread, NULL);
    while (1)
    {
        connectNum = readlist.size();
        probabilityEvent(i++, randomRank(connectNum));
        if ((i % 100) == 0)
        {
            std::cout << "\033[31m" << "connects number:" << connectNum
                      << " operation number:" << i << "\033[0m" << std::endl;
        }
        usleep(10000);
    }
    // Unreachable: the loop above never exits. Kept for symmetry.
    pthread_join(wid, NULL);
    return 0;
}
与概率事件执行操作的文件:
头文件:
#ifndef SIMULATION_H
#define SIMULATION_H
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <vector>
#include <string.h>
#include <pthread.h>
//#define RAND_MAX 32767
// Connection-count tier: RANK_CONNECT_N labels the band of roughly
// [(N-1)*1000, N*1000) established connections; EQUILIBRIUM_VALUE means
// the 10000-connection target is reached (connect/close held at 50/50).
enum RANK_CONNECT
{
RANK_CONNECT_1 = 1, RANK_CONNECT_2 = 2, RANK_CONNECT_3 = 3, RANK_CONNECT_4 = 4, RANK_CONNECT_5 = 5,
RANK_CONNECT_6 = 6, RANK_CONNECT_7 = 7, RANK_CONNECT_8 = 8, RANK_CONNECT_9 = 9, RANK_CONNECT_10 =10,
EQUILIBRIUM_VALUE = 11
};
// Probability thresholds in percent, compared against a draw in [0,100).
enum RANK_PROB
{
/*close: thresholds used as disconnect probabilities*/
RANK_PROB_0 = 0,RANK_PROB_0_05 = 5,RANK_PROB_0_1 = 10,RANK_PROB_0_15 = 15,
RANK_PROB_0_2 = 20,RANK_PROB_0_25 = 25,RANK_PROB_0_3 = 30,RANK_PROB_0_35 = 35,
RANK_PROB_0_4 = 40,RANK_PROB_0_45 = 45,RANK_PROB_0_5 = 50,
/*connect: thresholds used as connect probabilities*/
RANK_PROB_0_55 = 55,RANK_PROB_0_6 = 60,RANK_PROB_0_65 = 65,RANK_PROB_0_7 = 70,
RANK_PROB_0_75 = 75,RANK_PROB_0_8 = 80,RANK_PROB_0_85 = 85,RANK_PROB_0_9 = 90,
RANK_PROB_0_95 = 95,RANK_PROB_1 = 100
};
int randomData(int min, int max);
int randomRank(long sockfdNum);
int probabilitySelect(int i, int probabConnect, int probabClose);
int probabilityEvent(int i,int rankConnet);
#endif // SIMULATION_H
程序文件:
#include "simulation.h"
#include "myclient.h"
// Global FIFO of currently-connected socket descriptors: new connections
// are appended at the back, disconnects remove from the front.
typedef std::vector<int> ReadList;
ReadList readlist;
/*
> randrom a nuber in [min,max).
*/
/*
 * Return a pseudo-random integer in the half-open range [min, max).
 */
int randomData(int min, int max)
{
    const int span = max - min;
    return min + rand() % span;
}
/*
> random rank about sockfd number
*/
/*
 * Map the number of established connections to its 1000-wide tier:
 * [0,1000) -> RANK_CONNECT_1, [1000,2000) -> RANK_CONNECT_2, ...,
 * [9000,10000) -> RANK_CONNECT_10, and >= 10000 -> EQUILIBRIUM_VALUE.
 *
 * Fixes two bugs in the original if-cascade: the tier-4 bound was 5000
 * instead of 4000 (so [3000,5000) collapsed into one tier), and the
 * 5000 bound was duplicated as 6000, making RANK_CONNECT_6 unreachable.
 */
int randomRank(long sockfdNum)
{
    if (sockfdNum >= 10000)
    {
        return EQUILIBRIUM_VALUE;
    }
    if (sockfdNum < 1000)  // also covers (impossible) negative counts
    {
        return RANK_CONNECT_1;
    }
    // Uniform 1000-wide bands; RANK_CONNECT_N has the numeric value N.
    return (int)(sockfdNum / 1000) + 1;
}
/*
> do samething
*/
/*
 * Run one probability event: draw a number in [0,100); if it is below
 * probabConnect, open a new connection to 127.0.0.1:8000 and push its
 * descriptor onto the readlist; otherwise close the oldest connection.
 * Returns 0 on success, -1 if socket creation or connect fails.
 *
 * Fixes vs. the original:
 *  - front()/erase() were called on a possibly-empty vector (UB);
 *  - socket()/connect() results were never checked;
 *  - the function was declared int but had no return statement (UB);
 *  - a fragile nested comment block and unused locals were removed.
 */
int probabilitySelect(int i, int probabConnect, int probabClose)
{
    (void)i;           // kept for interface compatibility
    (void)probabClose; // implied by 100 - probabConnect
    int number = randomData(0, 100);
    if (number < probabConnect)
    {
        /* open a new connection */
        int sockfd = socket(AF_INET, SOCK_STREAM, 0);
        if (sockfd < 0)
        {
            perror("socket");
            return -1;
        }
        struct sockaddr_in serveraddr;
        bzero(&serveraddr, sizeof(serveraddr));
        serveraddr.sin_family = AF_INET;
        inet_pton(AF_INET, "127.0.0.1", &serveraddr.sin_addr);
        serveraddr.sin_port = htons(8000);
        if (connect(sockfd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)) < 0)
        {
            perror("connect");  // was silently ignored
            close(sockfd);
            return -1;
        }
        readlist.push_back(sockfd);
    }else{
        /* close the oldest connection (FIFO order) */
        if (readlist.empty())
        {
            return 0;  // nothing to close; front() on empty vector is UB
        }
        close(readlist.front());
        readlist.erase(readlist.begin());
    }
    return 0;
}
/*
> return probability event [flag] about [sockfd number]
*/
/*
 * Dispatch one probability event for the given connection tier: look up
 * the (connect%, close%) pair for rankConnet and run probabilitySelect.
 * Returns 0 for a known tier, -1 otherwise (the original switch default
 * did nothing and the function fell off the end of a non-void body — UB).
 */
int probabilityEvent(int i,int rankConnet)
{
    // probs[tier-1] = { connect threshold, close threshold }, indexed by
    // RANK_CONNECT_1..EQUILIBRIUM_VALUE (values 1..11).
    static const int probs[][2] = {
        {RANK_PROB_1,    RANK_PROB_0},     /* RANK_CONNECT_1    */
        {RANK_PROB_0_95, RANK_PROB_0_05},  /* RANK_CONNECT_2    */
        {RANK_PROB_0_9,  RANK_PROB_0_1},   /* RANK_CONNECT_3    */
        {RANK_PROB_0_85, RANK_PROB_0_15},  /* RANK_CONNECT_4    */
        {RANK_PROB_0_8,  RANK_PROB_0_2},   /* RANK_CONNECT_5    */
        {RANK_PROB_0_75, RANK_PROB_0_25},  /* RANK_CONNECT_6    */
        {RANK_PROB_0_7,  RANK_PROB_0_3},   /* RANK_CONNECT_7    */
        {RANK_PROB_0_65, RANK_PROB_0_35},  /* RANK_CONNECT_8    */
        {RANK_PROB_0_6,  RANK_PROB_0_4},   /* RANK_CONNECT_9    */
        {RANK_PROB_0_55, RANK_PROB_0_45},  /* RANK_CONNECT_10   */
        {RANK_PROB_0_5,  RANK_PROB_0_5},   /* EQUILIBRIUM_VALUE */
    };
    if (rankConnet < RANK_CONNECT_1 || rankConnet > EQUILIBRIUM_VALUE)
    {
        return -1;  // unknown tier: do nothing, like the original default
    }
    probabilitySelect(i, probs[rankConnet - 1][0], probs[rankConnet - 1][1]);
    return 0;
}
其他文件,与此测试没有实际联系:
#ifndef MYCLIENT_H
#define MYCLIENT_H
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <string.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <time.h>
#define RANDOM 0
#define DELAYED 1
/*
 * Read characters from stdin into buf until a newline, EOF, or the
 * buffer is full; buf is always NUL-terminated.
 *
 * Fixes vs. the original:
 *  - `char c` cannot reliably represent EOF, so end-of-input looped
 *    forever; `int c` with an explicit EOF check fixes that.
 *  - the original could store all len bytes before breaking, leaving no
 *    NUL terminator; one byte is now reserved for it.
 *  - the signed/unsigned comparison (int i vs size_t len) is gone.
 */
void inputString(char *buf, size_t len)
{
    int c;       // int, not char: getchar() signals EOF out of char range
    size_t i = 0;
    if (len == 0)
    {
        return;  // no room even for the terminator
    }
    bzero(buf, len);
    while ((c = getchar()) != '\n' && c != EOF)
    {
        if (i >= len - 1)  // keep one byte for '\0'
        {
            break;
        }
        buf[i++] = (char)c;
    }
    buf[i] = '\0';
}
/*
 * Format the current local time into buf as "<YYYY-MM-DD hh:mm:ss>".
 * The caller's buffer must hold at least 22 bytes.
 */
void set_time(char *buf)
{
    const time_t now = time(NULL);
    tm *lt = localtime(&now);
    sprintf(buf, "<%d-%02d-%02d %02d:%02d:%02d>",
            lt->tm_year + 1900, lt->tm_mon + 1, lt->tm_mday,
            lt->tm_hour, lt->tm_min, lt->tm_sec);
}
/*
 * Compose the wire message "TIME:<timestamp>\t<payload>" into wbuf.
 * wbuf must be large enough for both inputs plus the fixed prefix.
 */
void makebuf(char *timebuf, char *buf, char *wbuf)
{
    strcpy(wbuf, "TIME:");
    strcat(wbuf, timebuf);
    strcat(wbuf, "\t");
    strcat(wbuf, buf);
}
/*
 * Transmit wbuf on sockfd in one of two modes:
 *  - DELAYED: always send, then sleep `sec` seconds (negative sec -> 0);
 *  - RANDOM or any unknown mode: send with a probability derived from
 *    the current wall-clock second, then sleep one second.
 *
 * Fix: the original reached the random branch for unknown modes via a
 * `goto` that jumped from the trailing else INTO the RANDOM if-block —
 * legal here but fragile and hard to follow; a plain if/else with the
 * same fall-through behavior replaces it.
 */
void random_delayed_tx(int mode, int sec, int sockfd, char *wbuf, int wbuflen)
{
    if (mode == DELAYED)
    {
        if (sec <= 0)
        {
            sec = 0;
        }
        std::cout << "SET IS :" << wbuf << std::endl;
        write(sockfd, wbuf, wbuflen);
        sleep(sec);
    }
    else  /* RANDOM, or any unknown mode (original: goto random_mode) */
    {
        int x = rand() % 11;
        time_t tt = time(NULL);
        tm *t = localtime(&tt);
        // send only when the current second beats the random threshold
        if ((t->tm_sec) % 10 > x)
        {
            std::cout << "SET IS :" << wbuf << std::endl;
            write(sockfd, wbuf, wbuflen);
        }
        sleep(1);
    }
}
#endif //MYCLIENT_H
2.read client
其中头文件与上面的头文件一样,如果读到数据就打印:
#include "myclient.h"
int main()
{
int sockfd;
sockfd = socket(AF_INET, SOCK_STREAM, 0);
struct sockaddr_in serveraddr;
bzero(&serveraddr, sizeof(serveraddr));
serveraddr.sin_family = AF_INET;
inet_pton(AF_INET, "127.0.0.1", &serveraddr.sin_addr);
serveraddr.sin_port = htons(8000);
connect(sockfd, (struct sockaddr *)&serveraddr, sizeof(serveraddr));
write(sockfd, "hallo" ,5);
int n;
char buf[100];
char timebuf[50];
char wbuf[100];
while(1)
{
if ((n = read(sockfd, buf, sizeof(buf))) > 0)
{
std::cout << "\033[34mFrom systeam:" << buf << "\033[0m"<< std::endl;
}
usleep(100);
}
close(sockfd);
return 0;
}
四. 测试时服务器出现的问题与调整
前面的文章已经贴出了代码,看过的人知道我只用了一把锁,但是当时我并没有注意到这个问题,我把虚拟机从单核改为多核时服务器只能接收部分来自客户端的数据。
还有我并没有判断pthread_cond_wait条件,直接使用导致程序不稳定。
以及为了配合两个测试客户端的测试,服务器代码也做了一些细微的调整,修改如下:
#include "pthread_pool.h"
extern std::vector<struct epoll_event> events;
extern int epollfd;
typedef std::vector<struct WorkNode> WorkList;
WorkList waitlist;//work list: jobs queued for the write workers
typedef std::vector<int> ReadList;
ReadList readlist; //read list,[events id] indices into `events` awaiting a reader
pthread_cond_t has_read = PTHREAD_COND_INITIALIZER; //signalled when readlist gains an entry (paired with `lock` in set_readList)
pthread_cond_t has_write = PTHREAD_COND_INITIALIZER; //signalled when waitlist gains an entry (paired with `lock3` in set_workList)
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; //mutex guarding readlist
pthread_mutex_t lock2 = PTHREAD_MUTEX_INITIALIZER; //mutex [change 1: added lock2] held by read_worker's loop
pthread_mutex_t lock3 = PTHREAD_MUTEX_INITIALIZER; //mutex [change 2: added lock3] guarding waitlist
pthread_mutex_t lock4 = PTHREAD_MUTEX_INITIALIZER; //mutex [change 3: added lock4] held by write_worker's loop
pthread_t nrTid[4]; //reader thread ids; array size 4 is the max thread count
pthread_t nwTid[4]; //writer thread ids
int prhread_read_num = 0;
int prhread_write_num = 0;
// Producer side of the read queue: append event index i and wake one
// reader. Push and signal happen under `lock` so a wakeup cannot be
// lost between push_back and a consumer's wait.
// NOTE(review): read_worker currently waits on has_read while holding
// lock2, not `lock` — the mutexes are mismatched; verify against
// read_worker.
void set_readList(int i)
{
pthread_mutex_lock(&lock);
readlist.push_back(i);
pthread_cond_signal(&has_read);
pthread_mutex_unlock(&lock);
}
// Pop and return the oldest queued event index (FIFO).
// Precondition: the caller holds the queue mutex and readlist is
// non-empty — front()/erase() on an empty vector is undefined behavior.
int get_readList()
{
int eventId;
eventId = readlist.front();
readlist.erase(readlist.begin());
return eventId;
}
// Producer side of the write queue: copy (connfd, wBuf) into a WorkNode,
// append it, and wake one writer. Push and signal both happen under
// lock3 so the wakeup cannot be lost.
void set_workList(int connfd, char* wBuf)
{
WorkNode writeWork;
pthread_mutex_lock(&lock3); //[change 4: take lock3 for the write queue]
writeWork.connfd = connfd;
strcpy(writeWork.wBuf,wBuf); // assumes wBuf fits WorkNode::wBuf — TODO confirm sizes
waitlist.push_back(writeWork);
pthread_cond_signal(&has_write); //[change 5: signal from inside this function]
pthread_mutex_unlock(&lock3); //[change 6: release lock3]
}
// Pop and return (by copy) the oldest queued write job (FIFO).
// Precondition: the caller holds the queue mutex and waitlist is
// non-empty — front()/erase() on an empty vector is undefined behavior.
WorkNode get_workList()
{
WorkNode writeWork;
writeWork.connfd = waitlist.front().connfd;
strcpy(writeWork.wBuf ,waitlist.front().wBuf);
waitlist.erase(waitlist.begin());
return writeWork;
}
/*
 * Spawn pthreadNum reader threads running read_worker and record the
 * count in prhread_read_num. Returns 0 when all creations succeed (any
 * pthread_create failure terminates the process, as before).
 *
 * Fixes vs. the original:
 *  - `error` was accumulated without ever being initialized (UB read);
 *  - the per-thread VLA `int err[pthreadNum]` (non-standard C++) is gone;
 *  - pthreadNum larger than the nrTid[] capacity overflowed the array;
 *    it is now clamped.
 */
int make_read_worker(int pthreadNum)
{
    const int capacity = (int)(sizeof(nrTid) / sizeof(nrTid[0]));
    if (pthreadNum > capacity)
    {
        pthreadNum = capacity;  // never write past nrTid[]
    }
    int error = 0;
    prhread_read_num = pthreadNum;
    for (int i = 0; i < pthreadNum; i++)
    {
        int err = pthread_create(&nrTid[i], NULL, read_worker, NULL);
        if (err != 0)
        {
            std::cout << "make pthread error :" << err << std::endl;
            exit(1);
        }
        error += err;
        std::cout << "\033[32mNO.\033[0m" << i + 1
                  << "\033[32m, pthread creation successful!\033[0m" << std::endl;
    }
    return error;
}
/*
 * Reader thread: wait for an event index on readlist, drain the socket,
 * then re-arm the fd for EPOLLOUT and queue the payload for the writers.
 *
 * Fixes vs. the original:
 *  - pthread_cond_wait(&has_read, &lock) was called while holding lock2;
 *    POSIX requires the mutex passed to cond_wait to be the one the
 *    caller holds, so that was undefined behavior. The queue is now
 *    guarded by `lock` — the same mutex set_readList signals under.
 *  - the `if` around cond_wait became `while`: spurious wakeups (and a
 *    faster sibling consumer) can leave the queue empty on return.
 *  - the queue lock is released before the blocking socket/epoll work,
 *    so readers run concurrently instead of serializing all I/O.
 *  - buf is NUL-terminated before being printed (read() does not do it).
 */
void *read_worker(void *arg)
{
    pthread_t tid = pthread_self();  // this worker's id, for log lines
    int eventsID;
    int connfd, n, nread;
    struct epoll_event epfd;
    char buf[100];
    std::cout << "???? I am READ worker ????" << tid << std::endl;
    while (1)
    {
        n = 0;
        pthread_mutex_lock(&lock);
        while (readlist.size() < 1)  // while, not if: spurious wakeups
        {
            pthread_cond_wait(&has_read, &lock);  // same mutex as set_readList
        }
        eventsID = get_readList();
        connfd = events[eventsID].data.fd;
        pthread_mutex_unlock(&lock);  // don't hold the queue lock during I/O
        while ((nread = read(connfd, buf, sizeof(buf) - 1)) > 0)  // drain fd
        {
            buf[nread] = '\0';  // terminate: read() leaves raw bytes only
            n += nread;
        }
        if (n > 0)
        {
            // NOTE: only the last chunk read survives in buf; earlier
            // chunks are overwritten (unchanged from the original design).
            std::cout << tid << "::" << connfd << " Date: [" << buf << "]"
                      << "events " << eventsID << std::endl;
            epfd.data.fd = connfd;
            epfd.events = events[eventsID].events | EPOLLOUT;
            if (epoll_ctl(epollfd, EPOLL_CTL_MOD, connfd, &epfd) == -1)
            {
                std::cout << "epoll_ctl return -1" << std::endl;
                exit(1);
            }
            set_workList(connfd, buf);
        }
        else if (nread == 0)  // orderly shutdown by the peer
        {
            std::cout << connfd << "is go" << std::endl;
            close(connfd);
            epfd = events[eventsID];
            epoll_ctl(epollfd, EPOLL_CTL_DEL, connfd, &epfd);
        }
        else  // nread < 0 [change 9]
        {
            if (errno != EAGAIN)  // EAGAIN just means the fd is drained
            {
                std::cout << " eventsID:" << eventsID << "connfd:" << connfd << std::endl;
                perror("read:");
            }
        }
    }
    return NULL;  // unreachable
}
/*
 * Spawn pthreadNum writer threads running write_worker and record the
 * count in prhread_write_num. Returns 0 when all creations succeed (any
 * pthread_create failure terminates the process, as before).
 *
 * Fixes vs. the original (same defects as make_read_worker):
 * uninitialized `error` accumulator (UB read), non-standard VLA, and a
 * possible overflow of nwTid[] when pthreadNum exceeds its capacity.
 */
int make_write_worker(int pthreadNum)
{
    const int capacity = (int)(sizeof(nwTid) / sizeof(nwTid[0]));
    if (pthreadNum > capacity)
    {
        pthreadNum = capacity;  // never write past nwTid[]
    }
    int error = 0;
    prhread_write_num = pthreadNum;
    for (int i = 0; i < pthreadNum; i++)
    {
        int err = pthread_create(&nwTid[i], NULL, write_worker, NULL);
        if (err != 0)
        {
            std::cout << "make pthread error :" << err << std::endl;
            exit(1);
        }
        error += err;
        std::cout << "\033[32mNO.\033[0m" << i + 1
                  << "\033[32m, pthread creation successful!\033[0m" << std::endl;
    }
    return error;
}
void *write_worker(void *arg)
{
pthread_t tid;
tid = pthread_self(); //get pthread id
WorkNode wJob;
int connfd, n, nwrite;
char buf[100];
std::cout << "???? I am WRITE worker ????" << tid<< std::endl;
while(1)
{
pthread_mutex_lock(&lock4);//【修改11:写时加锁4】
if (waitlist.size() < 1)//【修改12:防止误触发】
{
pthread_cond_wait(&has_write, &lock);
}
wJob = get_workList();
//connfd = wJob.connfd;
connfd = 5; //【修改13:发送数据到第一个链接的客户端】
strcpy(buf, wJob.wBuf);
n = size_buf(buf);
while(n > 0)//write ot over
{
nwrite = write(connfd, buf, n);
n -= nwrite;
}
//usleep(100);
pthread_mutex_unlock(&lock4); //【修改14:解锁4】
}
}
/*
 * Block until every reader and writer thread has exited.
 * Always returns 0.
 */
int destroy_pthread()
{
    int idx;
    for (idx = 0; idx < prhread_read_num; ++idx)
    {
        pthread_join(nrTid[idx], NULL);
    }
    for (idx = 0; idx < prhread_write_num; ++idx)
    {
        pthread_join(nwTid[idx], NULL);
    }
    return 0;
}
/*
 * Length of the NUL-terminated string in buf INCLUDING the terminator
 * (i.e. strlen(buf) + 1) — the byte count the write workers must send.
 */
int size_buf(char *buf)
{
    const char *p = buf;
    while (*p != '\0')
    {
        ++p;
    }
    return (int)(p - buf) + 1;
}
```