1. 基本概念
提前fork若干的子进程形成一个children pool,但是很显然这样做很耗费资源,如何动态分配也是比较麻烦的地方,UNP给出了该server模型最简单的实现,笔者稍微做了修改。
2. 代码分析
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>   /* kill(), SIGTERM — previously missing (implicit declaration) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#define KEY 0x1 /* key for first message queue */
#define LISTENQ 10 /* backlog passed to listen() */
#define MAXLINE 20 /* max length of one request line / msg text */
#define MAXN 16384 /* max number of bytes a client may request */
/* Print the errno message for msg and exit the process. */
#define handle_error(msg) \
do { perror(msg); exit(EXIT_FAILURE); } while (0)
/* SysV message-queue record exchanged with the benchmark client:
 * mtype 1 = "stop the server", mtype 2 = server's "done" reply. */
typedef struct {
long mtype;
char mtext[MAXLINE];
} Mymsg;
/* State of the buffered reader shared by my_read()/readline().
 * NOTE(review): per-process only — safe here because each child has
 * its own copy after fork. */
static int read_cnt;
static char *read_ptr;
static char read_buf[MAXLINE];
/* Set once the benchmark client's message queue has been found. */
char flag;
void web_child(int sockfd);
ssize_t readline(int fd, void *vptr, size_t maxlen);
static ssize_t my_read(int fd, char *ptr) ;
ssize_t writen(int fd, const void *vptr, size_t n);
void pr_cpu_time(void);
pid_t child_make(int, long *, int listenfd);
void child_main(int,long *, int listenfd);
/*
 * Preforked TCP server: creates <childnum> children that all accept()
 * on one shared, non-blocking listening socket. The parent then polls
 * a SysV message queue created by the benchmark client; on receiving
 * the stop message (mtype 1) it kills the children, prints CPU usage
 * and per-child connection counts, and replies "done" (mtype 2).
 *
 * Usage: tcp_fork_server <addr> <port> <childnum>
 */
int main(int argc, char **argv){
    int listenfd, ident, flags;
    int nchildren;
    in_port_t port;
    pid_t *pids;
    long *ptr;                      /* shared per-child connection counters */
    Mymsg msg;
    int i;
    struct sockaddr_in servaddr;
    if (argc != 4)
        errx(1, "tcp_fork_server <addr> <port> <childnum>\n");
    nchildren = atoi(argv[3]);
    if ((pids = calloc(nchildren, sizeof(pid_t))) == NULL)
        handle_error("calloc");
    port = atoi(argv[2]);
    if ((listenfd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
        handle_error("socket");
    bzero(&servaddr, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(port);
    /* inet_pton() returns 0 for a malformed address string and -1 only
     * for an unsupported family; the old "== -1" check silently accepted
     * garbage addresses. Only 1 means success, and 0 does not set errno,
     * so report with errx rather than perror. */
    if (inet_pton(AF_INET, argv[1], &servaddr.sin_addr) != 1)
        errx(1, "invalid IPv4 address: %s", argv[1]);
    /* Anonymous shared mapping: usable only between related processes
     * (parent and its forked children). */
    if ((ptr = mmap(0, nchildren * sizeof(long), PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0)) == MAP_FAILED)
        handle_error("mmap");
    /* Make the listening socket non-blocking so the children's accept()
     * loops can spin instead of blocking (see child_main). */
    if ((flags = fcntl(listenfd, F_GETFL, 0)) == -1)
        handle_error("fcntl");
    if (fcntl(listenfd, F_SETFL, flags | O_NONBLOCK) == -1)
        handle_error("fcntl");
    if (bind(listenfd, (struct sockaddr *) &servaddr, sizeof(servaddr)) == -1)
        handle_error("bind");
    if (listen(listenfd, LISTENQ) == -1)
        handle_error("listen");
    for (i = 0; i < nchildren; i++)
        pids[i] = child_make(i, ptr, listenfd);     /* parent returns */
    for (;;) {
        /* First wait for the benchmark client to create the message
         * queue, then poll it (non-blocking) for the stop message. */
        if (!flag) {
            if ((ident = msgget(KEY, 0660)) == -1)
                continue;
            flag = 1;
        }
        if (msgrcv(ident, &msg, MAXLINE, 1, IPC_NOWAIT) == -1) {
            if (errno != ENOMSG)
                handle_error("msgrcv");
        } else {
            /* Stop message received: terminate and reap all children,
             * then report the statistics. */
            for (i = 0; i < nchildren; i++)
                kill(pids[i], SIGTERM);
            while (wait(NULL) > 0)
                ;
            if (errno != ECHILD)
                errx(1, "wait error");
            pr_cpu_time();
            for (i = 0; i < nchildren; i++)
                /* %ld: ptr[i] is long — the old %d was a format-string
                 * type mismatch (undefined behavior). */
                printf("child %d connected number:%ld\n", i, ptr[i]);
            msg.mtype = 2;
            memcpy(msg.mtext, "done", 5);
            if (msgsnd(ident, &msg, MAXLINE, 0) == -1)
                handle_error("msgsnd");     /* was mislabeled "msgrcv" */
            return 0;
        }
    }
}
/* Print the combined user and system CPU time consumed by this
 * process and by its already-reaped children (getrusage reports
 * children only after wait()). */
void pr_cpu_time(void){
    struct rusage self_ru, child_ru;
    if (getrusage(RUSAGE_SELF, &self_ru) < 0)
        handle_error("getrusage error");
    if (getrusage(RUSAGE_CHILDREN, &child_ru) < 0)
        handle_error("getrusage error");
    double user = (double) self_ru.ru_utime.tv_sec + self_ru.ru_utime.tv_usec / 1000000.0
                + (double) child_ru.ru_utime.tv_sec + child_ru.ru_utime.tv_usec / 1000000.0;
    double sys  = (double) self_ru.ru_stime.tv_sec + self_ru.ru_stime.tv_usec / 1000000.0
                + (double) child_ru.ru_stime.tv_sec + child_ru.ru_stime.tv_usec / 1000000.0;
    printf("\nuser time = %g, sys time = %g\n", user, sys);
}
/* Fork one pool child. The parent gets the child's pid back; the
 * child enters child_main() and never returns.
 * i        - this child's index into the shared counter array
 * ptr      - mmap'd shared counter array
 * listenfd - listening socket the child will accept() on
 */
pid_t child_make(int i, long *ptr, int listenfd){
    pid_t pid;
    if ((pid = fork()) < 0)
        handle_error("fork");
    if (pid > 0)
        return pid;                  /* parent */
    child_main(i, ptr, listenfd);    /* child: never returns */
    /* Not reached: child_main loops forever. The original fell off the
     * end of a non-void function here, which is undefined behavior if
     * the value were ever used. */
    exit(0);
}
/* Loop of one prefork child: accept connections on the shared
 * listening socket and serve each one with web_child().
 * i        - index of this child's slot in the shared counter array
 * ptr      - mmap'd shared array; ptr[i] counts connections served here
 * listenfd - non-blocking listening socket inherited from the parent
 */
void child_main(int i, long * ptr, int listenfd){
    int connfd;
    socklen_t clilen;
    struct sockaddr *cliaddr;
    if ((cliaddr = malloc(sizeof(struct sockaddr_in))) == NULL)
        handle_error("malloc");
    printf("child %ld starting\n", (long) getpid());
    for ( ; ; ) {
        clilen = sizeof(struct sockaddr_in);
        /* BUG FIX: pass cliaddr itself, not &cliaddr. The original cast
         * (struct sockaddr *) &cliaddr made accept() write the peer
         * address (16 bytes) over the 8-byte pointer variable and the
         * adjacent stack — a stack overflow; the malloc'd buffer was
         * never used. */
        if ((connfd = accept(listenfd, cliaddr, &clilen)) == -1) {
            if (errno == EAGAIN)     /* non-blocking socket: retry */
                continue;
            handle_error("accept");
        }
        /* Each child increments only its own slot, so no locking of the
         * shared mapping is needed. */
        ptr[i]++;
        web_child(connfd);           /* process the request */
        if (close(connfd) == -1)
            handle_error("close");
    }
}
/* Serve one client on sockfd: repeatedly read a line containing a
 * byte count, then send back that many bytes from an (uninitialized —
 * the benchmark only measures throughput) buffer, until the client
 * closes the connection. Exits the process on a malformed request. */
void web_child(int sockfd){
    char request[MAXLINE], payload[MAXN];
    for (;;) {
        ssize_t got = readline(sockfd, request, MAXLINE);
        if (got == -1)
            handle_error("readline");
        if (got == 0)
            return;                  /* client closed the connection */
        int count = atol(request);
        if (count <= 0 || count > MAXN)
            errx(1,"client request for %d bytes,max size is %d\n", count,MAXN);
        if (writen(sockfd, payload, count) == -1)
            handle_error("writen");
    }
}
/* Write exactly n bytes from vptr to fd, retrying after short writes
 * and EINTR interruptions. Returns n on success, -1 on any other
 * write error. */
ssize_t writen(int fd, const void *vptr, size_t n){
    const char *p = vptr;
    size_t remaining = n;
    while (remaining > 0) {
        ssize_t sent = write(fd, p, remaining);
        if (sent <= 0) {
            if (sent < 0 && errno == EINTR)
                continue;            /* interrupted: just retry */
            return (-1);             /* real error */
        }
        remaining -= (size_t) sent;
        p += sent;
    }
    return ((ssize_t) n);
}
/* Buffered single-byte read: refills the file-scope read_buf from fd
 * when empty, then hands out one byte at a time through *ptr.
 * Returns 1 on success, 0 at EOF, -1 on read error (EINTR retried). */
static ssize_t my_read(int fd, char *ptr){
    while (read_cnt <= 0) {
        read_cnt = read(fd, read_buf, sizeof(read_buf));
        if (read_cnt < 0) {
            if (errno == EINTR)
                continue;            /* interrupted: retry the read */
            return (-1);
        }
        if (read_cnt == 0)
            return (0);              /* EOF */
        read_ptr = read_buf;         /* fresh buffer: restart cursor */
    }
    read_cnt--;
    *ptr = *read_ptr++;
    return (1);
}
/* Read one line (at most maxlen-1 bytes) from fd into vptr, storing
 * the newline and NUL-terminating like fgets(). Returns the number of
 * bytes read (counting the newline), 0 at immediate EOF, -1 on error. */
ssize_t readline(int fd, void *vptr, size_t maxlen){
    char *out = vptr;
    ssize_t total;
    for (total = 1; (size_t) total < maxlen; total++) {
        char ch;
        ssize_t rc = my_read(fd, &ch);
        if (rc == 1) {
            *out++ = ch;
            if (ch == '\n')
                break;               /* newline is stored, like fgets() */
        } else if (rc == 0) {
            *out = 0;
            return (total - 1);      /* EOF, total-1 bytes were read */
        } else {
            return (-1);             /* error, errno set by read() */
        }
    }
    *out = 0;                        /* NUL terminate like fgets() */
    return (total);
}
3. 结果
//server端
[root@localhost ~]# ./30_prefork_ser 127.0.0.1 9877 15
child 2787 starting
child 2785 starting
child 2788 starting
child 2786 starting
child 2789 starting
child 2790 starting
child 2784 starting
child 2791 starting
child 2792 starting
child 2793 starting
child 2783 starting
child 2794 starting
child 2795 starting
child 2782 starting
child 2781 starting
user time = 0.173836, sys time = 2.89636
child 0 connected number:109
child 1 connected number:569
child 2 connected number:605
child 3 connected number:201
child 4 connected number:586
child 5 connected number:606
child 6 connected number:693
child 7 connected number:743
child 8 connected number:717
child 9 connected number:690
child 10 connected number:592
child 11 connected number:100
child 12 connected number:621
child 13 connected number:100
child 14 connected number:568
[root@localhost ~]#
//testbench端
[root@localhost ~]# ./30_testbench 127.0.0.1 9877 15 500 4000
[root@localhost ~]#
可以看出来,每个child完成的连接数,还是可以接受的。最多的child的连接数是最少的child的7倍多。按照理想的状态,希望每个child的连接数都差不多,也就是负载平衡点。
4. 其他需要强调的问题
- When the program starts, N children are created, and all N call accept and all are put to sleep by the kernel. When the first client connection arrives, all N children are awakened. This is because all N have gone to sleep on the same ‘‘wait channel,’’This is sometimes called the thundering herd problem because all N are awakened even though only one will obtain the connection. Nevertheless, the code works, with the performance side effect of waking up too many processes each time a connection is ready to be accepted. We now measure this performance effect.
- A collision occurs when multiple processes call select on the same descriptor, because room is allocated in the socket structure for only one process ID to be awakened when the descriptor is ready. If multiple processes are waiting for the same descriptor, the kernel must wake up all processes that are blocked in a call to select since it doesn’t know which processes are affected by the descriptor that just became ready.
多个进程在同一个fd上调用select或者accept时会带来性能的损失,但是使用accept的性能代价远远小于select。这里也提到了惊群现象。