一 用进程池实现CGI服务器
这个服务器实现的任务是:将客户端传送过来的信息打印出来,并执行客户端指定的CGI程序(其标准输出被重定向到连接套接字,从而返回给客户端)。
服务器使用高效的半同步半异步模式。主进程只负责监听,连接和数据处理都由子进程来实现。(图8-11)
1 方式一 ,没有使用类封装
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <signal.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#define BUFFER_SIZE 1024
#define MAX_EVENT_NUMBER 1024
#define PROCESS_COUNT 5
#define USER_PER_PROCESS 65535
//Bookkeeping for one worker process in the pool: its PID and the
//socketpair the parent uses to notify it of pending connections.
struct process_in_pool
{
pid_t pid;
int pipefd[2];
};
//Per-connection state kept by a worker, indexed directly by connection fd.
struct client_data
{
sockaddr_in address;//peer address as returned by accept()
char buf[ BUFFER_SIZE ];//request buffer: a CGI program path terminated by "\r\n"
int read_idx;//number of bytes already read into buf
};
int sig_pipefd[2];//signal notification pipe (unified event source)
int epollfd;//parent's epoll instance
int listenfd;//listening socket; shared with the workers via fork()
process_in_pool sub_process[ PROCESS_COUNT ];//worker bookkeeping table
bool stop_child = false;//set by the worker's SIGTERM handler to end its event loop
// Switch fd to non-blocking mode.  Returns the descriptor's previous
// status flags so a caller could restore them later if needed.
int setnonblocking( int fd )
{
    const int previous = fcntl( fd, F_GETFL );
    fcntl( fd, F_SETFL, previous | O_NONBLOCK );
    return previous;
}
// Register fd with epollfd for edge-triggered (EPOLLET) read events.
// Fix: the fd is switched to non-blocking *before* it is added to the
// epoll set.  ET mode requires draining the fd to EAGAIN; if an event
// were delivered between epoll_ctl() and setnonblocking(), a reader
// could block on a still-blocking descriptor.
void addfd( int epollfd, int fd )
{
    setnonblocking( fd );
    epoll_event event;
    event.data.fd = fd;
    event.events = EPOLLIN | EPOLLET;
    epoll_ctl( epollfd, EPOLL_CTL_ADD, fd, &event );
}
// Signal handler for the unified event source: forward the signal
// number (low byte only) into the notification pipe so the event loop
// observes it through epoll.  errno is saved and restored because
// send() may clobber it while interrupting other code.
void sig_handler( int sig )
{
    const int saved = errno;
    int message = sig;
    send( sig_pipefd[1], ( char* )&message, 1, 0 );  // pipefd[1] is the write end
    errno = saved;
}
// Install `handler` for signal `sig` via sigaction, blocking all other
// signals during delivery.  With restart (the default) interrupted
// system calls are resumed via SA_RESTART.
void addsig( int sig, void(*handler)(int), bool restart = true )
{
    struct sigaction action;
    memset( &action, '\0', sizeof( action ) );
    action.sa_handler = handler;
    action.sa_flags = restart ? SA_RESTART : 0;
    sigfillset( &action.sa_mask );
    assert( sigaction( sig, &action, NULL ) != -1 );
}
// Release the parent's global descriptors before exit: both ends of
// the signal pipe, the listening socket, and the epoll instance, in
// that order.
void del_resource()
{
    const int fds[] = { sig_pipefd[0], sig_pipefd[1], listenfd, epollfd };
    for ( size_t i = 0; i < sizeof( fds ) / sizeof( fds[0] ); ++i )
    {
        close( fds[i] );
    }
}
// SIGTERM handler installed in worker processes: request a clean
// shutdown of the worker's event loop.
void child_term_handler( int sig )
{
    (void)sig;
    stop_child = true;
}
// SIGCHLD handler installed in worker processes: reap every CGI child
// that has already exited, without blocking.
void child_child_handler( int sig )
{
    (void)sig;
    int status;
    while ( waitpid( -1, &status, WNOHANG ) > 0 )
    {
    }
}
// Event loop of one pool worker (slot `idx` in sub_process[]).
//
// The worker watches two kinds of descriptors on its own epoll
// instance: its end of the parent/child socketpair, on which the
// parent announces pending connections, and the client sockets it has
// accepted.  listenfd was duplicated by fork(), so parent and workers
// share one open file description and the worker can accept()
// directly.  A client request is the path of a CGI program terminated
// by "\r\n"; once complete the worker forks a grandchild that execs
// that program with stdout redirected to the connection socket.
int run_child( int idx )
{
    epoll_event events[ MAX_EVENT_NUMBER ];
    int child_epollfd = epoll_create( 5 );
    assert( child_epollfd != -1 );
    // The worker keeps pipefd[1]; the parent kept pipefd[0].  A
    // socketpair is bidirectional, so either end can both read and write.
    int pipefd = sub_process[idx].pipefd[1];
    addfd( child_epollfd, pipefd );  // non-blocking + ET, like every fd here
    int ret;
    addsig( SIGTERM, child_term_handler, false );  // no SA_RESTART: let epoll_wait return EINTR
    addsig( SIGCHLD, child_child_handler );
    client_data* users = new client_data[ USER_PER_PROCESS ];  // indexed by connfd
    while( !stop_child )
    {
        int number = epoll_wait( child_epollfd, events, MAX_EVENT_NUMBER, -1 );
        if ( ( number < 0 ) && ( errno != EINTR ) )
        {
            printf( "epoll failure\n" );
            break;
        }
        for ( int i = 0; i < number; i++ )
        {
            int sockfd = events[i].data.fd;
            if( ( sockfd == pipefd ) && ( events[i].events & EPOLLIN ) )
            {
                // The parent signalled that a connection is pending.
                int client = 0;
                ret = recv( sockfd, ( char* )&client, sizeof( client ), 0 );
                if( ret < 0 )
                {
                    if( errno != EAGAIN )
                    {
                        stop_child = true;
                    }
                }
                else if( ret == 0 )
                {
                    // Parent closed its end: shut down.
                    stop_child = true;
                }
                else
                {
                    struct sockaddr_in client_address;
                    socklen_t client_addrlength = sizeof( client_address );
                    int connfd = accept( listenfd, ( struct sockaddr* )&client_address, &client_addrlength );
                    if ( connfd < 0 )
                    {
                        printf( "errno is: %d\n", errno );
                        continue;
                    }
                    memset( users[connfd].buf, '\0', BUFFER_SIZE );
                    users[connfd].address = client_address;
                    users[connfd].read_idx = 0;
                    addfd( child_epollfd, connfd );
                }
            }
            else if( events[i].events & EPOLLIN )
            {
                // Data from a client: drain the socket (required by ET mode).
                int idx = 0;
                while( true )
                {
                    idx = users[sockfd].read_idx;
                    // One byte of buf is reserved for the '\0' terminator.
                    ret = recv( sockfd, users[sockfd].buf + idx, BUFFER_SIZE-1-idx, 0 );
                    if( ret < 0 )
                    {
                        if( errno != EAGAIN )
                        {
                            epoll_ctl( child_epollfd, EPOLL_CTL_DEL, sockfd, 0 );
                            close( sockfd );
                        }
                        break;  // EAGAIN: socket drained, wait for the next event
                    }
                    else if( ret == 0 )
                    {
                        // Peer closed the connection.
                        epoll_ctl( child_epollfd, EPOLL_CTL_DEL, sockfd, 0 );
                        close( sockfd );
                        break;
                    }
                    else
                    {
                        users[sockfd].read_idx += ret;
                        printf( "user content is: %s\n", users[sockfd].buf );
                        idx = users[sockfd].read_idx;
                        // Keep reading until the request ends with "\r\n".
                        if( ( idx < 2 ) || ( users[sockfd].buf[idx-2] != '\r' ) || ( users[sockfd].buf[idx-1] != '\n' ) )
                        {
                            continue;
                        }
                        users[sockfd].buf[users[sockfd].read_idx-2] = '\0';
                        char* file_name = users[sockfd].buf;
                        if( access( file_name, F_OK ) == -1 )
                        {
                            // Requested CGI program does not exist.
                            epoll_ctl( child_epollfd, EPOLL_CTL_DEL, sockfd, 0 );
                            close( sockfd );
                            break;
                        }
                        ret = fork();
                        if( ret == -1 )
                        {
                            epoll_ctl( child_epollfd, EPOLL_CTL_DEL, sockfd, 0 );
                            close( sockfd );
                            break;
                        }
                        else if( ret > 0 )
                        {
                            // Parent side: the connection now belongs to the
                            // CGI child; drop our reference to it.
                            epoll_ctl( child_epollfd, EPOLL_CTL_DEL, sockfd, 0 );
                            close( sockfd );
                            break;
                        }
                        else
                        {
                            // CGI child: redirect stdout to the client
                            // socket, then exec the requested program.
                            close( STDOUT_FILENO );
                            dup( sockfd );
                            // Bug fix: the execl argument list must end with
                            // a null *pointer*; a bare 0 is an int and is
                            // undefined behavior in a varargs call.
                            execl( users[sockfd].buf, users[sockfd].buf, ( char* )NULL );
                            exit( 0 );
                        }
                    }
                }
            }
            else
            {
                continue;
            }
        }
    }
    delete [] users;
    close( pipefd );
    close( child_epollfd );
    return 0;
}
// Parent process of the plain (non-class) process-pool CGI server.
// Creates the listening socket, forks PROCESS_COUNT workers (each with
// its own socketpair), then loops dispatching pending connections to
// live workers round-robin and reacting to signals via the unified
// event source.
//
// Fixes vs. the original:
//  - SIGTERM/SIGINT forwarding now checks pid != -1: kill(-1, SIGTERM)
//    would signal every process this user may signal.
//  - The round-robin dispatch skips workers that have already exited
//    (pid == -1), matching the class-based version; the original could
//    send() on a closed pipe fd.
int main( int argc, char* argv[] )
{
    if( argc <= 2 )
    {
        printf( "usage: %s ip_address port_number\n", basename( argv[0] ) );
        return 1;
    }
    const char* ip = argv[1];
    int port = atoi( argv[2] );
    int ret = 0;
    struct sockaddr_in address;
    bzero( &address, sizeof( address ) );
    address.sin_family = AF_INET;
    inet_pton( AF_INET, ip, &address.sin_addr );
    address.sin_port = htons( port );
    listenfd = socket( PF_INET, SOCK_STREAM, 0 );
    assert( listenfd >= 0 );
    ret = bind( listenfd, ( struct sockaddr* )&address, sizeof( address ) );
    assert( ret != -1 );
    ret = listen( listenfd, 5 );
    assert( ret != -1 );
    // Spawn the worker pool.  Each socketpair is created *before* fork()
    // so both ends exist in both processes.
    for( int i = 0; i < PROCESS_COUNT; ++i )
    {
        ret = socketpair( PF_UNIX, SOCK_STREAM, 0, sub_process[i].pipefd );
        assert( ret != -1 );
        sub_process[i].pid = fork();
        if( sub_process[i].pid < 0 )
        {
            continue;
        }
        else if( sub_process[i].pid > 0 )  // parent: keep pipefd[0]
        {
            close( sub_process[i].pipefd[1] );
            // Non-blocking because this end is driven from an epoll loop
            // and must never stall the parent.
            setnonblocking( sub_process[i].pipefd[0] );
            continue;
        }
        else  // worker: keep pipefd[1] and run its own event loop
        {
            close( sub_process[i].pipefd[0] );
            setnonblocking( sub_process[i].pipefd[1] );
            run_child( i );
            exit( 0 );
        }
    }
    // Only the parent reaches this point: workers exit inside the loop.
    epoll_event events[ MAX_EVENT_NUMBER ];
    epollfd = epoll_create( 5 );
    assert( epollfd != -1 );
    addfd( epollfd, listenfd );  // listenfd: non-blocking, ET
    // Signal pipe for the unified event source.
    ret = socketpair( PF_UNIX, SOCK_STREAM, 0, sig_pipefd );
    assert( ret != -1 );
    setnonblocking( sig_pipefd[1] );
    addfd( epollfd, sig_pipefd[0] );
    addsig( SIGCHLD, sig_handler );
    addsig( SIGTERM, sig_handler );
    addsig( SIGINT, sig_handler );
    addsig( SIGPIPE, SIG_IGN );
    bool stop_server = false;
    int sub_process_counter = 0;
    while( !stop_server )
    {
        int number = epoll_wait( epollfd, events, MAX_EVENT_NUMBER, -1 );
        if ( ( number < 0 ) && ( errno != EINTR ) )
        {
            printf( "epoll failure\n" );
            break;
        }
        for ( int i = 0; i < number; i++ )
        {
            int sockfd = events[i].data.fd;
            if( sockfd == listenfd )
            {
                // A connection is pending.  The parent never accepts it:
                // listenfd was duplicated by fork(), so parent and workers
                // share the same open file description, and the notified
                // worker accept()s the connection itself.  Pick the next
                // live worker round-robin, skipping exited slots.
                int j = sub_process_counter;
                do
                {
                    if( sub_process[j].pid != -1 )
                    {
                        break;
                    }
                    j = ( j + 1 ) % PROCESS_COUNT;
                }
                while( j != sub_process_counter );
                if( sub_process[j].pid == -1 )
                {
                    // No live worker left to hand the connection to.
                    stop_server = true;
                    break;
                }
                sub_process_counter = ( j + 1 ) % PROCESS_COUNT;
                int new_conn = 1;
                send( sub_process[j].pipefd[0], ( char* )&new_conn, sizeof( new_conn ), 0 );
                printf( "send request to child %d\n", j );
            }
            else if( ( sockfd == sig_pipefd[0] ) && ( events[i].events & EPOLLIN ) )
            {
                char signals[1024];
                ret = recv( sig_pipefd[0], signals, sizeof( signals ), 0 );
                if( ret <= 0 )
                {
                    continue;
                }
                for( int k = 0; k < ret; ++k )
                {
                    switch( signals[k] )
                    {
                        case SIGCHLD:  // a worker exited: reap it and mark its slot dead
                        {
                            pid_t pid;
                            int stat;
                            while ( ( pid = waitpid( -1, &stat, WNOHANG ) ) > 0 )
                            {
                                for( int j = 0; j < PROCESS_COUNT; ++j )
                                {
                                    if( sub_process[j].pid == pid )
                                    {
                                        close( sub_process[j].pipefd[0] );
                                        sub_process[j].pid = -1;
                                    }
                                }
                            }
                            // Stop once every worker is gone.
                            stop_server = true;
                            for( int j = 0; j < PROCESS_COUNT; ++j )
                            {
                                if( sub_process[j].pid != -1 )
                                {
                                    stop_server = false;
                                }
                            }
                            break;
                        }
                        case SIGTERM:
                        case SIGINT:
                        {
                            // Forward the termination request to every live
                            // worker.  The pid != -1 guard is essential:
                            // kill( -1, SIGTERM ) would signal every process
                            // this user is allowed to signal.
                            printf( "kill all the child now\n" );
                            for( int j = 0; j < PROCESS_COUNT; ++j )
                            {
                                int pid = sub_process[j].pid;
                                if( pid != -1 )
                                {
                                    kill( pid, SIGTERM );
                                }
                            }
                            break;
                        }
                        default:
                        {
                            break;
                        }
                    }
                }
            }
            else
            {
                continue;
            }
        }
    }
    del_resource();
    return 0;
}
关于这个程序,要注意的几点是:
1 关于listenfd的问题:子进程是复制的父进程的这个文件描述符。所以如果把文件描述符看做是一个文件的话,那么这将使父进程和子进程的listenfd指向同一个文件表项!也可以理解为共享同一个listenfd的缓冲区!
父进程将listenfd放入epoll,所以当连接到来的时候,listenfd读就绪,epoll_wait返回,父进程并不accept这个连接,而是按照一定的算法通知某一个子进程,这个子进程accept这个连接,这就将这个连接从listenfd的缓冲队列中取出。所以父进程同时可以看到这个连接已经被取出!
2 使用类封装
//processpool.h
#ifndef PROCESSPOOL_H
#define PROCESSPOOL_H
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <signal.h>
#include <sys/wait.h>
#include <sys/stat.h>
//Descriptor of one worker process in the pool.
class process
{
public:
process() : m_pid( -1 ){}
public:
pid_t m_pid;//PID of the worker; -1 while the slot is unused or after the worker exits
int m_pipefd[2];//socketpair connecting parent and worker
};
template< typename T >
class processpool//process-pool template; T is the per-connection logic class and must provide init() and process()
{
private:
processpool( int listenfd, int process_number = 8 );//private: the pool is a singleton obtained via create()
public:
static processpool< T >* create( int listenfd, int process_number = 8 )
{
if( !m_instance )
{
m_instance = new processpool< T >( listenfd, process_number );
}
return m_instance;
}
~processpool()
{
delete [] m_sub_process;
}
void run();//entry point: dispatches to run_parent() or run_child() based on m_idx
private:
void setup_sig_pipe();
void run_parent();
void run_child();
private:
static const int MAX_PROCESS_NUMBER = 16;//maximum number of worker processes allowed
static const int USER_PER_PROCESS = 65536;//maximum clients each worker can handle
static const int MAX_EVENT_NUMBER = 10000;//maximum epoll events handled per wait
int m_process_number;//number of workers in the pool
int m_idx;//this process's slot in the pool: -1 in the parent, >= 0 in a worker
int m_epollfd;//per-process epoll instance
int m_listenfd;//listening socket, created by main and shared via fork
int m_stop;//set to make the event loop stop
process* m_sub_process;//bookkeeping for every worker
static processpool< T >* m_instance;//singleton instance
};//end of class declaration
template< typename T >
processpool< T >* processpool< T >::m_instance = NULL;
//Most of the helpers below are file-scope functions, not class methods.
static int sig_pipefd[2];//signal pipe for the unified event source; after fork() every process owns its private copy of this array
// Switch fd to non-blocking mode and return its previous status flags.
static int setnonblocking( int fd )
{
    const int previous = fcntl( fd, F_GETFL );
    fcntl( fd, F_SETFL, previous | O_NONBLOCK );
    return previous;
}
// Register fd with epollfd for edge-triggered read events; ET mode
// requires the descriptor to be non-blocking, which is set right after
// adding it.
static void addfd( int epollfd, int fd )
{
    epoll_event ev;
    ev.data.fd = fd;
    ev.events = EPOLLIN | EPOLLET;
    epoll_ctl( epollfd, EPOLL_CTL_ADD, fd, &ev );
    setnonblocking( fd );
}
// Drop fd from the epoll interest list, then close it.
static void removefd( int epollfd, int fd )
{
    epoll_ctl( epollfd, EPOLL_CTL_DEL, fd, NULL );
    close( fd );
}
// Forward the signal number (one byte per signal) into the
// notification pipe; the event loop picks it up via epoll.  errno is
// preserved because send() may clobber it inside a handler.
static void sig_handler( int sig )
{
    const int saved = errno;
    int message = sig;
    send( sig_pipefd[1], ( char* )&message, 1, 0 );
    errno = saved;
}
// Install `handler` for signal `sig` via sigaction, blocking all
// signals during delivery; interrupted syscalls are restarted unless
// restart is false.
static void addsig( int sig, void( handler )(int), bool restart = true )
{
    struct sigaction action;
    memset( &action, '\0', sizeof( action ) );
    action.sa_handler = handler;
    if( restart )
    {
        action.sa_flags |= SA_RESTART;
    }
    sigfillset( &action.sa_mask );
    assert( sigaction( sig, &action, NULL ) != -1 );
}
//Pool constructor: records the listening socket, then forks process_number workers.
//Each worker's socketpair is created *before* fork() so both ends exist in parent and child.
//Afterwards m_idx distinguishes the processes: -1 in the parent, the slot index in each worker.
template< typename T >
processpool< T >::processpool( int listenfd, int process_number )
: m_listenfd( listenfd ), m_process_number( process_number ), m_idx( -1 ), m_stop( false )
{
assert( ( process_number > 0 ) && ( process_number <= MAX_PROCESS_NUMBER ) );
m_sub_process = new process[ process_number ];//one bookkeeping slot per worker
assert( m_sub_process );
for( int i = 0; i < process_number; ++i )
{
int ret = socketpair( PF_UNIX, SOCK_STREAM, 0, m_sub_process[i].m_pipefd );//parent<->worker channel (distinct from the signal pipe)
assert( ret == 0 );
m_sub_process[i].m_pid = fork();
assert( m_sub_process[i].m_pid >= 0 );
if( m_sub_process[i].m_pid > 0 )//parent: keep end 0 and continue forking
{
close( m_sub_process[i].m_pipefd[1] );
continue;
}
else//worker: keep end 1, remember its own index, stop forking
{
close( m_sub_process[i].m_pipefd[0] );
m_idx = i;
break;
}
}
}
// Create this process's epoll instance and its signal notification
// pipe, then route the signals of interest through that pipe (unified
// event source).  Runs once per process: after fork() each process
// owns a private copy of sig_pipefd.
template< typename T >
void processpool< T >::setup_sig_pipe()
{
    m_epollfd = epoll_create( 5 );
    assert( m_epollfd != -1 );
    int rc = socketpair( PF_UNIX, SOCK_STREAM, 0, sig_pipefd );
    assert( rc != -1 );
    setnonblocking( sig_pipefd[1] );   // write end: used inside the signal handler
    addfd( m_epollfd, sig_pipefd[0] ); // read end: watched by epoll
    addsig( SIGCHLD, sig_handler );
    addsig( SIGTERM, sig_handler );
    addsig( SIGINT, sig_handler );
    addsig( SIGPIPE, SIG_IGN );
}
// Dispatch to the correct event loop.  After the constructor, m_idx is
// -1 only in the parent; every worker recorded its own pool index.
template< typename T >
void processpool< T >::run()
{
    if( m_idx == -1 )
    {
        run_parent();
    }
    else
    {
        run_child();
    }
}
//Worker event loop: waits for "new connection" notifications from the
//parent on the socketpair, accepts the connection itself, and delegates
//all client handling to the logic class T (init() on accept, process()
//when data arrives).
template< typename T >
void processpool< T >::run_child()
{
setup_sig_pipe();
int pipefd = m_sub_process[m_idx].m_pipefd[ 1 ];//worker's end of the parent<->worker channel
addfd( m_epollfd, pipefd );
epoll_event events[ MAX_EVENT_NUMBER ];
T* users = new T [ USER_PER_PROCESS ];//indexed directly by connection fd
assert( users );
int number = 0;
int ret = -1;
while( ! m_stop )
{
number = epoll_wait( m_epollfd, events, MAX_EVENT_NUMBER, -1 );
if ( ( number < 0 ) && ( errno != EINTR ) )
{
printf( "epoll failure\n" );
break;
}
for ( int i = 0; i < number; i++ )
{
int sockfd = events[i].data.fd;
if( ( sockfd == pipefd ) && ( events[i].events & EPOLLIN ) )
{
int client = 0;
ret = recv( sockfd, ( char* )&client, sizeof( client ), 0 );
if( ( ( ret < 0 ) && ( errno != EAGAIN ) ) || ret == 0 )
{
continue;
}
else
{
struct sockaddr_in client_address;
socklen_t client_addrlength = sizeof( client_address );
int connfd = accept( m_listenfd, ( struct sockaddr* )&client_address, &client_addrlength );//listenfd is shared with the parent via fork(), so the worker accepts directly
if ( connfd < 0 )
{
printf( "errno is: %d\n", errno );
continue;
}
addfd( m_epollfd, connfd );
users[connfd].init( m_epollfd, connfd, client_address );//T must implement init()
}
}
else if( ( sockfd == sig_pipefd[0] ) && ( events[i].events & EPOLLIN ) )
{
//A signal arrived through the unified event source; each byte in the pipe is one signal number.
int sig;
char signals[1024];
ret = recv( sig_pipefd[0], signals, sizeof( signals ), 0 );
if( ret <= 0 )
{
continue;
}
else
{
for( int i = 0; i < ret; ++i )
{
switch( signals[i] )
{
case SIGCHLD://a CGI child exited: reap it without blocking
{
pid_t pid;
int stat;
while ( ( pid = waitpid( -1, &stat, WNOHANG ) ) > 0 )
{
continue;
}
break;
}
case SIGTERM:
case SIGINT:
{
m_stop = true;
break;
}
default:
{
break;
}
}
}
}
}
else if( events[i].events & EPOLLIN )
{
users[sockfd].process();//client data: hand off to the logic class
}
else
{
continue;
}
}
}
delete [] users;
users = NULL;
close( pipefd );
//m_listenfd is closed by its creator (main), not here
close( m_epollfd );
}
//Parent event loop: watches the listening socket and the signal pipe.
//When a connection is pending it picks the next live worker round-robin
//and writes one integer to that worker's channel; the worker then
//accepts the connection itself.
template< typename T >
void processpool< T >::run_parent()
{
setup_sig_pipe();
addfd( m_epollfd, m_listenfd );
epoll_event events[ MAX_EVENT_NUMBER ];
int sub_process_counter = 0;
int new_conn = 1;
int number = 0;
int ret = -1;
while( ! m_stop )
{
number = epoll_wait( m_epollfd, events, MAX_EVENT_NUMBER, -1 );
if ( ( number < 0 ) && ( errno != EINTR ) )
{
printf( "epoll failure\n" );
break;
}
for ( int i = 0; i < number; i++ )
{
int sockfd = events[i].data.fd;
if( sockfd == m_listenfd )
{
//Round-robin starting at sub_process_counter, skipping slots whose worker has exited (m_pid == -1).
int i = sub_process_counter;
do
{
if( m_sub_process[i].m_pid != -1 )
{
break;
}
i = (i+1)%m_process_number;
}
while( i != sub_process_counter );
if( m_sub_process[i].m_pid == -1 )
{
//No live worker remains: shut the pool down.
m_stop = true;
break;
}
sub_process_counter = (i+1)%m_process_number;
send( m_sub_process[i].m_pipefd[0], ( char* )&new_conn, sizeof( new_conn ), 0 );//notify worker i; it accept()s the connection itself
printf( "send request to child %d\n", i );
}
else if( ( sockfd == sig_pipefd[0] ) && ( events[i].events & EPOLLIN ) )
{
//A signal arrived through the unified event source; each byte is one signal number.
int sig;
char signals[1024];
ret = recv( sig_pipefd[0], signals, sizeof( signals ), 0 );
if( ret <= 0 )
{
continue;
}
else
{
for( int i = 0; i < ret; ++i )
{
switch( signals[i] )
{
case SIGCHLD://a worker exited: reap it and mark its slot dead
{
pid_t pid;
int stat;
while ( ( pid = waitpid( -1, &stat, WNOHANG ) ) > 0 )
{
for( int i = 0; i < m_process_number; ++i )
{
if( m_sub_process[i].m_pid == pid )
{
printf( "child %d join\n", i );
close( m_sub_process[i].m_pipefd[0] );
m_sub_process[i].m_pid = -1;
}
}
}
//Stop once every worker is gone.
m_stop = true;
for( int i = 0; i < m_process_number; ++i )
{
if( m_sub_process[i].m_pid != -1 )
{
m_stop = false;
}
}
break;
}
case SIGTERM:
case SIGINT:
{
//Forward the termination request to every live worker.
printf( "kill all the clild now\n" );
for( int i = 0; i < m_process_number; ++i )
{
int pid = m_sub_process[i].m_pid;
if( pid != -1 )
{
kill( pid, SIGTERM );
}
}
break;
}
default:
{
break;
}
}
}
}
}
else
{
continue;
}
}
}
//m_listenfd is closed by its creator (main), not here
close( m_epollfd );
}
#endif
//processpool.cpp
#include "processpool.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <signal.h>
#include <sys/wait.h>
#include <sys/stat.h>
// Logic class plugged into processpool<T>: handles one client
// connection of the CGI server inside a worker process.
class cgi_conn
{
public:
    cgi_conn()
    {
    }
    ~cgi_conn()
    {
    }
    // Reset per-connection state; called by the worker right after accept().
    void init( int epollfd, int sockfd, const sockaddr_in & client_addr )
    {
        m_epollfd = epollfd;
        m_sockfd = sockfd;
        m_address = client_addr;
        memset( m_buf, '\0', BUFFER_SIZE );
        m_read_idx = 0;
    }
    // Read the request (a CGI program path terminated by "\r\n") and,
    // once complete, fork a child that execs the program with stdout
    // redirected to the connection socket.
    void process()
    {
        int idx = 0;
        int ret = -1;
        while( true )
        {
            idx = m_read_idx;
            // Bug fix: only BUFFER_SIZE-1-idx bytes are free (one byte
            // is reserved for '\0'); the original passed BUFFER_SIZE as
            // the length and could overflow m_buf once idx > 0.
            ret = recv( m_sockfd, m_buf + idx, BUFFER_SIZE - 1 - idx, 0 );
            if( ret < 0 )
            {
                // EAGAIN just means the socket is drained (ET mode);
                // any other error closes the connection.
                if( errno != EAGAIN )
                {
                    removefd( m_epollfd, m_sockfd );
                }
                break;
            }
            else if( ret == 0 )
            {
                // Peer closed the connection.
                removefd( m_epollfd, m_sockfd );
                break;
            }
            else
            {
                m_read_idx += ret;
                printf( "user content is : %s\n", m_buf );
                // Scan for the "\r\n" request terminator.
                for( ; idx < m_read_idx; ++idx )
                {
                    if( ( idx >= 1 ) && ( m_buf[idx-1] == '\r' ) && ( m_buf[idx] == '\n' ) )
                    {
                        break;
                    }
                }
                // Terminator not seen yet: read more client data.
                if( idx == m_read_idx )
                {
                    continue;
                }
                m_buf[idx-1] = '\0';
                char *file_name = m_buf;
                // Make sure the requested CGI program exists.
                if( access( file_name, F_OK ) == -1 )
                {
                    removefd( m_epollfd, m_sockfd );
                    break;
                }
                // Fork a child to execute the CGI program.
                ret = fork();
                if( ret == -1 )
                {
                    removefd( m_epollfd, m_sockfd );
                    break;
                }
                else if( ret > 0 )
                {
                    // Parent: the connection now belongs to the CGI
                    // child; just drop our reference.
                    removefd( m_epollfd, m_sockfd );
                    break;
                }
                else
                {
                    // Child: redirect stdout to the connection socket,
                    // then exec the requested program.
                    close( STDOUT_FILENO );
                    dup( m_sockfd );
                    // Bug fix: execl's argument list must end with a
                    // null pointer, not a bare 0 (undefined behavior in
                    // a varargs call).
                    execl( m_buf, m_buf, ( char* )NULL );
                    exit( 0 );
                }
            }
        }
    }
private:
    static const int BUFFER_SIZE = 1024;  // read buffer size
    static int m_epollfd;                 // epoll instance shared by all connections of this worker
    int m_sockfd;                         // connection socket
    sockaddr_in m_address;                // peer address
    char m_buf[BUFFER_SIZE];              // request buffer
    int m_read_idx;                       // bytes read so far
};
int cgi_conn::m_epollfd = -1;
// Entry point of the class-based process-pool CGI server: create the
// listening socket, build the singleton pool, and run it.
int main( int argc, char *argv[] )
{
    if( argc <= 2 )
    {
        printf( "usage is :%s ip_address portnumber\n", basename( argv[0] ) );
        return 1;
    }
    const char *ip = argv[1];
    int port = atoi( argv[2] );
    int listenfd = socket( AF_INET, SOCK_STREAM, 0 );
    // Bug fix: the original wrote assert(listen>=0), which tests the
    // address of the listen() *function* (always true) instead of the
    // socket descriptor.
    assert( listenfd >= 0 );
    int ret = 0;
    struct sockaddr_in address;
    bzero( &address, sizeof( address ) );
    address.sin_family = AF_INET;
    inet_pton( AF_INET, ip, &address.sin_addr );
    address.sin_port = htons( port );
    ret = bind( listenfd, ( struct sockaddr* )&address, sizeof( address ) );
    // Bug fix: failure is signalled by -1; the original compared to 1,
    // so a failed bind/listen slipped through the assert.
    assert( ret != -1 );
    ret = listen( listenfd, 5 );
    assert( ret != -1 );
    processpool< cgi_conn > *pool = processpool< cgi_conn >::create( listenfd );
    if( pool )
    {
        pool->run();
        delete pool;
    }
    close( listenfd );  // main opened listenfd, so main closes it
    return 0;
}
关于父进程和子进程的地址空间简要的如下图所示:
二 线程池实现CGI服务器
3.1 整体流程与框架
线程池对象的作用,连接池对象的作用等等
代码的组织线索、组织原则
1)半同步半反应堆式:
主线程建立连接池对象,当客户请求连接的时候,由主线程负责将这个连接信息放入连接池的其中一个对象中,并维护这个信息。当客户端向服务端发送信息的时候,由主线程接收这些内容,接收到的信息内容放在这个连接对象的特定的缓冲区中。并将这个连接对象放到队列中,等待子线程来处理。
某个子线程从任务队列中获取到这个对象,并处理缓冲区的数据,处理完毕后,向这个连接对象的写缓冲区写上数据,然后在这个连接的描述符上注册EPOLLOUT事件。主线程的epoll_wait就会注意到这个事件,就会将这个缓冲区的信息全部发送给客户端。这就是整个流程。
(也就是说,主线程负责对远端客户的读和写,子线程只负责处理数据)
3.2 源码
见代码仓库
3.3 几个基础知识点
1)writev和readv的用法以及与常规read、write的区别以及send、recv的用法总结
2)epoll以及其ET模式,以及其epoll_one_shot
3) 信号处理函数;信号打断阻塞的系统调用后,是否重启这个系统调用:涉及到SA_RESTART的使用
4)send以及recv,以及read、write等的读写操作的使用汇总
5)线程池对象、连接conn对象中的static成员的性质;主线程与子线程的哪些资源是共用的,一般来说,单线程环境下,全局变量是共用的,但是多线程环境下并且在基于对象的环境下,哪些变量是共用的呢,使用static可以使其成为全局的吗?各个对象以及各种变量类型在内存中是怎么分布的?
6)关于epoll的异常,EPOLLRDHUP、EPOLLHUP、EPOLLERR的情况是什么情况
7)线程池的实现中涉及到的C++知识
8)HTTP解析中的状态机运行,推动方式,主从状态机的耦合问题
9)套接字选项中的SO_ERROR
10)mmap
11)什么是reactor和proactor
12)打开的文件的上限?文件描述符的上限?