Source Code Analysis of spserver, an Excellent Lightweight Network Framework (Part 2)

1. Analyzing the socket read/write flow

void SP_EventCallback :: onRead( int fd, short events, void * arg )
{
    SP_Session * session = (SP_Session*)arg;

    session->setReading( 0 );

    SP_Sid_t sid = session->getSid();

    if( EV_READ & events ) {
        int len = session->getIOChannel()->receive( session );

        if( len > 0 ) {
            session->addRead( len );
            if( 0 == session->getRunning() ) {
                SP_MsgDecoder * decoder = session->getRequest()->getMsgDecoder();

                if( SP_MsgDecoder::eOK == decoder->decode( session->getInBuffer() ) ) {
                    SP_EventHelper::doWork( session );
                }
            }
            addEvent( session, EV_READ, -1 );
        } else {
            int saved = errno;

            if( 0 != errno ) {
                sp_syslog( LOG_WARNING, "session(%d.%d) read error, errno %d, status %d",
                        sid.mKey, sid.mSeq, errno, session->getStatus() );
            }

            if( EAGAIN != saved ) {
                if( 0 == session->getRunning() ) {
                    SP_EventHelper::doError( session );
                } else {
                    sp_syslog( LOG_NOTICE, "session(%d.%d) busy, process session error later",
                            sid.mKey, sid.mSeq );
                    // If this session is running, then onResponse will add write event for this session.
                    // It will be processed as write fail at the last. So no need to re-add event here.
                }
            } else {
                addEvent( session, EV_READ, -1 );
            }
        }
    } else {
        if( 0 == session->getRunning() ) {
            SP_EventHelper::doTimeout( session );
        } else {
            sp_syslog( LOG_NOTICE, "session(%d.%d) busy, process session timeout later",
                    sid.mKey, sid.mSeq );
            // If this session is running, then onResponse will add write event for this session.
            // It will be processed as write fail at the last. So no need to re-add event here.
        }
    }
}

Once the read event arrives, int len = session->getIOChannel()->receive( session ); reads data from the underlying socket through the IO channel. Here the IOChannel is actually an SP_DefaultIOChannel, so the call lands in SP_DefaultIOChannel's receive method, which uses libevent's evbuffer_read to pull the data in.
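What that receive call amounts to can be sketched as follows (mFd and the getEvBuffer() accessor are illustrative assumptions; only evbuffer_read itself is the real libevent API):

// A hedged sketch of what SP_DefaultIOChannel::receive boils down to.
// The member name mFd and the getEvBuffer() accessor are assumptions,
// not the exact spserver code.
int SP_DefaultIOChannel :: receive( SP_Session * session )
{
    // Pull whatever the socket currently has into the session's input
    // buffer.  The third argument of -1 means "read as much as is
    // available"; the call never blocks, because onRead only fires when
    // the fd is already readable.  Returns bytes read, 0 on EOF, -1 on error.
    return evbuffer_read( getEvBuffer( session->getInBuffer() ), mFd, -1 );
}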

Note that this read is triggered by onRead, so it does not block. After the data has been read, the check SP_MsgDecoder::eOK == decoder->decode( session->getInBuffer() ) hands control to the protocol-handling flow: the protocol decoder parses the buffer and extracts a protocol message. Let's look at a typical decoder:

int SP_LineMsgDecoder :: decode( SP_Buffer * inBuffer )
{
    if( NULL != mLine ) free( mLine );
    mLine = inBuffer->getLine();

    return NULL == mLine ? eMoreData : eOK;
}

 

The underlying buffer is inBuffer. The decoder pulls one complete line out of it; once a full line is available it returns eOK, otherwise eMoreData to signal that more bytes are needed.
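getLine() itself is not shown here, but its job is easy to picture: scan the buffered bytes for a "\r\n" terminator and only hand back a line once the terminator has arrived. A minimal standalone sketch of that idea (illustrative only, not the SP_Buffer implementation):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Find "\r\n" in (data, len) and return a malloc'ed copy of the line before
// it, or NULL if no complete line has arrived yet.  consumed is set to the
// number of bytes (line + CRLF) the caller should drain from its buffer.
char * takeLine( const char * data, size_t len, size_t * consumed )
{
    for( size_t i = 0; i + 1 < len; i++ ) {
        if( '\r' == data[ i ] && '\n' == data[ i + 1 ] ) {
            char * line = (char*)malloc( i + 1 );
            memcpy( line, data, i );
            line[ i ] = '\0';
            *consumed = i + 2;
            return line;           // complete line  -> decoder returns eOK
        }
    }
    return NULL;                   // incomplete yet -> decoder returns eMoreData
}

int main()
{
    const char buf[] = "hello world\r\npartial";
    size_t consumed = 0;
    char * line = takeLine( buf, sizeof( buf ) - 1, &consumed );
    printf( "line=[%s] consumed=%zu\n", line ? line : "(null)", consumed );
    free( line );
    return 0;
}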

SP_EventHelper::doWork( session ); then goes off to find the protocol handler and run it. Here is a typical handler:

class SP_EchoHandler : public SP_Handler {
public:
    SP_EchoHandler(){}
    virtual ~SP_EchoHandler(){}

    // return -1 : terminate session, 0 : continue
    virtual int start( SP_Request * request, SP_Response * response ) {
        request->setMsgDecoder( new SP_MultiLineMsgDecoder() );
        response->getReply()->getMsg()->append(
            "Welcome to line echo server, enter 'quit' to quit./r/n" );

        return 0;
    }

    // return -1 : terminate session, 0 : continue
    virtual int handle( SP_Request * request, SP_Response * response ) {
        SP_MultiLineMsgDecoder * decoder = (SP_MultiLineMsgDecoder*)request->getMsgDecoder();
        SP_CircleQueue * queue = decoder->getQueue();

        int ret = 0;
        for( ; NULL != queue->top(); ) {
            char * line = (char*)queue->pop();

            if( 0 != strcasecmp( line, "quit" ) ) {
                response->getReply()->getMsg()->append( line );
                response->getReply()->getMsg()->append( "/r/n" );
            } else {
                response->getReply()->getMsg()->append( "Byebye/r/n" );
                ret = -1;
            }

            free( line );
        }

        return ret;
    }

    virtual void error( SP_Response * response ) {}

    virtual void timeout( SP_Response * response ) {}

    virtual void close() {}
};

This is the code of the testecho server. SP_EventHelper::doWork( session ); ultimately routes to virtual int handle( SP_Request * request, SP_Response * response ) for execution.

The concrete execution path is as follows:

void SP_EventHelper :: doWork( SP_Session * session )
{
    if( SP_Session::eNormal == session->getStatus() ) {
        session->setRunning( 1 );
        SP_EventArg * eventArg = (SP_EventArg*)session->getArg();

        eventArg->getInputResultQueue()->push( new SP_SimpleTask( worker, session, 1 ) );
    } else {
        SP_Sid_t sid = session->getSid();

        char buffer[ 16 ] = { 0 };
        session->getInBuffer()->take( buffer, sizeof( buffer ) );
        sp_syslog( LOG_WARNING, "session(%d.%d) status is %d, ignore [%s...] (%dB)",
            sid.mKey, sid.mSeq, session->getStatus(), buffer, session->getInBuffer()->getSize() );
        session->getInBuffer()->reset();
    }
}

So SP_EventHelper::doWork( session ) turns into eventArg->getInputResultQueue()->push( new SP_SimpleTask( worker, session, 1 ) ); — the worker function and the session are wrapped into a task and queued for execution.
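Before looking at how that queue is drained, note what SP_SimpleTask( worker, session, 1 ) wraps: a static worker routine that eventually runs the user's handler on a "work" thread. A hedged sketch of that routine (the SP_Response constructor, getHandler() and the final hand-off back to the event loop are simplified assumptions; the real SP_EventHelper::worker differs in detail):

// Hedged sketch of the worker routine wrapped by SP_SimpleTask (simplified).
static void worker( void * arg )
{
    SP_Session * session = (SP_Session*)arg;

    // The user's SP_Handler::handle() runs here, on a "work" thread, so
    // slow business logic never blocks the libevent loop.
    SP_Response * response = new SP_Response( session->getSid() );
    int ret = session->getHandler()->handle( session->getRequest(), response );
    // ret == -1 means the handler asked for the session to be closed.

    // Hand the finished response back to the event-loop thread, which will
    // register the EV_WRITE event and send the reply out.
    // (pushResponseToEventLoop is a hypothetical name for that hand-off.)
    pushResponseToEventLoop( session, response, ret );
}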

eventArg holds the InputResultQueue internally. Let's return to int SP_Server :: start():

int SP_Server :: start()
{
#ifdef SIGPIPE
    /* Don't die with SIGPIPE on remote read shutdown. That's dumb. */
    signal( SIGPIPE, SIG_IGN );
#endif

    int ret = 0;
    int listenFD = -1;

    ret = SP_IOUtils::tcpListen( mBindIP, mPort, &listenFD, 0 );

    if( 0 == ret ) {

        SP_EventArg eventArg( mTimeout );

        // Clean close on SIGINT or SIGTERM.
        struct event evSigInt, evSigTerm;
        signal_set( &evSigInt, SIGINT,  sigHandler, this );
        event_base_set( eventArg.getEventBase(), &evSigInt );
        signal_add( &evSigInt, NULL);
        signal_set( &evSigTerm, SIGTERM, sigHandler, this );
        event_base_set( eventArg.getEventBase(), &evSigTerm );
        signal_add( &evSigTerm, NULL);

        SP_AcceptArg_t acceptArg;
        memset( &acceptArg, 0, sizeof( SP_AcceptArg_t ) );

        if( NULL == mIOChannelFactory ) {
            mIOChannelFactory = new SP_DefaultIOChannelFactory();
        }
        acceptArg.mEventArg = &eventArg;
        acceptArg.mHandlerFactory = mHandlerFactory;
        acceptArg.mIOChannelFactory = mIOChannelFactory;
        acceptArg.mReqQueueSize = mReqQueueSize;
        acceptArg.mMaxConnections = mMaxConnections;
        acceptArg.mRefusedMsg = mRefusedMsg;

        struct event evAccept;
        event_set( &evAccept, listenFD, EV_READ|EV_PERSIST,
                SP_EventCallback::onAccept, &acceptArg );
        event_base_set( eventArg.getEventBase(), &evAccept );
        event_add( &evAccept, NULL );

        SP_Executor workerExecutor( mMaxThreads, "work" );
        SP_Executor actExecutor( 1, "act" );

        SP_CompletionHandler * completionHandler = mHandlerFactory->createCompletionHandler();

        /* Start the event loop. */
        while( 0 == mIsShutdown ) {
            event_base_loop( eventArg.getEventBase(), EVLOOP_ONCE );

            for( ; NULL != eventArg.getInputResultQueue()->top(); ) {
                SP_Task * task = (SP_Task*)eventArg.getInputResultQueue()->pop();
                workerExecutor.execute( task );
            }

            for( ; NULL != eventArg.getOutputResultQueue()->top(); ) {
                SP_Message * msg = (SP_Message*)eventArg.getOutputResultQueue()->pop();

                void ** arg = ( void** )malloc( sizeof( void * ) * 2 );
                arg[ 0 ] = (void*)completionHandler;
                arg[ 1 ] = (void*)msg;

                actExecutor.execute( outputCompleted, arg );
            }
        }

        delete completionHandler;

……   

}

SP_Executor workerExecutor( mMaxThreads, "work" ); and SP_Executor actExecutor( 1, "act" ); are the two key executor thread pools.

In the loop

            for( ; NULL != eventArg.getInputResultQueue()->top(); ) {
                SP_Task * task = (SP_Task*)eventArg.getInputResultQueue()->pop();
                workerExecutor.execute( task );
            }

eventArg.getInputResultQueue() is drained task by task, and each task is handed to workerExecutor.execute( task ); the task wraps a call that ends in virtual int handle( SP_Request * request, SP_Response * response ), as sketched below.
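SP_SimpleTask itself is little more than a function pointer plus its argument; a sketch of its shape (member and typedef names are illustrative assumptions) makes the hand-off easy to follow:

// Sketch of the task object pushed into the InputResultQueue.  Member and
// typedef names are assumptions for illustration.
class SP_SimpleTask : public SP_Task {
public:
    typedef void ( * Func_t )( void * );

    SP_SimpleTask( Func_t func, void * arg, int deleteSessionAfterRun )
        : mFunc( func ), mArg( arg ), mFlag( deleteSessionAfterRun ) {}

    virtual ~SP_SimpleTask() {}

    // Called by a workerExecutor thread; for the read path this ends up
    // invoking worker( session ), i.e. the user's handle() method.
    virtual void run() { mFunc( mArg ); }

private:
    Func_t mFunc;
    void * mArg;
    int mFlag;
};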

Summary: at this point the read path is clear. Client data arrives on the socket and triggers the onRead event; the decoder is invoked to check whether protocol decoding succeeded (eOK); on success the handle method is wrapped into a task, pushed into the InputResultQueue, and the workerExecutor thread pool pops the task and executes it.
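To close the loop, here is roughly how testecho wires everything together (the factory class name and the setter / runForever method names are quoted from memory and should be checked against the testecho sources; treat this as a sketch, not the exact program):

// Rough sketch of a testecho-style main(); the names noted above are assumptions.
int main( int argc, char * argv[] )
{
    int port = 3333;

    // SP_EchoHandlerFactory: assumed trivial factory whose create() returns
    // new SP_EchoHandler().
    SP_Server server( "", port, new SP_EchoHandlerFactory() );

    server.setTimeout( 60 );          // per-connection idle timeout, seconds
    server.setMaxThreads( 4 );        // size of the "work" executor pool
    server.setReqQueueSize( 100, "Server is busy now!\r\n" );

    server.runForever();              // drives the start() loop shown above

    return 0;
}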
 

Writing data out follows a similar process.

(To be continued...)
