An echo server based on boost.asio (3)

The company is rushing a project and working people like dogs, fuck!

Last time (part 2) the connection list was protected with a lock; this time each io_service gets its own list, so there is no contention to lock against in the first place.
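
The post does not show the supporting declarations, so here is a minimal sketch of what the code below appears to assume: the smart-pointer typedefs, and a ConnectionManager that is most likely just a plain std::list of connection pointers (the overloaded push_back mentioned later matches std::list exactly). Everything in this block is an assumption, not the original code.

// Assumed declarations (a sketch, not the original code): each io_service
// gets its own ConnectionManager, so the list itself needs no lock.
#include <list>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>

using namespace boost::asio;   // the classes below use ip::tcp, buffer, io_service unqualified

class TestEchoConnection;

typedef boost::shared_ptr<io_service>          io_service_ptr;
typedef boost::shared_ptr<ip::tcp::socket>     socket_ptr;
typedef boost::shared_ptr<TestEchoConnection>  TestEchoConnection_ptr;

// Plain std::list: push_back(const T&) / push_back(T&&), remove(const T&).
typedef std::list<TestEchoConnection_ptr>      ConnectionManager;
typedef boost::shared_ptr<ConnectionManager>   ConnectionManager_ptr;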

class TestEchoServer2 : public boost::enable_shared_from_this<TestEchoServer2>
        , private boost::noncopyable
{
public:
    TestEchoServer2(io_service_pool &service_pool, int port)
        : m_io_service_pool(service_pool)
        , m_endpoint(ip::tcp::v4(), port)
        , m_acceptor(*(m_io_service_pool.get()), m_endpoint)
    {

    }

    ~TestEchoServer2()
    {

    }

    void start_accept()
    {
        // Take an io_service from the pool and create the new connection on it,
        // together with the ConnectionManager that belongs to that io_service.
        io_service_ptr tp = m_io_service_pool.get();
        m_connect.reset(new TestEchoConnection(*tp, *getConnectManager(tp)));
        m_acceptor.async_accept(m_connect->socket(), boost::bind(&TestEchoServer2::handle_accept, shared_from_this(), _1));
    }



private:

    void handle_accept(const boost::system::error_code &err)
    {
        if (!err) {
            // The connection adds itself to its ConnectionManager in start(),
            // on its own io_service thread, rather than being pushed back here.
            //            m_connectionManager.push_back(m_connect);
            m_connect->start();
        }
        start_accept();
    }

    ConnectionManager_ptr getConnectManager(const io_service_ptr& val)
    {
        // Find the ConnectionManager mapped to this io_service, creating it
        // lazily on first use. m_map is only touched from start_accept /
        // handle_accept, so it needs no lock of its own.
        for (auto ite = m_map.begin(); ite != m_map.end(); ++ite) {
            if (ite->first == val) {
                return ite->second;
            }
        }
        std::pair<io_service_ptr, ConnectionManager_ptr> temp;
        temp.first = val;
        temp.second = ConnectionManager_ptr(new ConnectionManager());
        m_map.push_back(temp);
        return temp.second;
    }


private:
    io_service_pool& m_io_service_pool;
    ip::tcp::endpoint m_endpoint;
    ip::tcp::acceptor m_acceptor;
    //    MutexConnectionManager m_connectionManager;
    TestEchoConnection_ptr m_connect;
    std::vector<std::pair<io_service_ptr, ConnectionManager_ptr>> m_map;

};
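
The io_service_pool itself is not shown either. A minimal sketch of a pool that matches how it is used above (get() hands out shared_ptr-wrapped io_services, one thread drives each) could look like this; the run()/stop() methods and the work objects are assumptions modelled on the asio http/server2 example, not the author's actual class:

// A sketch of io_service_pool under the assumptions above: a fixed set of
// io_service objects, handed out round-robin, each driven by its own thread.
#include <cstddef>
#include <vector>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/noncopyable.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/thread.hpp>

typedef boost::shared_ptr<boost::asio::io_service> io_service_ptr;

class io_service_pool : private boost::noncopyable
{
public:
    explicit io_service_pool(std::size_t pool_size)
        : m_next(0)
    {
        for (std::size_t i = 0; i < pool_size; ++i) {
            io_service_ptr ios(new boost::asio::io_service);
            m_io_services.push_back(ios);
            // work keeps run() from returning while a service has no handlers
            m_work.push_back(boost::shared_ptr<boost::asio::io_service::work>(
                                 new boost::asio::io_service::work(*ios)));
        }
    }

    // Hand out io_services round-robin.
    io_service_ptr get()
    {
        io_service_ptr ios = m_io_services[m_next];
        m_next = (m_next + 1) % m_io_services.size();
        return ios;
    }

    // One thread per io_service; blocks until stop() is called.
    void run()
    {
        std::vector<boost::shared_ptr<boost::thread>> threads;
        for (std::size_t i = 0; i < m_io_services.size(); ++i) {
            threads.push_back(boost::shared_ptr<boost::thread>(
                new boost::thread(boost::bind(&io_service_pool::run_service, m_io_services[i]))));
        }
        for (std::size_t i = 0; i < threads.size(); ++i) {
            threads[i]->join();
        }
    }

    void stop()
    {
        for (std::size_t i = 0; i < m_io_services.size(); ++i) {
            m_io_services[i]->stop();
        }
    }

private:
    // io_service::run() is overloaded, so wrap it instead of binding it directly.
    static void run_service(io_service_ptr ios)
    {
        ios->run();
    }

    std::vector<io_service_ptr> m_io_services;
    std::vector<boost::shared_ptr<boost::asio::io_service::work>> m_work;
    std::size_t m_next;
};

Wiring it together might then look roughly like this (pool size and port are made up for the example):

int main()
{
    io_service_pool pool(4);                    // assumed pool size

    boost::shared_ptr<TestEchoServer2> server(new TestEchoServer2(pool, 8000));  // assumed port
    server->start_accept();                     // must go through a shared_ptr (shared_from_this)

    pool.run();                                 // one thread per io_service, blocks here
    return 0;
}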

This version keeps a mapping from io_service to ConnectionManager.
Originally the connection was added to its ConnectionManager directly in handle_accept, but the accept handler may run on a different thread than the one that handles the connection, so the push_back is instead posted (via bind) onto the connection's own thread.
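
The guarantee this relies on is that each io_service in the pool is driven by exactly one thread, so a handler posted to it always runs on that thread no matter who posts it; that is what makes the unlocked per-io_service list safe. A tiny standalone demonstration of that guarantee (hypothetical, just to illustrate the point):

// A handler posted from one thread runs on the thread that calls run().
#include <iostream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>

int main()
{
    boost::asio::io_service ios;
    boost::asio::io_service::work work(ios);    // keep run() alive until stop()

    boost::thread runner([&ios] {
        std::cout << "run() thread:   " << boost::this_thread::get_id() << std::endl;
        ios.run();
    });

    // Posted from the main thread, executed on the runner thread -- the same
    // guarantee the echo server uses for its per-io_service ConnectionManager.
    ios.post([&ios] {
        std::cout << "handler thread: " << boost::this_thread::get_id() << std::endl;
        ios.stop();
    });

    runner.join();
    return 0;
}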

class TestEchoConnection : public boost::enable_shared_from_this<TestEchoConnection>
        , private boost::noncopyable
{
public:
    TestEchoConnection(io_service &server, ConnectionManager &connectManager)
        : m_server(server)
        , m_connectManager(connectManager)
        , m_socket(new ip::tcp::socket(m_server))
    {

    }

    ~TestEchoConnection()
    {
        std::cout << "~TestEchoConnection" << std::endl;
    }

    ip::tcp::socket& socket()
    {
        return *m_socket;
    }

    void start()
    {
        // Register this connection with its manager on the connection's own
        // io_service thread. push_back is overloaded (const lvalue reference
        // and rvalue reference), so the static_cast names the exact overload
        // boost::bind should take.
        m_server.post(boost::bind(static_cast<void (ConnectionManager::*)(const ConnectionManager::value_type&)>(&ConnectionManager::push_back),
                                  &m_connectManager, TestEchoConnection_ptr(shared_from_this())));
        m_socket->async_read_some(buffer(m_buffer), boost::bind(&TestEchoConnection::handle_read, shared_from_this(),
                                                                _1, _2));
    }


    void stop()
    {
        m_socket->close();
        // Remove ourselves on the connection's own io_service thread as well,
        // so the per-io_service list is never touched from another thread.
        //        m_connectManager.remove(TestEchoConnection_ptr(shared_from_this()));
        m_server.post(boost::bind(&ConnectionManager::remove, &m_connectManager,
                                  TestEchoConnection_ptr(shared_from_this())));
    }


private:
    void handle_read(const boost::system::error_code &ec, size_t data_size)
    {
        if (!ec) {
            std::cout << "handle_read->data size:" << data_size << std::endl;
            // Echo back only the bytes actually received, not the whole buffer.
            m_socket->async_write_some(buffer(m_buffer, data_size), boost::bind(&TestEchoConnection::handler_write,
                                                                                shared_from_this(), _1));
            //            m_socket->async_read_some(buffer(m_buffer), boost::bind(&TestEchoConnection::handle_read, shared_from_this(),
            //                                                                    _1, _2));
        }
        else if (ec == error::operation_aborted || ec == error::eof) {
            std::cout << "handle_read: remove" << std::endl;
            stop();
        }
    }

    void handler_write(const boost::system::error_code &ec)
    {
        if (!ec) {
            memset(&m_buffer, 0, sizeof(m_buffer));
            //            m_socket->async_read_some(buffer(m_buffer), boost::bind(&TestEchoConnection::handle_read, shared_from_this(),
            //                                                                    _1, _2));
        }
        else if (ec == error::operation_aborted) {
            std::cout << "handler_write: remove" << std::endl;
        }
        // With the follow-up read commented out, the connection is closed
        // after a single echo.
        stop();
    }




private:
    io_service& m_server;
    //    MutexConnectionManager& m_connectManager;
    ConnectionManager& m_connectManager;
    socket_ptr m_socket;
    boost::array<char, 1029> m_buffer;


};

Note the io_service::post call: the list's push_back is now an overloaded function, one overload taking an rvalue reference, so a static_cast is needed inside the bind to name the exact overload.
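
For clarity, here is the same disambiguation in isolation, with hypothetical names: since C++11, std::list<T>::push_back is declared for both const T& and T&&, so taking its address needs a cast to pick one signature.

// Picking one overload of std::list::push_back for boost::bind (hypothetical example).
#include <list>
#include <string>
#include <boost/asio.hpp>
#include <boost/bind.hpp>

typedef std::list<std::string> StringList;

void post_push_back(boost::asio::io_service &ios, StringList &lst, const std::string &value)
{
    // &StringList::push_back alone is ambiguous (const-lvalue vs. rvalue overload);
    // the static_cast names the exact member-function signature to bind.
    ios.post(boost::bind(
        static_cast<void (StringList::*)(const StringList::value_type&)>(&StringList::push_back),
        &lst, value));
}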
Test results:

Transactions:               3808 hits
Availability:             100.00 %
Elapsed time:              19.38 secs
Data transferred:           3.20 MB
Response time:              0.00 secs
Transaction rate:         196.49 trans/sec
Throughput:             0.16 MB/sec
Concurrency:                0.27
Successful transactions:        3808
Failed transactions:               0
Longest transaction:            0.01
Shortest transaction:           0.00

Introducing multiple threads did not show a measurable performance gain. Most likely the load has not yet reached the point where the two designs diverge; my machine cannot sustain enough concurrent users to push it there.
