C++实现gRPC异步双向流式的客户端和服务端(一)

客户端有几点说明
1.回调对象用的对象的函数指针,形如connected_func_=std::bind(&AsyncClientBidiStreamImpl::Connected, this, std::placeholders::_1),没有用状态机变量控制异步回调,
因为如果用状态变量控制,连续调用两次不同api并需要传入调用对象时没法实现,比如        
obj.statu = DISCONNECT;
stream_->Finish(&status_, &obj);
obj.statu = READ_DONE;
stream_->Read(&status_, &obj);
这种写法就会有问题,变量statu的值会被第二次调用给覆盖为READ_DONE,起不到状态流转的作用了。
2.发送请求时候进行连接调用,因为我这边需求是请求地址不能提前知道,只有在请求时才能知道地址,这块实现当时遇到一些坑需要注意,连接失败重试的时候必须是新的调用对象
3.客户端是有缓存stub对象,起到重用连接的作用

Client.cpp

#include <iostream>
#include "Client.h"
#include "RpcClient.h"
#include "Common.h"
#include "LuaInterface.h"
#include "Log.h"

// Definition of the static instance counter declared in Client.h (debug aid).
// NOTE(review): plain int32_t — not thread-safe if stream objects are created
// or destroyed from multiple threads; confirm single-threaded use or switch
// to std::atomic<int32_t>.
int32_t AsyncClientBidiStreamImpl::object_count_ = 0;

// Builds one async bidirectional-stream call object and immediately starts
// the stream (Start() is invoked at the end of construction).
//
// The per-event callbacks (connected/write/read/disconnect) are bound member
// functions rather than a single state variable: each tag handed to the
// completion queue is the address of a distinct std::function member, so two
// simultaneously pending operations (e.g. Finish + Read) cannot clobber each
// other's state.
//
// rpc/cq/stub are borrowed (not owned) and must outlive this object.
AsyncClientBidiStreamImpl::AsyncClientBidiStreamImpl(RpcClient* rpc, 
    CompletionQueue* cq, CommonService::Stub* stub,
    std::function<void(grpc::Status, CommonResponse*)> callback)
    :context_(new ClientContext),
    rpc_(rpc),
    callback_(callback),
    stub_(stub),
    cq_(cq),
    response_(),
    connected_func_(std::bind(&AsyncClientBidiStreamImpl::Connected, this, std::placeholders::_1)),
    write_done_func_(std::bind(&AsyncClientBidiStreamImpl::WriteDone, this, std::placeholders::_1)),
    read_done_func_(std::bind(&AsyncClientBidiStreamImpl::ReadDone, this, std::placeholders::_1)),
    disconnect_func_(std::bind(&AsyncClientBidiStreamImpl::Disconnect, this, std::placeholders::_1)),
    call_id_(UniqueIdGenerator::GetInstance().GetUniqueId())
{
    object_count_++;  // debug counter; decremented in the destructor
    //ConfigSetting::InitAsyncContext(context_.get(), rpc->GetChannelMgr()->GetAsyncTimeOut());

    Start();
}

// Initiates the async bidi stream on the completion queue. The connect
// completion is delivered to Connected() via &connected_func_ as the CQ tag.
// A fresh call object always starts with the reset flag cleared.
void AsyncClientBidiStreamImpl::Start()
{
    stream_ = stub_->AsyncSendBidiStreamMsg(context_.get(), cq_, &connected_func_);
    is_need_reset_ = false;
}

bool AsyncClientBidiStreamImpl::Connected(bool ok)
{
    if (ok)
    {

        is_connected = true;

        if (!stream_)
        {
            return false;
        }

        stream_->Finish(&status_, &disconnect_func_);
        stream_->Read(&response_, &read_done_func_);
        


        if(!queueReq.empty())
        {
            if (!is_writting_)
            {
                auto req = queueReq.front();

                queueReq.pop_front();

                is_writting_ = true;

                stream_->Write(req, &write_done_func_);

                LOG_DEBUG("[++xeg++]stream write 2 ...");
            }
        }
    }
    else
    {
        queueReq.clear();

        //close
        is_need_reset_ = true;
    }
    return true;
}

bool AsyncClientBidiStreamImpl::ReadDone(bool ok)
{
    if (!ok)
    {
        LOG_DEBUG("BidiStreamImpl|ReadDone() failed!!!Code:%d,Msg:%s",status_.error_code(),status_.error_message().c_str());

        //��ok == false��˵��stream�Ѿ��Ͽ�
        is_connected = false;
        return false;
    }

    int32_t ret = call_lua_func(LuaInterface::getInstance().LuaState(), sLuaCallBackFuncName, GetCallId(),(int32_t)status_.error_code(), response_.cmd(),response_.content(),response_.context());
    stream_->Read(&response_, &read_done_func_);
    return true;
}

// Queues (or immediately writes) one request on this stream and returns this
// stream's call id.
//
// FIX: the call id is captured BEFORE WriteMsg(). WriteMsg() may invoke
// Reset(), which replaces this object in RpcClient's map and destroys *this*
// ("delete this" idiom) — the original read call_id_ after that point, a
// use-after-free on the reset path.
int AsyncClientBidiStreamImpl::AsyncSendBidiStreamMsg(CommonRequest& req)
{
    const int id = GetCallId();
    WriteMsg(req);
    return id;
}

bool AsyncClientBidiStreamImpl::WriteMsg(CommonRequest& req)
{
    queueReq.push_back(req);
    

    if(is_connected)
    {
        if(!is_writting_)
        {
            auto item = queueReq.front();

            queueReq.pop_front();

            is_writting_ = true;
            stream_->Write(item, &write_done_func_);
        }
    }
    else if (is_need_reset_)
    {

        Reset();
        return true;
    }

    return true;

}

bool AsyncClientBidiStreamImpl::WriteDone(bool ok)
{
    if (!ok)
    {
        is_connected = false;
        queueReq.clear();
        return false;
    }

    is_writting_ = false;

    if(!queueReq.empty())
    {
        auto req = queueReq.front();

        queueReq.pop_front();

        is_writting_ = true;
        stream_->Write(req, &write_done_func_);
    }

    return true;
}

bool AsyncClientBidiStreamImpl::Disconnect(bool ok)
{
    is_connected = false;
    is_need_reset_ = true;

    return false;
}

void AsyncClientBidiStreamImpl::Reset()
{

    std::unique_ptr<AsyncClientBidiStreamImpl> clone = std::make_unique<AsyncClientBidiStreamImpl>(rpc_, cq_, stub_, callback_);
    rpc_->ResetBidiStream((uint64_t)stub_,std::move(clone));
}

client.h

#pragma once
#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include <queue>
#include <grpcpp/grpcpp.h>

#include "common_service.grpc.pb.h"
#include "IRpcCall.h"
#include "Log.h"

// One asynchronous gRPC bidirectional-stream call against CommonService.
// Every completion-queue event is tagged with the address of a dedicated
// std::function member (connected_func_ etc.), so simultaneously pending
// operations (e.g. Finish + Read) cannot clobber a shared state variable.
class AsyncClientBidiStreamImpl :public IRpcCall
{
public:
	AsyncClientBidiStreamImpl(RpcClient* rpc,CompletionQueue* cq, CommonService::Stub* stub,
		 std::function<void(grpc::Status, CommonResponse*)> callback);
	~AsyncClientBidiStreamImpl() override 
	{ 
		// FIX: decrement outside the log call. The original wrote
		// LOG_DEBUG(..., --object_count_); if LOG_DEBUG compiles to a no-op
		// in release builds, that side effect silently disappears and the
		// counter never goes down.
		--object_count_;
		LOG_DEBUG("AsyncClientBidiStreamImpl release call:%u,object_count:%d",call_id_,object_count_);
	}
	// Tears this call down and registers a replacement (may destroy *this*).
	void Reset() override;
	int GetCallId() { return call_id_;}
	static uint32_t GetObjectCount() { return object_count_; }

	// Completion-queue callbacks, one per pending-operation kind.
	bool Connected(bool ok);
	bool WriteDone(bool ok);
	bool ReadDone(bool ok);
	bool Disconnect(bool ok);

	void Start();
	void TryCancel() { if (context_) { context_->TryCancel(); } }

	// Queues req for sending and returns this stream's call id.
	int AsyncSendBidiStreamMsg(CommonRequest& req);
private:
	bool WriteMsg(CommonRequest& req);

	std::unique_ptr<ClientContext> context_;
	CommonResponse response_;
	CompletionQueue* cq_;
	CommonService::Stub* stub_;
	grpc::Status status_;
	RpcClient* rpc_;
	// CQ tag objects — their addresses are handed to gRPC as void* tags.
	NextCallFunc connected_func_;
	NextCallFunc write_done_func_;
	NextCallFunc read_done_func_;
	NextCallFunc disconnect_func_;

	std::function<void(grpc::Status, CommonResponse*)> callback_;
	std::unique_ptr<grpc::ClientAsyncReaderWriter<CommonRequest, CommonResponse>>
		stream_;
	bool coalesce_{false};
	bool is_connected{ false };
	bool is_writting_{ false };      // at most one Write outstanding at a time
	bool is_need_reset_{ false };    // stream died; next send triggers Reset()
	uint32_t message_per_stream_{100};
	int32_t call_id_;

	std::deque<CommonRequest> queueReq;  // requests waiting for the Write slot

	//test
	static int32_t object_count_;
};

RpcClient.h

#pragma once
#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include <map>
#include "Client.h"
#include "ChannelMgr.h"

#include <grpcpp/grpcpp.h>

#include "common_service.grpc.pb.h"

using grpc::Channel;
using grpc::ClientAsyncResponseReader;
using grpc::ClientContext;
using grpc::CompletionQueue;
using grpc::Status;

using CallBackType = std::function<void(bool)>;

// Front-end for all RPC sends. Owns the per-stub bidi-stream call objects and
// the channel/stub cache (ChannelMgr).
class RpcClient
{
public:
	RpcClient(std::function<void(grpc::Status, CommonResponse*)>&& callback);
	virtual ~RpcClient(){}
	void Init();
	void SetCompletionQueue(CompletionQueue* cq);

	void SetChannelArgs(const std::string& name, int32_t value);
	void SetChannelMaxReceiveMessageSize(int size);
	void SetChannelMaxSendMessageSize(int size);
	void SetSyncTimeOut(uint32_t seconds);
	void SetAsyncTimeOut(uint32_t seconds);

	// Dispatches by type to sync-unary / async-unary / async-bidi-stream.
	std::pair<int,std::string> SendMsg(const std::string& srv_addr,int32_t type,int32_t cmd,const std::string& body, const std::string& context);

	std::pair<int, std::string> SyncSendMsg(CommonService::Stub* stub,const CommonRequest& req);
	int AsyncSendMsg(CommonService::Stub* stub,const CommonRequest& req);
	int AsyncSendBidiStreamMsg(CommonService::Stub* stub,CommonRequest& req);

	// Replaces (and destroys) the stream impl stored under ptr. When called
	// from AsyncClientBidiStreamImpl::Reset(), the destroyed impl is the
	// caller itself ("delete this" idiom).
	void ResetBidiStream(uint64_t ptr,std::unique_ptr<AsyncClientBidiStreamImpl> newOne);

	ChannelMgr* GetChannelMgr();
protected:
	// FIX: was left uninitialized; a read before SetCompletionQueue() would
	// have been undefined behavior. Not owned by this class.
	CompletionQueue* cq_{ nullptr };
	std::function<void(grpc::Status, CommonResponse*)> callback_;
	std::map<uint64_t,std::unique_ptr<AsyncClientBidiStreamImpl> > bidi_stream_impls_;
	std::shared_ptr<ChannelMgr> channel_mgr_{ nullptr };
};

RpcClient.cpp

#include <iostream>
#include "RpcClient.h"
#include "Common.h"


// Stores the async-response callback and creates the channel/stub manager.
// make_shared replaces the raw `new ChannelMgr()` (single allocation,
// exception-safe).
RpcClient::RpcClient(std::function<void(grpc::Status, CommonResponse*)>&& callback)
    :callback_(std::move(callback))
    , channel_mgr_(std::make_shared<ChannelMgr>())
{}

// Stores the completion queue used by all async calls. The queue is owned and
// polled elsewhere; it must be set before any async send is issued.
void RpcClient::SetCompletionQueue(CompletionQueue* cq)
{
    cq_ = cq;
}

// Applies default channel settings: TCP keepalive pings every 30s with a 10s
// timeout (allowed even with no calls in flight), reconnect backoff capped at
// 10s, and 64 MiB maximum message size in both directions.
void RpcClient::Init()
{
    SetChannelArgs(GRPC_ARG_KEEPALIVE_TIME_MS, 30 * 1000);
    SetChannelArgs(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 10 * 1000);
    SetChannelArgs(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1);
    SetChannelArgs(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, 10 * 1000);

    SetChannelMaxReceiveMessageSize(1024 * 1024 * 64);
    SetChannelMaxSendMessageSize(1024 * 1024 * 64);
}

// Thin forwarders: all channel tuning is delegated to ChannelMgr, which
// applies these values when it creates new channels/stubs.

void RpcClient::SetChannelArgs(const std::string& name, int32_t value)
{
    channel_mgr_->SetChannelArgs(name, value);
}

// Maximum size (bytes) of messages this client will accept.
void RpcClient::SetChannelMaxReceiveMessageSize(int size)
{
    channel_mgr_->SetChannelMaxReceiveMessageSize(size);
}

// Maximum size (bytes) of messages this client will send.
void RpcClient::SetChannelMaxSendMessageSize(int size)
{
    channel_mgr_->SetChannelMaxSendMessageSize(size);
}

// Timeout in seconds — presumably applied to synchronous calls by ChannelMgr;
// the enforcement site is not visible here.
void RpcClient::SetSyncTimeOut(uint32_t seconds)
{
    channel_mgr_->SetSyncTimeOut(seconds);
}

// Timeout in seconds — presumably applied to asynchronous calls by ChannelMgr.
void RpcClient::SetAsyncTimeOut(uint32_t seconds)
{
    channel_mgr_->SetAsyncTimeOut(seconds);
}

// Installs newOne under key ptr, destroying whatever impl was stored there.
// NOTE: when called from AsyncClientBidiStreamImpl::Reset(), the destroyed
// impl is the caller itself ("delete this" idiom) — the caller must not touch
// its own members after this returns.
void RpcClient::ResetBidiStream(uint64_t ptr,std::unique_ptr<AsyncClientBidiStreamImpl> newOne)
{
    bidi_stream_impls_[ptr] = std::move(newOne);
}

// Non-owning accessor for the channel/stub cache.
ChannelMgr* RpcClient::GetChannelMgr()
{
    return channel_mgr_.get();
}

std::pair<int, std::string> RpcClient::SendMsg(const std::string& srv_addr,int32_t type,int32_t cmd,const std::string& body, const std::string& context)
{
    std::pair<int, std::string> result;
    std::shared_ptr<CommonService::Stub> pStub = channel_mgr_->FetchOrCreateStub(srv_addr);
    if (pStub == nullptr)
    {
        LOG_ERROR("[++xeg++]create stub fail , addr:%s",srv_addr.c_str());

        return std::pair<int, std::string>();
    }

    if (type == (int)ESYN_UNARY)
    {
        CommonRequest req;
        req.set_cmd(cmd);
        req.set_content(body);
        req.set_context(context);

        result = SyncSendMsg(pStub.get(),req);

    }
    else if (type == (int)EASYN_UNARY)
    {
        CommonRequest req;
        req.set_cmd(cmd);
        req.set_content(body);
        req.set_context(context);
        result = { AsyncSendMsg(pStub.get(),req),"" };
    }
    else if (type == (int)EASYN_BIDI_STREAM)
    {
        CommonRequest req;
        req.set_cmd(cmd);
        req.set_content(body);
        req.set_context(context);

        result = { AsyncSendBidiStreamMsg(pStub.get(),req), ""};
    }
    return result;
}

// Performs one blocking unary call.
// Returns {call_id, serialized response} on success, {-1, ""} on failure.
// Implements the original TODO ("print errorcode") by logging the gRPC
// status on the error path; the dead trailing statements were removed.
std::pair<int, std::string> RpcClient::SyncSendMsg(CommonService::Stub* stub,const CommonRequest& req)
{
    auto call = std::make_unique<SyncClientUnaryImpl>(stub, channel_mgr_);

    grpc::Status st;
    CommonResponse response = call->SyncSendMsg(req,st);

    if (!st.ok())
    {
        LOG_ERROR("SyncSendMsg failed,code:%d,msg:%s", st.error_code(), st.error_message().c_str());
        return { -1 , "" };
    }

    std::string outStr;
    response.SerializeToString(&outStr);
    return { call->GetCallId(), outStr };
}

// Fires one asynchronous unary call.
// NOTE(review): raw `new` with no matching delete in this function —
// presumably the call object deletes itself when its completion-queue event
// fires (the usual gRPC async-client pattern); confirm in AsyncClientUnaryImpl
// before changing ownership, otherwise this leaks.
int RpcClient::AsyncSendMsg(CommonService::Stub* stub,const CommonRequest& req)
{
    AsyncClientUnaryImpl* call = new AsyncClientUnaryImpl(cq_, stub, channel_mgr_);
    call->AsyncSendMsg(req);
    return call->GetCallId();
}



// Sends one request on the cached bidi stream for this stub, creating the
// stream object on first use. Returns the stream's call id (0 on null stub).
//
// FIX: the original performed a find() plus three separate operator[]
// lookups; a single reference to the map slot is used instead. Because it
// references the slot (not the pointee), it still sees the replacement
// object if the send triggers a Reset() that swaps the impl.
int RpcClient::AsyncSendBidiStreamMsg(CommonService::Stub* stub, CommonRequest& req)
{
    if (stub == nullptr)
    {
        return 0;
    }

    const uint64_t key = reinterpret_cast<uint64_t>(stub);

    // operator[] default-constructs an empty unique_ptr when absent — same
    // observable behavior as the original find-then-insert.
    auto& impl = bidi_stream_impls_[key];
    if (impl)
    {
        LOG_DEBUG("reuse stub");
    }
    else
    {
        impl = std::make_unique<AsyncClientBidiStreamImpl>(this, cq_, stub, callback_);

        LOG_DEBUG("create new stub");
    }

    impl->AsyncSendBidiStreamMsg(req);
    return impl->GetCallId();
}

ChannelMgr.cpp的关键接口如下

// Returns the cached stub for name, creating channel + stub on first use.
// The channel uses insecure credentials plus the manager's channel args.
// FIX: removed the unreachable trailing `return nullptr;` (both branches
// already returned) and the redundant else-after-return.
std::shared_ptr<CommonService::Stub> ChannelMgr::FetchOrCreateStub(const std::string& name)
{
	auto it = stubs_.find(name);
	if (it != stubs_.end()) {
		LOG_DEBUG("Get Old Stub ok!");
		return it->second;
	}

	LOG_DEBUG("Create New Stub in %s",name.c_str());

	std::shared_ptr<Channel> channel = grpc::CreateCustomChannel(
		ParseEndPointName(name),
		grpc::InsecureChannelCredentials(),
		GetChannelArgs());
	std::shared_ptr<CommonService::Stub> stub = CommonService::NewStub(channel);
	return stubs_.emplace(name, std::move(stub)).first->second;
}

IRpcCall.h

#pragma once
#include <algorithm>
#include <forward_list>
#include <functional>
#include <memory>
#include <mutex>
#include <thread>

#include <grpcpp/grpcpp.h>
#include <grpcpp/alarm.h>
#include "common_service.grpc.pb.h"




using grpc::ServerContext;
using grpc::CompletionQueue;
using grpc::ServerCompletionQueue;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerAsyncReader;
using grpc::ServerAsyncWriter;
using grpc::ServerAsyncReaderWriter;
using grpc::Status;
using grpc::Alarm;

using NextCallFunc = std::function<bool(bool)>;

// Base interface for every in-flight RPC call object.
// FIX: the destructor uselessly assigned `_callID = 0` on an object being
// destroyed, and the constructor body did the same assignment — both replaced
// by an in-class initializer and defaulted special members.
class IRpcCall
{
public:
    IRpcCall() = default;
    // Virtual: instances are deleted through base-class pointers
    // (e.g. unique_ptr<AsyncClientBidiStreamImpl> stored as map values).
    virtual ~IRpcCall() = default;
    virtual void Reset() = 0;             // start this back at a clean state
    // Optional hook: deliver a response to the call (default: no-op).
    virtual void Response(CommonResponse& resp,bool reset) {};

protected:
    uint64_t _callID{0};
};

  • 6
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值