Implementing a gRPC Async Bidirectional-Streaming Client and Server in C++ (Part 2)

Below is the server-side implementation. The main logic follows the test case in the gRPC source tree:

grpc-framework-master\test\cpp\qps\server_async.cc

The code is not complete; for example, the main loop and the call sites are omitted. In main(), something like the following is enough, provided rpc_server_->Init() has been called first:

    while (is_loop)
    {
        rpc_server_->RunOnce();
    }

A few notes:

1. The RPCRecorder class exists to let the lua side control when a call object is finished; skip it if you don't need it.

2. My framework is meant to run on a single business thread as far as possible, i.e. without spawning threads of its own. The main loop therefore uses the non-blocking AsyncNext() call, and the client and the server in the same process share one CompletionQueue. This pattern is not widely documented online, but it has held up without problems in our testing so far; a wiring sketch follows the code snippet below.

3. One unresolved issue: for the sake of resource utilization in cluster deployments we want the framework to use as few threads as possible, yet gRPC itself currently spins up around 27 threads. The lines below were meant to cap the thread count, but in practice they had no effect; the version we use does not seem to expose an interface that controls this directly, so solving it would probably require patching the gRPC source.

	int maxThreadNum = maxThread;
	builder_->SetResourceQuota(
		grpc::ResourceQuota("CommonService").SetMaxThreads(maxThreadNum));

	builder_->SetSyncServerOption(ServerBuilder::SyncServerOption::MIN_POLLERS, 1);
	builder_->SetSyncServerOption(ServerBuilder::SyncServerOption::MAX_POLLERS, 1);
	builder_->SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 1);
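
To make point 2 concrete, here is a minimal sketch of the intended wiring. Only RpcServer and its Init()/RunOnce()/GetCompletionQueue() come from the code in this post; RpcClient stands in for the client from part (一) and is assumed to register its tags on whatever queue it is given:

	RpcServer* rpc_server_ = new RpcServer();
	// address, max receive size, max send size, desired thread cap
	rpc_server_->Init("0.0.0.0:50051", 4 * 1024 * 1024, 4 * 1024 * 1024, 1);

	// Hypothetical client sharing the server's CompletionQueue, so that one
	// AsyncNext() loop drives both directions.
	RpcClient* rpc_client_ = new RpcClient(rpc_server_->GetCompletionQueue());

	bool is_loop = true;
	while (is_loop)
	{
		rpc_server_->RunOnce();  // pops at most one event; waits at most TickInterval
	}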

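One more piece of context: common_service.proto is not included in this post. Judging from the generated identifiers used below (CommonService, RequestSendMsg, RequestSendBidiStreamMsg, and the cmd/content/context accessors), it presumably looks roughly like this; the field numbers and exact field types are my guesses, not the original definition:

syntax = "proto3";
package xengine;

message CommonRequest {
    int32 cmd = 1;
    bytes content = 2;
    bytes context = 3;
}

message CommonResponse {
    int32 cmd = 1;
    bytes content = 2;
    bytes context = 3;
}

service CommonService {
    rpc SendMsg(CommonRequest) returns (CommonResponse);
    rpc SendBidiStreamMsg(stream CommonRequest) returns (stream CommonResponse);
}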
ServerCallImpl.h

#pragma once
#include <algorithm>
#include <forward_list>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <thread>
#include <string>
#include <grpcpp/grpcpp.h>
#include <grpcpp/alarm.h>
#include "common_service.grpc.pb.h"
#include "IRpcCall.h"

typedef xengine::CommonService::AsyncService ServiceType;


using grpc::ServerContext;
using grpc::CompletionQueue;
using grpc::ServerCompletionQueue;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerAsyncReader;
using grpc::ServerAsyncWriter;
using grpc::ServerAsyncReaderWriter;
using grpc::Status;
using grpc::Alarm;

using NextCallFunc = std::function<bool(bool)>;
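
// Tag convention: every tag handed to the CompletionQueue is a pointer to a
// NextCallFunc member of some call object. RunOnce() casts the tag back and
// invokes it with the event's `ok` flag; a return value of false means the
// call object deleted itself inside the callback.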

// Timer: wraps grpc::Alarm using the same NextCallFunc tag convention
class ServerAlarm final : public IRpcCall
{
public:
    ServerAlarm(std::function<void()>&& call);
    ~ServerAlarm() override;
    void Reset() override;

    bool AlarmDone(bool ok);
    void Set(grpc::CompletionQueue* cq, const gpr_timespec& deadline);
    void Set(grpc::CompletionQueue* cq, int64_t ms);

private:
    NextCallFunc alarm_done_func_;

    Alarm alarm_;
    std::function<void()> callback_;
};


// Unary request/response
class ServerCallUnaryImpl final : public IRpcCall
{
    using Method_Func = std::function<void(ServerContext*, CommonRequest*,
        grpc::ServerAsyncResponseWriter<CommonResponse>*, void*)>;
public:
    ServerCallUnaryImpl(Method_Func request_method, ServiceType* svc, grpc::ServerCompletionQueue* cq);
    ~ServerCallUnaryImpl() override {}
    void Reset() override;
    void CallLuaProcess(grpc::Status& status);
    void Response(CommonResponse& resp, bool reset) override;
private:
    bool Finisher(bool ok);
    bool Invoker(bool ok);

    void makeNew();

    std::unique_ptr<ServerContext> srv_ctx_;
    CommonRequest req_;

    NextCallFunc finish_func_;
    NextCallFunc invoke_func_;
    Method_Func request_method_;

    grpc::ServerAsyncResponseWriter<CommonResponse> response_writer_;

    ServiceType* sv_;
    grpc::ServerCompletionQueue* cq_;
};
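
// Unary lifecycle: Invoker() fires when a request arrives and immediately
// re-registers a fresh call object via makeNew(); the request is handed to
// lua, which later replies through RPCRecorder::SendMsg2Client -> Response();
// the Finish() completion then lands in Finisher(), which deletes the object.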


// Bidirectional streaming
class ServerCallBidiStreamImpl final : public IRpcCall
{
    using Method_Func = std::function<void(
        ServerContext*,
        grpc::ServerAsyncReaderWriter<CommonResponse, CommonRequest>*, void*)>;
public:
    ServerCallBidiStreamImpl(Method_Func request_method, ServiceType* svc, grpc::ServerCompletionQueue* cq);
    ~ServerCallBidiStreamImpl() override {}
    void Reset() override;
    void CallLuaProcess(grpc::Status& status);

    void Response(CommonResponse& resp, bool reset) override;

private:
    bool RequestDone(bool ok);
    bool ReadDone(bool ok);
    bool WriteDone(bool ok);
    bool FinishDone(bool ok);

    void makeNew();

    std::unique_ptr<ServerContext> srv_ctx_;
    CommonRequest req_;

    NextCallFunc request_done_func_;
    NextCallFunc read_done_func_;
    NextCallFunc write_done_func_;
    NextCallFunc finish_done_func_;

    Method_Func request_method_;

    grpc::ServerAsyncReaderWriter<CommonResponse, CommonRequest> stream_;

    ServiceType* sv_;
    grpc::ServerCompletionQueue* cq_;
};
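
// Bidi lifecycle: RequestDone() accepts the stream and issues the first
// Read(); each ReadDone() hands a message to lua; lua's reply comes back via
// Response() -> Write(), and WriteDone() then arms the next Read(). When the
// client half-closes (Read fails), Finish() completes into FinishDone(),
// which deletes the object.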


class RPCRecorder
{
public:
    RPCRecorder()
    {
        _id = 1;
        stamp = 0;
    }
    virtual ~RPCRecorder() {}
public:
    // send a reply back on the request identified by requestID
    int SendMsg2Client(uint64_t requestID, int cmd, const std::string& body, const std::string& context, bool reset = true);
    // reset (release) the request identified by requestID
    int ResetRequest(uint64_t requestID);

    void AddRequest(uint64_t id, IRpcCall* p)
    {
        _requestMap[id] = p;
    }

    void RemoveRequest(uint64_t id)
    {
        _requestMap.erase(id);
    }

    IRpcCall* Get(uint64_t id)
    {
        auto it = _requestMap.find(id);
        if (it == _requestMap.end())
        {
            return nullptr;
        }
        return it->second;
    }

    // generate a unique id for a request
    uint64_t GenGUID();

private:
    std::map<uint64_t, IRpcCall*> _requestMap;
    uint32_t _id;
    uint32_t stamp;
};

extern RPCRecorder *gRequestRecorder;
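
// Typical reply path (normally driven from lua): the handler receives
// (cmd, body, context, callid) and answers with
//   gRequestRecorder->SendMsg2Client(callid, cmd, body, context);
// or drops the call with
//   gRequestRecorder->ResetRequest(callid);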

ServerCallImpl.cpp

#include "ServerCallImpl.h"
#include <iostream>
#include "Common.h"
#include "LuaInterface.h"
#include "Log.h"
#include "TimeProvider.h"


// Global instance: records in-flight requests so lua can address them by call id
RPCRecorder *gRequestRecorder = new RPCRecorder();

#pragma region ServerCallUnaryImpl 

ServerCallUnaryImpl::ServerCallUnaryImpl(Method_Func request_method,ServiceType* svc,grpc::ServerCompletionQueue* cq)
	:srv_ctx_(new ServerContext),
	request_method_(request_method),
	invoke_func_(std::bind(&ServerCallUnaryImpl::Invoker, this, std::placeholders::_1)),
	finish_func_(std::bind(&ServerCallUnaryImpl::Finisher, this, std::placeholders::_1)),
	response_writer_(srv_ctx_.get()),
	sv_(svc),
	cq_(cq)
{
	_callID = gRequestRecorder->GenGUID();

	request_method_(srv_ctx_.get(), &req_, &response_writer_,
		&invoke_func_);

	gRequestRecorder->AddRequest(_callID,this);

	LOG_DEBUG("register unary msg event,call id:%llu",_callID);
	
}

void ServerCallUnaryImpl::Reset()
{
	gRequestRecorder->RemoveRequest(_callID);

	delete this;
}

bool ServerCallUnaryImpl::Finisher(bool ok)
{
	Reset();
	return false;
}

void ServerCallUnaryImpl::CallLuaProcess(grpc::Status& status)
{
	// args: cmd, body, context, callid
	call_lua_func(LuaInterface::getInstance().LuaState(), sLuaHandleMsgFuncName, req_.cmd(), req_.content(), req_.context(), _callID);
}

void ServerCallUnaryImpl::makeNew()
{
	auto request_unary = std::bind(
		&ServiceType::RequestSendMsg, sv_, std::placeholders::_1,
		std::placeholders::_2, std::placeholders::_3, cq_,
		cq_, std::placeholders::_4);

	new ServerCallUnaryImpl(request_unary,sv_,cq_);
}

bool ServerCallUnaryImpl::Invoker(bool ok)
{
	// Immediately register a fresh ServerCallUnaryImpl so the next incoming
	// request has a pending handler.
	makeNew();

	if (!ok)
	{
		Reset();
		return false;
	}

	grpc::Status status;
	CallLuaProcess(status);

	return true;
}


void ServerCallUnaryImpl::Response(CommonResponse& resp, bool reset)
{
	// `reset` is ignored for unary calls: Finish() always completes into
	// Finisher(), which deletes this call object.
	response_writer_.Finish(resp, grpc::Status::OK, &finish_func_);
}

#pragma endregion


#pragma region ServerCallBidiStreamImpl
ServerCallBidiStreamImpl::ServerCallBidiStreamImpl(Method_Func request_method,ServiceType* svc,grpc::ServerCompletionQueue* cq)
	:srv_ctx_(new ServerContext),
	request_method_(request_method),
	request_done_func_(std::bind(&ServerCallBidiStreamImpl::RequestDone, this, std::placeholders::_1)),
	read_done_func_(std::bind(&ServerCallBidiStreamImpl::ReadDone, this, std::placeholders::_1)),
	write_done_func_(std::bind(&ServerCallBidiStreamImpl::WriteDone, this, std::placeholders::_1)),
	finish_done_func_(std::bind(&ServerCallBidiStreamImpl::FinishDone, this, std::placeholders::_1)),
	stream_(srv_ctx_.get()),
	sv_(svc),
	cq_(cq)
{
	_callID = gRequestRecorder->GenGUID();

	request_method_(srv_ctx_.get(), &stream_, &request_done_func_);

	gRequestRecorder->AddRequest(_callID,this);

	LOG_DEBUG("[++xeg++]register bidistream msg event,call id:%llu",_callID);

}

void ServerCallBidiStreamImpl::Reset()
{

	gRequestRecorder->RemoveRequest(_callID);

	delete this;
}

void ServerCallBidiStreamImpl::CallLuaProcess(grpc::Status& status)
{
	// args: cmd, body, context, callid
	call_lua_func(LuaInterface::getInstance().LuaState(), sLuaHandleMsgFuncName, req_.cmd(), req_.content(), req_.context(), _callID);
}

void ServerCallBidiStreamImpl::makeNew()
{
	auto request_streaming_bidi = std::bind(
				&ServiceType::RequestSendBidiStreamMsg, sv_,
				std::placeholders::_1, std::placeholders::_2, cq_,
				cq_, std::placeholders::_3);

	new ServerCallBidiStreamImpl(request_streaming_bidi,sv_,cq_);
}

bool ServerCallBidiStreamImpl::RequestDone(bool ok)
{

	makeNew();

	if (!ok)
	{
		Reset();
		return false;
	}

	stream_.Read(&req_, &read_done_func_);
	return true;
}
bool ServerCallBidiStreamImpl::ReadDone(bool ok)
{
	if (ok)
	{
		LOG_DEBUG("[++xeg++]ServerCallBidiStreamImpl::ReadDone,CallLuaProcess");
		grpc::Status status;
		CallLuaProcess(status);
	}
	else
	{  // client has sent writes done
		// finish the stream
		stream_.Finish(Status::OK, &finish_done_func_);
	}
	return true;
}
bool ServerCallBidiStreamImpl::WriteDone(bool ok)
{
	// now go back and get another streaming read!
	if (ok)
	{
		stream_.Read(&req_, &read_done_func_);
	}
	else 
	{

		stream_.Finish(Status::OK, &finish_done_func_);
	}
	return true;
}
bool ServerCallBidiStreamImpl::FinishDone(bool ok)
{

	Reset();
	return false;
}

void ServerCallBidiStreamImpl::Response(CommonResponse& resp,bool reset)
{
	LOG_DEBUG("[++xeg++]response msg in bidistream mode,cmd:%d,call id:%llu",(int)resp.cmd(),_callID);

	stream_.Write(resp, &write_done_func_);
}
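
// Caveat (not in the original post): gRPC allows only one outstanding Write()
// per stream. Because the next Read() is armed only from WriteDone(), at most
// one request is in flight at a time here, which keeps this safe; a lua
// handler must still reply exactly once per message, or the stream stalls.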

#pragma endregion


#pragma region RPCRecorder

// lua calls this api to send a reply on the request identified by requestID
int RPCRecorder::SendMsg2Client(uint64_t requestID, int cmd, const std::string& body, const std::string& context, bool reset)
{
	IRpcCall* request = gRequestRecorder->Get(requestID);

	if (nullptr == request)
	{
		return -1;
	}

	CommonResponse resp;

	// fill in the payload
	resp.set_cmd(cmd);
	resp.set_content(body);
	resp.set_context(context);

	// response pkg here
	request->Response(resp, reset);

	return 0;
}


int RPCRecorder::ResetRequest(uint64_t requestID)
{
	LOG_DEBUG("[++xeg++]reset request,call id:%llu",requestID);

	IRpcCall* request = gRequestRecorder->Get(requestID);

	if (nullptr == request)
	{
		return -1;
	}

	request->Reset();
	return 0;
}


uint64_t RPCRecorder::GenGUID()
{
	uint32_t now = TimeProvider::getInstance()->getNow();

	// reset the sequence number every second
	if (stamp != now)
	{
		stamp = now;
		_id = 1;
	}

	// high 32 bits: timestamp; low 32 bits: sequence number
	uint64_t guid = ((uint64_t)now << 32 | _id);

	_id++;

	return guid;
}
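
// For reference (not in the original post): a guid decodes back as
//   uint32_t ts  = (uint32_t)(guid >> 32);          // seconds-resolution timestamp
//   uint32_t seq = (uint32_t)(guid & 0xFFFFFFFFu);  // per-second sequence, starts at 1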

#pragma endregion
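
#pragma region ServerAlarm

// The original post does not include ServerAlarm's implementation. What
// follows is a minimal sketch, assuming the declaration in ServerCallImpl.h
// above and the same NextCallFunc tag convention; treat it as illustrative
// rather than the author's actual code.
ServerAlarm::ServerAlarm(std::function<void()>&& call)
	: alarm_done_func_(std::bind(&ServerAlarm::AlarmDone, this, std::placeholders::_1)),
	callback_(std::move(call))
{
}

ServerAlarm::~ServerAlarm() {}

void ServerAlarm::Reset()
{
	delete this;
}

bool ServerAlarm::AlarmDone(bool ok)
{
	// ok == true: the alarm fired at its deadline;
	// ok == false: it was cancelled (e.g. the queue is shutting down).
	if (ok && callback_)
	{
		callback_();
	}
	return true;
}

void ServerAlarm::Set(grpc::CompletionQueue* cq, const gpr_timespec& deadline)
{
	alarm_.Set(cq, deadline, &alarm_done_func_);
}

void ServerAlarm::Set(grpc::CompletionQueue* cq, int64_t ms)
{
	Set(cq, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
		gpr_time_from_millis(ms, GPR_TIMESPAN)));
}

#pragma endregion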

RpcServer.h

#pragma once
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <queue>
#include <unordered_set>
#include <mutex>
#include <time.h>

#include <grpcpp/grpcpp.h>

#include "common_service.grpc.pb.h"
#include "ServerCallImpl.h"

namespace xengine {

using grpc::Server;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerBuilder;
using grpc::CompletionQueue;
using grpc::ServerCompletionQueue;
using grpc::ServerContext;
using grpc::ServerAsyncReader;
using grpc::ServerAsyncWriter;
using grpc::ServerReader;
using grpc::ServerReaderWriter;
using grpc::ServerAsyncReaderWriter;
using grpc::ServerWriter;
using grpc::Status;


class RpcServer final
{
public:
	RpcServer();
	~RpcServer();

	void Init(const std::string& address, int receiveMaxSize, int sendMaxSize, int maxThread);
	void RunOnce();
	CompletionQueue* GetCompletionQueue()
	{
		return cq_.get();
	}
private:
	RpcServer(const RpcServer&) = delete;
	RpcServer& operator=(const RpcServer&) = delete;
private:
	std::string srv_address_;
	//uint32_t cq_max_num_{ 1 };
	std::unique_ptr<ServerCompletionQueue> cq_;
	ServiceType service_;
	std::unique_ptr<Server> server_;
	std::unique_ptr<ServerBuilder> builder_;

	//std::vector<std::unique_ptr<IRpcCall>> server_impls_;
};

} // namespace xengine

RpcServer.cpp

#include <iostream>
#include "RpcServer.h"
#include "ServerConfig.h"
#include "Log.h"

// Note: this handler is never called in the code shown; it appears to be a
// leftover from gRPC's qps test harness (server_async.cc) and is kept only
// for reference.
static Status ProcessSimpleRPC(CommonRequest* request, CommonResponse* response) {
	response->set_content(request->content() + " ok");
	// We are done using the request. Clear it to reduce working memory.
	// This proves to reduce cache misses in large message size cases.
	request->Clear();

	std::this_thread::sleep_for(std::chrono::milliseconds(10));

	return Status::OK;
}

RpcServer::RpcServer()
{
}

RpcServer::~RpcServer()
{
	// Guard against Init() never having been called.
	if (server_)
	{
		server_->Shutdown();
	}
	if (cq_)
	{
		cq_->Shutdown();
		// Drain the queue: every pending tag must be popped after Shutdown().
		bool ok;
		void* got_tag;
		while (cq_->Next(&got_tag, &ok)) {}
	}
}

void RpcServer::Init(const std::string& address, int receiveMaxSize, int sendMaxSize, int maxThread)
{
	srv_address_ = address;
	grpc::EnableDefaultHealthCheckService(true);
	builder_ = std::make_unique<ServerBuilder>();


	builder_->SetMaxReceiveMessageSize(receiveMaxSize);
	builder_->SetMaxSendMessageSize(sendMaxSize);

	// NOTE: none of the thread-count settings below had any visible effect
	// in our tests (see point 3 at the top of this post).
	int maxThreadNum = maxThread;
	builder_->SetResourceQuota(
		grpc::ResourceQuota("CommonService").SetMaxThreads(maxThreadNum));

	builder_->SetSyncServerOption(ServerBuilder::SyncServerOption::MIN_POLLERS, 1);
	builder_->SetSyncServerOption(ServerBuilder::SyncServerOption::MAX_POLLERS, 1);
	builder_->SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 1);

	builder_->AddListeningPort(srv_address_, grpc::InsecureServerCredentials());
	builder_->RegisterService(&service_);
	cq_ = builder_->AddCompletionQueue();
	server_ = builder_->BuildAndStart();
	if (server_ == nullptr) 
	{
		LOG_DEBUG("[++xeg++]Server: Fail to BuildAndStart(addr=%s)",srv_address_.c_str());
	}
	else 
	{
		LOG_DEBUG("[++xeg++]Server: BuildAndStart(addr=%s)",srv_address_.c_str());
	}

	auto request_unary = std::bind(
		&ServiceType::RequestSendMsg, &service_, std::placeholders::_1,
		std::placeholders::_2, std::placeholders::_3, cq_.get(),
		cq_.get(), std::placeholders::_4);

	new ServerCallUnaryImpl(request_unary,&service_,cq_.get());


	auto request_streaming_bidi = std::bind(
		&ServiceType::RequestSendBidiStreamMsg, &service_,
		std::placeholders::_1, std::placeholders::_2, cq_.get(),
		cq_.get(), std::placeholders::_3);

	new ServerCallBidiStreamImpl(request_streaming_bidi,&service_,cq_.get());



	LOG_DEBUG("[++xeg++]RpcServer listening on %s",srv_address_.c_str());
}

void RpcServer::RunOnce()
{
	void* got_tag;
	bool ok;
	// Non-blocking poll: waits at most TickInterval ms for one event.
	CompletionQueue::NextStatus status = cq_->AsyncNext(&got_tag, &ok, gpr_time_from_millis(ServerConfig::TickInterval, GPR_TIMESPAN));

	if (status == grpc::CompletionQueue::NextStatus::GOT_EVENT)
	{
		// Every tag is a NextCallFunc*; dispatch the event to its call object.
		NextCallFunc* functionPointer = reinterpret_cast<NextCallFunc*>(got_tag);
		if ((*functionPointer)(ok))
		{
			// true: the call object is still alive and has re-armed itself
		}
	}
}
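
As written, RunOnce() pops at most one completion per tick, which caps throughput at one event per TickInterval when the queue is busy. A hypothetical drain-all variant (not in the original code) would wait once and then keep popping with a zero timeout until the queue is empty:

void RpcServer::RunOnce()
{
	void* got_tag;
	bool ok;
	// First poll waits up to TickInterval ms so an idle loop doesn't spin.
	CompletionQueue::NextStatus status = cq_->AsyncNext(&got_tag, &ok,
		gpr_time_from_millis(ServerConfig::TickInterval, GPR_TIMESPAN));

	while (status == grpc::CompletionQueue::NextStatus::GOT_EVENT)
	{
		(*reinterpret_cast<NextCallFunc*>(got_tag))(ok);
		// Subsequent polls use a zero timeout: drain whatever is pending.
		status = cq_->AsyncNext(&got_tag, &ok, gpr_time_from_millis(0, GPR_TIMESPAN));
	}
}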
