grpc C++异步调用的例子

代码主要是基于grpc官方examples/cpp/helloworld项目改的, 对helloworld做了改进,可以方便地增加多个rpc调用接口

关于增加rpc接口的方法做如下介绍:

    1. 修改proto文件,增加新接口 如增加新接口 在Greeter里面增加 rpc UploadPlateIn(PlateInRequest) returns (PlateInReply) {}

并增加PlateInRequest 和 PlateInReply数据

syntax = "proto3";

option java_multiple_files = true;
option java_package = "io.grpc.examples.helloworld";
option java_outer_classname = "HelloWorldProto";
option objc_class_prefix = "HLW";

package helloworld;

// The greeting service definition.
service Greeter {
  // Sends a greeting
  rpc SayHello (HelloRequest) returns (HelloReply) {}
  // Reports a license plate entering; replies with the plate's category.
  rpc UploadPlateIn(PlateInRequest) returns (PlateInReply) {}
}

// Category of a parked vehicle's plate.
enum PlateType {
  TEMPORARY = 0;        // pay-per-use visitor
  MONTHLY_PAYMENT = 1;  // monthly subscription holder
  OTHER_TYPE = 2;       // anything else
}

// The request message containing the user's name.
message HelloRequest {
  string name = 1;
}

// The response message containing the greetings
message HelloReply {
  string message = 1;
}

// Request carrying the plate number of an entering vehicle.
message PlateInRequest {
  string plate = 1;
}
// Reply echoing the plate together with its resolved category.
message PlateInReply {
  string     plate = 1;
  PlateType  plate_type = 2;
}

2. 这里我把官方例子中greeter_async_server.cc中的CallData改成了模板类,并且该模板类继承自CallDataBase接口类(为了暴露接口Proceed(),方便调用不同rpc接口的处理函数)

greeter_async_server.h的代码如下:

#include <memory>
#include <iostream>
#include <string>
#include <thread>

#include <grpcpp/grpcpp.h>
#include <grpc/support/log.h>

#ifdef BAZEL_BUILD
#include "examples/protos/helloworld.grpc.pb.h"
#else
#include "helloworld.grpc.pb.h"
#endif

using grpc::Server;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::ServerCompletionQueue;
using grpc::Status;
using helloworld::HelloRequest;
using helloworld::HelloReply;
using helloworld::Greeter;

using helloworld::PlateInRequest;
using helloworld::PlateInReply;

// Owns the async gRPC server: the service, the server object, and the
// completion queue that delivers all asynchronous events.
class ServerImpl final {
  public:
    ~ServerImpl() {
      server_->Shutdown();
      // Always shutdown the completion queue after the server.
      cq_->Shutdown();
    }

    // Builds and starts the server, then blocks in the event loop.
    // There is no shutdown handling in this code.
    void Run();

 private:
    // Drains the completion queue and dispatches each event to its CallData.
    // This can be run in multiple threads if needed.
    void HandleRpcs();

    std::unique_ptr<ServerCompletionQueue> cq_;  // async event queue
    Greeter::AsyncService service_;              // generated async service
    std::unique_ptr<Server> server_;             // the running server
};

// Non-template interface for per-call state machines, so the completion-queue
// loop can drive any RPC type through a single Proceed() entry point.
class CallDataBase
{
  public:
    // Advance this call's state machine by one step.
    virtual void Proceed() = 0;
    // Virtual destructor: instances are deleted through this base pointer.
    virtual ~CallDataBase() {}
};
// Class encompasing the state and logic needed to serve a request.
// Class encompassing the state and logic needed to serve one request.
// Instantiated per RPC type with the matching request/reply message types.
template<class RequestT, class ReplyT>
class CallData : public CallDataBase
{
  public:
    // "service" is the asynchronous service instance and "cq" the completion
    // queue used for exchanging events with the gRPC runtime. The derived
    // class is expected to kick off Proceed() once fully constructed.
    CallData(Greeter::AsyncService* service, ServerCompletionQueue* cq)
        : service_(service), cq_(cq), responder_(&ctx_), status_(CREATE) {}

    // Per-RPC state machine, implemented by each concrete call type.
    virtual void Proceed() = 0;

  protected:
    // The means of communication with the gRPC runtime for an async server.
    Greeter::AsyncService* service_;
    // The producer-consumer queue for asynchronous server notifications.
    ServerCompletionQueue* cq_;
    // Per-rpc context: compression, authentication, response metadata, etc.
    ServerContext ctx_;

    // What we get from the client.
    RequestT request_;
    // What we send back to the client.
    ReplyT reply_;

    // The means to get back to the client.
    ServerAsyncResponseWriter<ReplyT> responder_;

    // Tiny state machine describing where this call currently is.
    enum CallStatus { CREATE, PROCESS, FINISH };
    CallStatus status_;  // The current serving state.
};

// Per-call state machine for the SayHello RPC.
class HelloCall : public CallData<HelloRequest, HelloReply>
{
  public:
    void Proceed();
    // Immediately requests the first SayHello call (CREATE -> PROCESS).
    HelloCall(Greeter::AsyncService* service, ServerCompletionQueue* cq)
        : CallData(service, cq) {
          Proceed();
        }
};

// Per-call state machine for the UploadPlateIn RPC.
class PlateInCall : public CallData<PlateInRequest, PlateInReply>
{
  public:
    void Proceed();
    // Immediately requests the first UploadPlateIn call (CREATE -> PROCESS).
    PlateInCall(Greeter::AsyncService* service, ServerCompletionQueue* cq)
        : CallData(service, cq){
          Proceed();
        }
};

greeter_async_server.cc的代码如下:

#include "greeter_async_server.h"

// Assemble and start the gRPC server on the fixed listen address, then
// hand control to the completion-queue event loop (which never returns).
void ServerImpl::Run()
{
  const std::string addr("0.0.0.0:50051");

  ServerBuilder builder;
  // Plain-text listening port, no authentication mechanism.
  builder.AddListeningPort(addr, grpc::InsecureServerCredentials());
  // Expose "service_" as an *asynchronous* service.
  builder.RegisterService(&service_);
  // The queue through which every async event will be delivered.
  cq_ = builder.AddCompletionQueue();
  server_ = builder.BuildAndStart();
  std::cout << "Server listening on " << addr << std::endl;

  // Enter the server's main loop.
  HandleRpcs();
}

// This can be run in multiple threads if needed.
void ServerImpl::HandleRpcs()
{
  // Spawn a new CallData instance to serve new clients.
  new HelloCall(&service_, cq_.get());
  new PlateInCall(&service_, cq_.get());
  void* tag;  // uniquely identifies a request.
  bool ok;
  while (true) {
    // Block waiting to read the next event from the completion queue. The
    // event is uniquely identified by its tag, which in this case is the
    // memory address of a CallData instance.
    // The return value of Next should always be checked. This return value
    // tells us whether there is any kind of event or cq_ is shutting down.
    GPR_ASSERT(cq_->Next(&tag, &ok));
    GPR_ASSERT(ok);
    static_cast<CallDataBase*>(tag)->Proceed();
  }
}

// State machine for one SayHello call: request -> serve -> finish -> free.
void HelloCall::Proceed() 
{
  switch (status_) {
    case CREATE:
      // Ask the runtime to route the next incoming SayHello request to this
      // object; "this" is the completion-queue tag, so distinct CallData
      // instances can serve distinct requests concurrently.
      status_ = PROCESS;
      service_->RequestSayHello(&ctx_, &request_, &responder_, cq_, cq_,
                                this);
      break;
    case PROCESS: {
      // Hand new incoming calls to a fresh instance while we answer this
      // one; that instance deallocates itself in its own FINISH state.
      new HelloCall(service_, cq_);

      // The actual processing.
      std::string prefix("Hello ");
      reply_.set_message(prefix + request_.name());

      // Tell the runtime we are done; the tag comes back once more and is
      // handled by the FINISH branch below.
      status_ = FINISH;
      responder_.Finish(reply_, Status::OK, this);
      break;
    }
    default:
      GPR_ASSERT(status_ == FINISH);
      // Once in the FINISH state, deallocate ourselves.
      delete this;
      break;
  }
}

// State machine for one UploadPlateIn call: request -> serve -> finish -> free.
void PlateInCall::Proceed()
{
  switch (status_) {
    case CREATE:
      // Ask the runtime to route the next incoming UploadPlateIn request to
      // this object; "this" is the completion-queue tag, so distinct
      // CallData instances can serve distinct requests concurrently.
      status_ = PROCESS;
      service_->RequestUploadPlateIn(&ctx_, &request_, &responder_, cq_, cq_,
                                     this);
      break;
    case PROCESS:
      // Hand new incoming calls to a fresh instance while we answer this
      // one; that instance deallocates itself in its own FINISH state.
      new PlateInCall(service_, cq_);

      // The actual processing: echo the plate and classify it as TEMPORARY.
      reply_.set_plate(request_.plate());
      reply_.set_plate_type(helloworld::PlateType::TEMPORARY);

      // Tell the runtime we are done; the tag comes back once more and is
      // handled by the FINISH branch below.
      status_ = FINISH;
      responder_.Finish(reply_, Status::OK, this);
      break;
    default:
      GPR_ASSERT(status_ == FINISH);
      // Once in the FINISH state, deallocate ourselves.
      delete this;
      break;
  }
}

3. 把官方greeter_async_client.cc代码中的ClientCall改成了模板类AsyncClientCallBase,并继承自AsyncClientCall接口类(该接口类是为了方便显示rpc服务返回的消息,主要通过message()接口函数实现)

greeter_async_client.h 的代码如下:

#include <iostream>
#include <memory>
#include <string>
#include<atomic>

#include <grpcpp/grpcpp.h>
#include <grpc/support/log.h>
#include <thread>

#ifdef BAZEL_BUILD
#include "examples/protos/helloworld.grpc.pb.h"
#else
#include "helloworld.grpc.pb.h"
#endif

using grpc::Channel;
using grpc::ClientAsyncResponseReader;
using grpc::ClientContext;
using grpc::CompletionQueue;
using grpc::Status;
using helloworld::HelloRequest;
using helloworld::HelloReply;
using helloworld::Greeter;
using helloworld::PlateInRequest;
using helloworld::PlateInReply;

// Async client for the Greeter service: each Send method fires one RPC and
// returns immediately; AsyncCompleteRpc() collects and prints the replies.
class GreeterAsyncClient {
  public:
    explicit GreeterAsyncClient(std::shared_ptr<Channel> channel)
            : stub_(Greeter::NewStub(channel)) {}

    // Fire a SayHello RPC for the given user name (non-blocking).
    void SayHello(const std::string& user);
    // Fire an UploadPlateIn RPC for the given plate number (non-blocking).
    void UploadPlateIn(const std::string& plate);
    // Loop while listening for completed responses.
    // Prints out the response from the server.
    void AsyncCompleteRpc();

  private:

    // Out of the passed in Channel comes the stub, stored here, our view of the
    // server's exposed services.
    std::unique_ptr<Greeter::Stub> stub_;

    // The producer-consumer queue we use to communicate asynchronously with the
    // gRPC runtime.
    CompletionQueue cq_;
};
// Type-erased interface for one in-flight client call, so the completion
// loop can print any RPC's reply through message() without knowing its type.
class AsyncClientCall
{
  public:
    // Virtual destructor: calls are deleted through this base pointer.
    virtual ~AsyncClientCall(){}
    // Human-readable rendering of the received reply.
    virtual std::string message() = 0;
    // Context for the client. It could be used to convey extra information
    // to the server and/or tweak certain RPC behaviors.
    ClientContext context;
    // Storage for the status of the RPC upon completion.
    Status status;
};
// Typed per-call state: the reply message and its async reader, instantiated
// with the RPC's reply type T.
template<class T>
class AsyncClientCallBase : public AsyncClientCall
{
public:
    // Container for the data we expect from the server.
    T reply;
    // Reader that receives the server's response asynchronously.
    std::unique_ptr<ClientAsyncResponseReader<T>> response_reader; 
};

// In-flight SayHello call; message() renders the HelloReply.
class AsyncClientHelloCall : public AsyncClientCallBase<HelloReply>
{
  public:
    virtual std::string message();
};
// In-flight UploadPlateIn call; message() renders the PlateInReply.
class  AsyncClientPlateInCall : public AsyncClientCallBase<PlateInReply>
{
public:
    virtual std::string message();
};

greeter_async_client.cc 中的代码如下:

/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
#include "greeter_async_client.h"

// Assembles the client's payload and sends it to the server.
// Assembles the client's payload and sends it to the server. Non-blocking:
// the reply is collected later by AsyncCompleteRpc().
void GreeterAsyncClient::SayHello(const std::string& user) {
    // Data we are sending to the server.
    HelloRequest request;
    request.set_name(user);

    // Call object owning all per-rpc state; it is deallocated by
    // AsyncCompleteRpc() when the completion queue hands its tag back.
    AsyncClientHelloCall* call = new AsyncClientHelloCall;

    // stub_->PrepareAsyncSayHello() creates an RPC object, returning
    // an instance to store in "call" but does not actually start the RPC.
    // Because we are using the asynchronous API, we need to hold on to
    // the "call" instance in order to get updates on the ongoing RPC.
    call->response_reader =
        stub_->PrepareAsyncSayHello(&call->context, request, &cq_);

    // StartCall initiates the RPC call.
    call->response_reader->StartCall();

    // Request that, upon completion of the RPC, "reply" be updated with the
    // server's response; "status" with the indication of whether the operation
    // was successful. Tag the request with the memory address of the call
    // object (named cast instead of a C-style cast).
    call->response_reader->Finish(&call->reply, &call->status,
                                  static_cast<void*>(call));
}
// Assembles an UploadPlateIn request and sends it to the server.
// Non-blocking: the reply is collected later by AsyncCompleteRpc().
void GreeterAsyncClient::UploadPlateIn(const std::string& plate)
{
    PlateInRequest request;
    request.set_plate(plate);

    // Per-rpc state; deallocated by AsyncCompleteRpc() once the tag returns.
    AsyncClientPlateInCall* call = new AsyncClientPlateInCall;
    // Prepare the RPC (does not start it yet), then kick it off.
    call->response_reader = stub_->PrepareAsyncUploadPlateIn(&call->context, request, &cq_);
    call->response_reader->StartCall();
    // Ask for reply/status on completion, tagged with the call's address
    // (named cast instead of a C-style cast).
    call->response_reader->Finish(&call->reply, &call->status,
                                  static_cast<void*>(call));
}
// Loop while listening for completed responses.
// Prints out the response from the server.
// Loop while listening for completed responses.
// Prints out the response from the server.
void GreeterAsyncClient::AsyncCompleteRpc() {
    void* got_tag;
    bool ok = false;

    // Block until the next result is available in the completion queue "cq".
    // Next() returns false only after the queue is shut down and drained.
    while (cq_.Next(&got_tag, &ok)) {
        // The tag in this example is the memory location of the call object
        AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);

        // Verify that the request was completed successfully. Note that "ok"
        // corresponds solely to the request for updates introduced by Finish(),
        // which on the client side should always succeed.
        GPR_ASSERT(ok);

        // status.ok() tells us whether the RPC itself succeeded; message()
        // dispatches to the concrete call type's rendering of its reply.
        if (call->status.ok())
            std::cout << "Greeter received: " << call->message() << std::endl;
        else
            std::cout << "RPC failed" << std::endl;

        // Once we're complete, deallocate the call object.
        delete call;
    }
    
}

// Renders the HelloReply: just its greeting text.
std::string AsyncClientHelloCall::message()
{
    return reply.message();
}

// Renders the PlateInReply as "<plate>:<numeric plate type>".
std::string AsyncClientPlateInCall::message()
{
    std::string text = reply.plate();
    text += ":";
    text += std::to_string(reply.plate_type());
    return text;
}

我只整理了异步请求的例子,同步请求的并没有改动,主要是异步请求的添加rpc接口比较困难,所以做了整理,具体项目代码提交到了gitee上(由于github国内下载太慢,所以果断选用国内的仓库)。

项目地址: https://gitee.com/ellan-bm/grpc_async_example.git

  • 5
    点赞
  • 19
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值