fabric2.0 cluster framework

fabric2.0 cluster

Contents

1. rpc
2. comm

rpc

Each orderer starts a gRPC server to serve cluster traffic, and also connects as a gRPC client to the servers of the other orderers. That means there are two cluster connections between any pair of orderers.

A node sends messages over the connection it dialed itself, and receives incoming messages on its server side.
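
Below is a hypothetical sketch of that dual role (names and wiring are illustrative, not the orderer's actual startup code; it assumes imports of net, google.golang.org/grpc, and github.com/hyperledger/fabric-protos-go/orderer; real cluster connections use mutual TLS, which is omitted here):

// startClusterNode sketches the dual role: serve the Cluster service and
// dial every other orderer as a client, so any pair of orderers shares two
// connections, one per direction.
func startClusterNode(svc orderer.ClusterServer, listenAddr string, peerAddrs []string) ([]*grpc.ClientConn, error) {
	srv := grpc.NewServer() // server half: remote orderers dial in and call Step()
	orderer.RegisterClusterServer(srv, svc)

	lis, err := net.Listen("tcp", listenAddr)
	if err != nil {
		return nil, err
	}
	go srv.Serve(lis) // Serve's error is ignored in this sketch

	conns := make([]*grpc.ClientConn, 0, len(peerAddrs))
	for _, addr := range peerAddrs { // client half: dial each remote orderer
		conn, err := grpc.Dial(addr, grpc.WithInsecure()) // insecure only for the sketch
		if err != nil {
			return nil, err
		}
		conns = append(conns, conn)
	}
	return conns, nil
}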

service

grpc cluster server

The code below is located in vendor/github.com/hyperledger/fabric-protos-go/orderer/cluster.pb.go

// ClusterServer is the server API for Cluster service.
type ClusterServer interface {
	// Step passes an implementation-specific message to another cluster member.
	Step(Cluster_StepServer) error
}

func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
	s.RegisterService(&_Cluster_serviceDesc, srv)
}

func _Cluster_Step_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(ClusterServer).Step(&clusterStepServer{stream})
}

type Cluster_StepServer interface {
	Send(*StepResponse) error
	Recv() (*StepRequest, error)
	grpc.ServerStream
}

type clusterStepServer struct {
	grpc.ServerStream
}

func (x *clusterStepServer) Send(m *StepResponse) error {
	return x.ServerStream.SendMsg(m)
}

func (x *clusterStepServer) Recv() (*StepRequest, error) {
	m := new(StepRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

impl

The code below is located in orderer/common/cluster/service.go

// Service defines the raft Service
type Service struct {
	StreamCountReporter              *StreamCountReporter
	Dispatcher                       Dispatcher
	Logger                           *flogging.FabricLogger
	StepLogger                       *flogging.FabricLogger
	MinimumExpirationWarningInterval time.Duration
	CertExpWarningThreshold          time.Duration
}

// Step passes an implementation-specific message to another cluster member.
func (s *Service) Step(stream orderer.Cluster_StepServer) error {
	s.StreamCountReporter.Increment()
	defer s.StreamCountReporter.Decrement()

	addr := util.ExtractRemoteAddress(stream.Context())
	commonName := commonNameFromContext(stream.Context())
	exp := s.initializeExpirationCheck(stream, addr, commonName)
	s.Logger.Debugf("Connection from %s(%s)", commonName, addr)
	defer s.Logger.Debugf("Closing connection from %s(%s)", commonName, addr)
	for {
		err := s.handleMessage(stream, addr, exp)
		if err == io.EOF {
			s.Logger.Debugf("%s(%s) disconnected", commonName, addr)
			return nil
		}
		if err != nil {
			return err
		}
		// Else, no error occurred, so we continue to the next iteration
	}
}
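
For context, here is a hedged wiring sketch (not the orderer's actual startup code) of how such a Service could be registered on the intra-cluster gRPC server via the generated RegisterClusterServer(). The threshold values are illustrative, and comm is a *cluster.Comm, which implements Dispatcher as shown later; imports of google.golang.org/grpc, time, and the fabric flogging, cluster, and orderer packages are assumed:

func registerClusterService(srv *grpc.Server, comm *cluster.Comm, reporter *cluster.StreamCountReporter) {
	svc := &cluster.Service{
		StreamCountReporter:              reporter,
		Dispatcher:                       comm,
		Logger:                           flogging.MustGetLogger("orderer.common.cluster"),
		StepLogger:                       flogging.MustGetLogger("orderer.common.cluster.step"),
		MinimumExpirationWarningInterval: 5 * time.Minute,    // illustrative
		CertExpWarningThreshold:          7 * 24 * time.Hour, // illustrative
	}
	orderer.RegisterClusterServer(srv, svc)
}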

Once the service receives a message from a cluster node, it first checks the expiration of the connecting node's certificate, and then determines the type of the incoming message.

The message is either a submit request or a consensus message, so the service calls handleSubmit() or Dispatcher.DispatchConsensus() accordingly.

In fact, handleSubmit() simply calls Dispatcher.DispatchSubmit().

func (s *Service) handleMessage(stream StepStream, addr string, exp *certificateExpirationCheck) error {
	request, err := stream.Recv()
	if err == io.EOF {
		return err
	}
	if err != nil {
		s.Logger.Warningf("Stream read from %s failed: %v", addr, err)
		return err
	}

	exp.checkExpiration(time.Now(), extractChannel(request))

	if s.StepLogger.IsEnabledFor(zap.DebugLevel) {
		nodeName := commonNameFromContext(stream.Context())
		s.StepLogger.Debugf("Received message from %s(%s): %v", nodeName, addr, requestAsString(request))
	}

	if submitReq := request.GetSubmitRequest(); submitReq != nil {
		nodeName := commonNameFromContext(stream.Context())
		s.Logger.Debugf("Received message from %s(%s): %v", nodeName, addr, requestAsString(request))
		return s.handleSubmit(submitReq, stream, addr)
	}

	// Else, it's a consensus message.
	return s.Dispatcher.DispatchConsensus(stream.Context(), request.GetConsensusRequest())
}

func (s *Service) handleSubmit(request *orderer.SubmitRequest, stream StepStream, addr string) error {
	err := s.Dispatcher.DispatchSubmit(stream.Context(), request)
	if err != nil {
		s.Logger.Warningf("Handling of Submit() from %s failed: %v", addr, err)
		return err
	}
	return err
}

If the certificate of the connected node expires in less than expirationWarningThreshold, the service emits a warning, at most once every minimumExpirationWarningInterval.

type certificateExpirationCheck struct {
	minimumExpirationWarningInterval time.Duration
	expiresAt                        time.Time
	expirationWarningThreshold       time.Duration
	lastWarning                      time.Time
	nodeName                         string
	endpoint                         string
	alert                            func(string, ...interface{})
}

func (exp *certificateExpirationCheck) checkExpiration(currentTime time.Time, channel string) {
	timeLeft := exp.expiresAt.Sub(currentTime)
	if timeLeft > exp.expirationWarningThreshold {
		return
	}

	timeSinceLastWarning := currentTime.Sub(exp.lastWarning)
	if timeSinceLastWarning < exp.minimumExpirationWarningInterval {
		return
	}

	exp.alert("Certificate of %s from %s for channel %s expires in less than %v",
		exp.nodeName, exp.endpoint, channel, timeLeft)
	exp.lastWarning = currentTime
}

func (s *Service) initializeExpirationCheck(stream orderer.Cluster_StepServer, endpoint, nodeName string) *certificateExpirationCheck {
	return &certificateExpirationCheck{
		minimumExpirationWarningInterval: s.MinimumExpirationWarningInterval,
		expirationWarningThreshold:       s.CertExpWarningThreshold,
		expiresAt:                        expiresAt(stream),
		endpoint:                         endpoint,
		nodeName:                         nodeName,
		alert: func(template string, args ...interface{}) {
			s.Logger.Warningf(template, args...)
		},
	}
}
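
To make the throttling concrete: with a 7-day threshold and a 1-hour minimum interval, a certificate expiring in 24 hours triggers at most one warning per hour. A minimal sketch, assuming it lives in the same package as the struct above (all values hypothetical; fmt and time imports assumed):

// demoExpirationThrottle prints two warnings, not three.
func demoExpirationThrottle() {
	now := time.Now()
	exp := &certificateExpirationCheck{
		minimumExpirationWarningInterval: time.Hour,
		expirationWarningThreshold:       7 * 24 * time.Hour,
		expiresAt:                        now.Add(24 * time.Hour),
		nodeName:                         "orderer2",
		endpoint:                         "orderer2:7050",
		alert: func(template string, args ...interface{}) {
			fmt.Printf(template+"\n", args...)
		},
	}
	exp.checkExpiration(now, "mychannel")                     // warns: 24h left < 7d threshold
	exp.checkExpiration(now.Add(30*time.Minute), "mychannel") // suppressed: last warning was 30m ago
	exp.checkExpiration(now.Add(2*time.Hour), "mychannel")    // warns again: interval elapsed
}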

Dispatcher

// Dispatcher dispatches requests
type Dispatcher interface {
	DispatchSubmit(ctx context.Context, request *orderer.SubmitRequest) error
	DispatchConsensus(ctx context.Context, request *orderer.ConsensusRequest) error
}

impl

The code below is located in orderer/common/cluster/comm.go

// Comm implements Communicator
type Comm struct {
	MinimumExpirationWarningInterval time.Duration
	CertExpWarningThreshold          time.Duration
	shutdownSignal                   chan struct{}
	shutdown                         bool
	SendBufferSize                   int
	Lock                             sync.RWMutex
	Logger                           *flogging.FabricLogger
	ChanExt                          ChannelExtractor
	H                                Handler
	Connections                      *ConnectionStore
	Chan2Members                     MembersByChannel
	Metrics                          *Metrics
}

type requestContext struct {
	channel string
	sender  uint64
}

// DispatchSubmit identifies the channel and sender of the submit request and passes it
// to the underlying Handler
func (c *Comm) DispatchSubmit(ctx context.Context, request *orderer.SubmitRequest) error {
	reqCtx, err := c.requestContext(ctx, request)
	if err != nil {
		return err
	}
	return c.H.OnSubmit(reqCtx.channel, reqCtx.sender, request)
}

// DispatchConsensus identifies the channel and sender of the step request and passes it
// to the underlying Handler
func (c *Comm) DispatchConsensus(ctx context.Context, request *orderer.ConsensusRequest) error {
	reqCtx, err := c.requestContext(ctx, request)
	if err != nil {
		return err
	}
	return c.H.OnConsensus(reqCtx.channel, reqCtx.sender, request)
}

// requestContext identifies the sender and channel of the request and returns
// it wrapped in a requestContext
func (c *Comm) requestContext(ctx context.Context, msg proto.Message) (*requestContext, error) {
	channel := c.ChanExt.TargetChannel(msg)
	if channel == "" {
		return nil, errors.Errorf("badly formatted message, cannot extract channel")
	}

	c.Lock.RLock()
	mapping, exists := c.Chan2Members[channel]
	c.Lock.RUnlock()

	if !exists {
		return nil, errors.Errorf("channel %s doesn't exist", channel)
	}

	cert := comm.ExtractRawCertificateFromContext(ctx)
	if len(cert) == 0 {
		return nil, errors.Errorf("no TLS certificate sent")
	}

	stub := mapping.LookupByClientCert(cert)
	if stub == nil {
		return nil, errors.Errorf("certificate extracted from TLS connection isn't authorized")
	}
	return &requestContext{
		channel: channel,
		sender:  stub.ID,
	}, nil
}
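
The dispatch target Comm.H is a cluster.Handler, whose shape can be read off the two calls above; in etcdraft the handler ultimately routes each message to the right chain by channel. A toy implementation, purely for illustration (fabric imports assumed):

type loggingHandler struct {
	logger *flogging.FabricLogger
}

func (h *loggingHandler) OnConsensus(channel string, sender uint64, req *orderer.ConsensusRequest) error {
	h.logger.Infof("consensus message on channel %s from node %d (%d payload bytes)", channel, sender, len(req.Payload))
	return nil
}

func (h *loggingHandler) OnSubmit(channel string, sender uint64, req *orderer.SubmitRequest) error {
	h.logger.Infof("submit request on channel %s from node %d", channel, sender)
	return nil
}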

client

grpc cluster client

The code below is located in vendor/github.com/hyperledger/fabric-protos-go/orderer/cluster.pb.go

// ClusterClient creates streams that point to a remote cluster member.
type ClusterClient interface {
	Step(ctx context.Context, opts ...grpc.CallOption) (orderer.Cluster_StepClient, error)
}

type clusterClient struct {
	cc *grpc.ClientConn
}

func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
	return &clusterClient{cc}
}

func (c *clusterClient) Step(ctx context.Context, opts ...grpc.CallOption) (Cluster_StepClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Cluster_serviceDesc.Streams[0], "/orderer.Cluster/Step", opts...)
	if err != nil {
		return nil, err
	}
	x := &clusterStepClient{stream}
	return x, nil
}

type Cluster_StepClient interface {
	Send(*StepRequest) error
	Recv() (*StepResponse, error)
	grpc.ClientStream
}

type clusterStepClient struct {
	grpc.ClientStream
}

func (x *clusterStepClient) Send(m *StepRequest) error {
	return x.ClientStream.SendMsg(m)
}

func (x *clusterStepClient) Recv() (*StepResponse, error) {
	m := new(StepResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
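
A minimal, hypothetical sketch of driving this generated client directly (assumes imports of context, google.golang.org/grpc, and the orderer protos package; a real orderer would reject the dial, since cluster traffic requires mutual TLS and the server authorizes callers by their TLS client certificate):

func dialAndStep() error {
	conn, err := grpc.Dial("127.0.0.1:7050", grpc.WithInsecure()) // insecure only for the sketch
	if err != nil {
		return err
	}
	defer conn.Close()

	stream, err := orderer.NewClusterClient(conn).Step(context.Background())
	if err != nil {
		return err
	}

	// Wrap the payload in the StepRequest oneof, exactly as RPC does below.
	req := &orderer.StepRequest{
		Payload: &orderer.StepRequest_ConsensusRequest{
			ConsensusRequest: &orderer.ConsensusRequest{Channel: "mychannel"},
		},
	}
	if err := stream.Send(req); err != nil {
		return err
	}
	_, err = stream.Recv()
	return err
}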

RPC struct

// RPC performs remote procedure calls to remote cluster nodes.
type RPC struct {
	consensusLock sync.Mutex
	submitLock    sync.Mutex
	Logger        *flogging.FabricLogger
	Timeout       time.Duration
	Channel       string
	Comm          Communicator
	lock          sync.RWMutex
	StreamsByType map[OperationType]map[uint64]*Stream
}

StreamsByType stores all streams, keyed first by OperationType and then by destination node ID (uint64); see the initialization sketch after the constants below.

// OperationType denotes a type of operation that the RPC can perform
// such as sending a transaction, or a consensus related message.
type OperationType int

const (
	ConsensusOperation OperationType = iota
	SubmitOperation
)
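
Both inner maps must be initialized before any stream is mapped; the cluster package provides a constructor for this (NewStreamsByType), whose shape is sketched here:

// newStreamsByType sketches the two-level stream map: one inner map per
// operation type, each keyed by destination node ID.
func newStreamsByType() map[OperationType]map[uint64]*Stream {
	return map[OperationType]map[uint64]*Stream{
		ConsensusOperation: make(map[uint64]*Stream),
		SubmitOperation:    make(map[uint64]*Stream),
	}
}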

Sending first calls getOrCreateStream(), then wraps the message in the corresponding StepRequest payload, and finally calls stream.Send().

// SendConsensus passes the given ConsensusRequest message to the raft.Node instance.
func (s *RPC) SendConsensus(destination uint64, msg *orderer.ConsensusRequest) error {
	if s.Logger.IsEnabledFor(zapcore.DebugLevel) {
		defer s.consensusSent(time.Now(), destination, msg)
	}

	stream, err := s.getOrCreateStream(destination, ConsensusOperation)
	if err != nil {
		return err
	}

	req := &orderer.StepRequest{
		Payload: &orderer.StepRequest_ConsensusRequest{
			ConsensusRequest: msg,
		},
	}

	s.consensusLock.Lock()
	defer s.consensusLock.Unlock()

	err = stream.Send(req)
	if err != nil {
		s.unMapStream(destination, ConsensusOperation)
	}

	return err
}

// SendSubmit sends a SubmitRequest to the given destination node.
func (s *RPC) SendSubmit(destination uint64, request *orderer.SubmitRequest) error {
	if s.Logger.IsEnabledFor(zapcore.DebugLevel) {
		defer s.submitSent(time.Now(), destination, request)
	}

	stream, err := s.getOrCreateStream(destination, SubmitOperation)
	if err != nil {
		return err
	}

	req := &orderer.StepRequest{
		Payload: &orderer.StepRequest_SubmitRequest{
			SubmitRequest: request,
		},
	}

	s.submitLock.Lock()
	defer s.submitLock.Unlock()

	err = stream.Send(req)
	if err != nil {
		s.unMapStream(destination, SubmitOperation)
	}
	return err
}
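
For context, this is roughly how the etcdraft consenter pushes a raft message to a peer (a hedged sketch; the helper name is hypothetical, and raftpb refers to go.etcd.io/etcd/raft/raftpb):

// sendRaftMessage marshals a raft message and sends it to its destination
// node through the cluster RPC.
func sendRaftMessage(rpc *cluster.RPC, channel string, msg raftpb.Message) error {
	data, err := msg.Marshal() // raftpb messages are protobufs
	if err != nil {
		return err
	}
	return rpc.SendConsensus(msg.To, &orderer.ConsensusRequest{
		Channel: channel,
		Payload: data,
	})
}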

If a stream to the destination already exists in StreamsByType (a map), it is returned.
If not, getOrCreateStream() obtains a stub (a RemoteContext) via Comm.Remote() and calls NewStream() on it; NewStream() is covered in the RemoteContext section below.
Finally, the new stream is stored in StreamsByType and returned.

// getOrCreateStream obtains a Submit stream for the given destination node
func (s *RPC) getOrCreateStream(destination uint64, operationType OperationType) (orderer.Cluster_StepClient, error) {
	stream := s.getStream(destination, operationType)
	if stream != nil {
		return stream, nil
	}
	stub, err := s.Comm.Remote(s.Channel, destination)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	stream, err = stub.NewStream(s.Timeout)
	if err != nil {
		return nil, err
	}
	s.mapStream(destination, stream, operationType)
	return stream, nil
}

Comm

Comm is the core structure of the cluster package. It is responsible for managing communication with remote cluster nodes, and it implements cluster.Communicator, cluster.Dispatcher, and etcdraft.Configurator.

// Comm implements Communicator
type Comm struct {
	MinimumExpirationWarningInterval time.Duration
	CertExpWarningThreshold          time.Duration
	shutdownSignal                   chan struct{}
	shutdown                         bool
	SendBufferSize                   int
	Lock                             sync.RWMutex
	Logger                           *flogging.FabricLogger
	ChanExt                          ChannelExtractor
	H                                Handler
	Connections                      *ConnectionStore
	Chan2Members                     MembersByChannel
	Metrics                          *Metrics
}

ConnectionStore establishes connections and caches them in a map.

Chan2Members tracks, per channel, all the cluster members the current node talks to.

ConnectionStore

ConnectionStore establishes connections to remote nodes and stores them in a map.

The code below is located in orderer/common/cluster/connections.go

// SecureDialer connects to a remote address
type SecureDialer interface {
	Dial(address string, verifyFunc RemoteVerifier) (*grpc.ClientConn, error)
}

// ConnectionMapper maps certificates to connections
type ConnectionMapper interface {
	Lookup(cert []byte) (*grpc.ClientConn, bool)
	Put(cert []byte, conn *grpc.ClientConn)
	Remove(cert []byte)
	Size() int
}

// ConnectionStore stores connections to remote nodes
type ConnectionStore struct {
	lock        sync.RWMutex
	Connections ConnectionMapper
	dialer      SecureDialer
}

ConnectionMapper is implemented by ConnByCertMap (located in orderer/common/cluster/util.go); the key is a certificate and the value is a gRPC connection.

// ConnByCertMap maps certificates represented as strings
// to gRPC connections
type ConnByCertMap map[string]*grpc.ClientConn
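
Its methods are plain map operations over the string-converted certificate; a sketch consistent with the ConnectionMapper interface above:

// Lookup returns the connection mapped to the certificate, if any.
func (m ConnByCertMap) Lookup(cert []byte) (*grpc.ClientConn, bool) {
	conn, ok := m[string(cert)]
	return conn, ok
}

// Put maps the certificate to the connection.
func (m ConnByCertMap) Put(cert []byte, conn *grpc.ClientConn) {
	m[string(cert)] = conn
}

// Remove deletes the mapping for the certificate.
func (m ConnByCertMap) Remove(cert []byte) {
	delete(m, string(cert))
}

// Size returns the number of mapped connections.
func (m ConnByCertMap) Size() int {
	return len(m)
}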

SecureDialer is implemented by PredicateDialer (located in orderer/common/cluster/util.go), which establishes the connection to the server via Dial().

Dial() calls GRPCClient.NewConnection() to establish the connection.

// PredicateDialer creates gRPC connections
// that are only established if the given predicate
// is fulfilled
type PredicateDialer struct {
	lock   sync.RWMutex
	Config comm.ClientConfig
}

func (dialer *PredicateDialer) UpdateRootCAs(serverRootCAs [][]byte) {
	dialer.lock.Lock()
	defer dialer.lock.Unlock()
	dialer.Config.SecOpts.ServerRootCAs = serverRootCAs
}

// Dial creates a new gRPC connection that can only be established, if the remote node's
// certificate chain satisfy verifyFunc
func (dialer *PredicateDialer) Dial(address string, verifyFunc RemoteVerifier) (*grpc.ClientConn, error) {
	dialer.lock.RLock()
	cfg := dialer.Config.Clone()
	dialer.lock.RUnlock()

	cfg.SecOpts.VerifyCertificate = verifyFunc
	client, err := comm.NewGRPCClient(cfg)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return client.NewConnection(address, func(tlsConfig *tls.Config) {
		// We need to dynamically overwrite the TLS root CAs,
		// as they may be updated.
		dialer.lock.RLock()
		serverRootCAs := dialer.Config.Clone().SecOpts.ServerRootCAs
		dialer.lock.RUnlock()

		tlsConfig.RootCAs = x509.NewCertPool()
		for _, pem := range serverRootCAs {
			tlsConfig.RootCAs.AppendCertsFromPEM(pem)
		}
	})
}

Connection() first checks whether a connection already exists, to avoid dialing the same node repeatedly (connect() re-checks under the write lock). Note that the expected certificate is verified during the TLS handshake, via the predicate built by verifyHandshake() and passed to Dial().

// verifyHandshake returns a predicate that verifies that the remote node authenticates
// itself with the given TLS certificate
func (c *ConnectionStore) verifyHandshake(endpoint string, certificate []byte) RemoteVerifier {
	return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
		if bytes.Equal(certificate, rawCerts[0]) {
			return nil
		}
		return errors.Errorf("certificate presented by %s doesn't match any authorized certificate", endpoint)
	}
}

// Disconnect closes the gRPC connection that is mapped to the given certificate
func (c *ConnectionStore) Disconnect(expectedServerCert []byte) {
	c.lock.Lock()
	defer c.lock.Unlock()

	conn, connected := c.Connections.Lookup(expectedServerCert)
	if !connected {
		return
	}
	conn.Close()
	c.Connections.Remove(expectedServerCert)
}

// Connection obtains a connection to the given endpoint and expects the given server certificate
// to be presented by the remote node
func (c *ConnectionStore) Connection(endpoint string, expectedServerCert []byte) (*grpc.ClientConn, error) {
	c.lock.RLock()
	conn, alreadyConnected := c.Connections.Lookup(expectedServerCert)
	c.lock.RUnlock()

	if alreadyConnected {
		return conn, nil
	}

	// Else, we need to connect to the remote endpoint
	return c.connect(endpoint, expectedServerCert)
}

// connect connects to the given endpoint and expects the given TLS server certificate
// to be presented at the time of authentication
func (c *ConnectionStore) connect(endpoint string, expectedServerCert []byte) (*grpc.ClientConn, error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Check again to see if some other goroutine has already connected while
	// we were waiting on the lock
	conn, alreadyConnected := c.Connections.Lookup(expectedServerCert)
	if alreadyConnected {
		return conn, nil
	}

	v := c.verifyHandshake(endpoint, expectedServerCert)
	conn, err := c.dialer.Dial(endpoint, v)
	if err != nil {
		return nil, err
	}

	c.Connections.Put(expectedServerCert, conn)
	return conn, nil
}

MembersByChannel

MembersByChannel is a map whose key is a channel name and whose value is a MemberMapping; MemberMapping is a map whose key is the ID of a remote node and whose value is a Stub.

A Stub manages the connection to one specific cluster node.

// MembersByChannel is a mapping from channel name
// to MemberMapping
type MembersByChannel map[string]MemberMapping

type MemberMapping map[uint64]*Stub

// Stub holds all information about the remote node,
// including the RemoteContext for it, and serializes
// some operations on it.
type Stub struct {
	lock sync.RWMutex
	RemoteNode
	*RemoteContext
}

RemoteNode

RemoteNode holds all the information about a remote node.

// RemoteNode represents a cluster member
type RemoteNode struct {
	// ID is unique among all members, and cannot be 0.
	ID uint64
	// Endpoint is the endpoint of the node, denoted in %s:%d format
	Endpoint string
	// ServerTLSCert is the DER encoded TLS server certificate of the node
	ServerTLSCert []byte
	// ClientTLSCert is the DER encoded TLS client certificate of the node
	ClientTLSCert []byte
}

RemoteContext

RemoteContext is the context that streams live in; it is used to interact with remote cluster nodes.

Before creating a RemoteContext object, a connection must be established and passed in as RemoteContext.conn.

// RemoteContext interacts with remote cluster
// nodes. Every call can be aborted via call to Abort()
type RemoteContext struct {
	expiresAt                        time.Time
	minimumExpirationWarningInterval time.Duration
	certExpWarningThreshold          time.Duration
	Metrics                          *Metrics
	Channel                          string
	SendBuffSize                     int
	shutdownSignal                   chan struct{}
	Logger                           *flogging.FabricLogger
	endpoint                         string
	Client                           orderer.ClusterClient
	ProbeConn                        func(conn *grpc.ClientConn) error
	conn                             *grpc.ClientConn
	nextStreamID                     uint64
	streamsByID                      streamsMapperReporter
	workerCountReporter              workerCountReporter
}

NewStream() creates a new gRPC stream via Client.Step() and embeds it as Stream.Cluster_StepClient. Finally, it launches a serviceStream() goroutine that continuously waits for messages to send.

// NewStream creates a new stream.
// It is not thread safe, and Send() or Recv() block only until the timeout expires.
func (rc *RemoteContext) NewStream(timeout time.Duration) (*Stream, error) {
	if err := rc.ProbeConn(rc.conn); err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(context.TODO())
	stream, err := rc.Client.Step(ctx)
	if err != nil {
		cancel()
		return nil, errors.WithStack(err)
	}

	streamID := atomic.AddUint64(&rc.nextStreamID, 1)
	nodeName := commonNameFromContext(stream.Context())

	var canceled uint32

	abortChan := make(chan struct{})

	abort := func() {
		cancel()
		rc.streamsByID.Delete(streamID)
		rc.Metrics.reportEgressStreamCount(rc.Channel, atomic.LoadUint32(&rc.streamsByID.size))
		rc.Logger.Debugf("Stream %d to %s(%s) is aborted", streamID, nodeName, rc.endpoint)
		atomic.StoreUint32(&canceled, 1)
		close(abortChan)
	}

	once := &sync.Once{}
	abortReason := &atomic.Value{}
	cancelWithReason := func(err error) {
		abortReason.Store(err.Error())
		once.Do(abort)
	}

	logger := flogging.MustGetLogger("orderer.common.cluster.step")
	stepLogger := logger.WithOptions(zap.AddCallerSkip(1))

	s := &Stream{
		Channel:            rc.Channel,
		metrics:            rc.Metrics,
		abortReason:        abortReason,
		abortChan:          abortChan,
		sendBuff:           make(chan *orderer.StepRequest, rc.SendBuffSize),
		commShutdown:       rc.shutdownSignal,
		NodeName:           nodeName,
		Logger:             stepLogger,
		ID:                 streamID,
		Endpoint:           rc.endpoint,
		Timeout:            timeout,
		Cluster_StepClient: stream,
		Cancel:             cancelWithReason,
		canceled:           &canceled,
	}

	s.expCheck = &certificateExpirationCheck{
		minimumExpirationWarningInterval: rc.minimumExpirationWarningInterval,
		expirationWarningThreshold:       rc.certExpWarningThreshold,
		expiresAt:                        rc.expiresAt,
		endpoint:                         s.Endpoint,
		nodeName:                         s.NodeName,
		alert: func(template string, args ...interface{}) {
			s.Logger.Warningf(template, args...)
		},
	}

	rc.Logger.Debugf("Created new stream to %s with ID of %d and buffer size of %d",
		rc.endpoint, streamID, cap(s.sendBuff))

	rc.streamsByID.Store(streamID, s)
	rc.Metrics.reportEgressStreamCount(rc.Channel, atomic.LoadUint32(&rc.streamsByID.size))

	go func() {
		rc.workerCountReporter.increment(s.metrics)
		s.serviceStream()
		rc.workerCountReporter.decrement(s.metrics)
	}()

	return s, nil
}

// Abort aborts the contexts the RemoteContext uses, thus effectively
// causes all operations that use this RemoteContext to terminate.
func (rc *RemoteContext) Abort() {
	rc.streamsByID.Range(func(_, value interface{}) bool {
		value.(*Stream).Cancel(errAborted)
		return false
	})
}

stream

Stream embeds orderer.Cluster_StepClient anonymously to send/receive messages to/from the remote cluster member through the gRPC client; the embedded stream is passed in by RemoteContext.NewStream() above.

// Stream is used to send/receive messages to/from the remote cluster member.
type Stream struct {
	abortChan    <-chan struct{}
	sendBuff     chan *orderer.StepRequest
	commShutdown chan struct{}
	abortReason  *atomic.Value
	metrics      *Metrics
	ID           uint64
	Channel      string
	NodeName     string
	Endpoint     string
	Logger       *flogging.FabricLogger
	Timeout      time.Duration
	orderer.Cluster_StepClient
	Cancel   func(error)
	canceled *uint32
	expCheck *certificateExpirationCheck
}

sendMessage() calls Cluster_StepClient.Send() to send a message, wrapping the call in operateWithTimeout().

// sendMessage sends the request down the stream
func (stream *Stream) sendMessage(request *orderer.StepRequest) {
	start := time.Now()
	var err error
	defer func() {
		if !stream.Logger.IsEnabledFor(zap.DebugLevel) {
			return
		}
		var result string
		if err != nil {
			result = fmt.Sprintf("but failed due to %s", err.Error())
		}
		stream.Logger.Debugf("Send of %s to %s(%s) took %v %s", requestAsString(request),
			stream.NodeName, stream.Endpoint, time.Since(start), result)
	}()

	f := func() (*orderer.StepResponse, error) {
		startSend := time.Now()
		stream.expCheck.checkExpiration(startSend, stream.Channel)
		err := stream.Cluster_StepClient.Send(request)
		stream.metrics.reportMsgSendTime(stream.Endpoint, stream.Channel, time.Since(startSend))
		return nil, err
	}

	_, err = stream.operateWithTimeout(f)
}
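
sendMessage() itself runs in the serviceStream() goroutine launched by NewStream(); a simplified sketch of that loop, using only the Stream fields shown above (the real implementation also reports metrics and handles buffer draining):

// serviceStreamSketch drains sendBuff and pushes each request down the gRPC
// stream until the stream is aborted or the whole Comm shuts down.
func (stream *Stream) serviceStreamSketch() {
	defer stream.Cancel(errAborted) // tear the stream down on exit
	for {
		select {
		case msg := <-stream.sendBuff:
			stream.sendMessage(msg)
		case <-stream.abortChan:
			return
		case <-stream.commShutdown:
			return
		}
	}
}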

Just like sending, Recv() calls Cluster_StepClient.Recv() to receive a message.

// Recv receives a message from a remote cluster member.
func (stream *Stream) Recv() (*orderer.StepResponse, error) {
	start := time.Now()
	defer func() {
		if !stream.Logger.IsEnabledFor(zap.DebugLevel) {
			return
		}
		stream.Logger.Debugf("Receive from %s(%s) took %v", stream.NodeName, stream.Endpoint, time.Since(start))
	}()

	f := func() (*orderer.StepResponse, error) {
		return stream.Cluster_StepClient.Recv()
	}

	return stream.operateWithTimeout(f)
}