**Hyperledger Fabric Source Code Analysis – The Orderer Service**

The Orderer is the ordering node: it orders all transactions submitted to the network, assembles the ordered transactions into blocks according to the batching rules agreed in the channel configuration, and then hands the blocks to the Committers for processing.
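As a quick orientation before reading the source, here is a minimal client-side sketch of how a transaction reaches the orderer over the AtomicBroadcast Broadcast stream. It is illustrative only: the endpoint address is made up, the signed envelope is assumed to be built elsewhere, and TLS is omitted.

```go
package main

import (
	"context"
	"log"

	cb "github.com/hyperledger/fabric/protos/common"
	ab "github.com/hyperledger/fabric/protos/orderer"
	"google.golang.org/grpc"
)

func main() {
	// Assumed orderer endpoint; a production connection would use TLS credentials.
	conn, err := grpc.Dial("orderer.example.com:7050", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial orderer: %v", err)
	}
	defer conn.Close()

	// Broadcast is a bidirectional stream: the client sends signed envelopes,
	// the orderer replies with one BroadcastResponse per envelope.
	stream, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.Background())
	if err != nil {
		log.Fatalf("open Broadcast stream: %v", err)
	}

	env := &cb.Envelope{} // a signed transaction envelope, assumed to be built and signed elsewhere
	if err := stream.Send(env); err != nil {
		log.Fatalf("send envelope: %v", err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatalf("receive response: %v", err)
	}
	log.Printf("orderer returned status: %v", resp.Status)
}
```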
1. Load the command-line tool and parse the command-line arguments
func Main() {
fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
// If the received command is "version"
if fullCmd == version.FullCommand() {
fmt.Println(metadata.GetVersionInfo())
return
}
// Load the local configuration
conf, err := localconfig.Load()
if err != nil {
logger.Error("failed to parse config: ", err)
os.Exit(1)
}
// Initialize logging
initializeLogging()
// Load the local MSP signing identity
initializeLocalMsp(conf)
prettyPrintStruct(conf)
// Start the service
Start(fullCmd, conf)
}
Starting the service
Start provides an abstraction layer for benchmarking.
func Start(cmd string, conf *localconfig.TopLevel) {
bootstrapBlock := extractBootstrapBlock(conf)
if err := ValidateBootstrapBlock(bootstrapBlock); err != nil {
logger.Panicf("Failed validating bootstrap block: %v", err)
}
opsSystem := newOperationsSystem(conf.Operations, conf.Metrics)
err := opsSystem.Start()
if err != nil {
logger.Panicf("failed to initialize operations subsystem: %s", err)
}
defer opsSystem.Stop()
metricsProvider := opsSystem.Provider
lf, _ := createLedgerFactory(conf, metricsProvider)
sysChanLastConfigBlock := extractSysChanLastConfig(lf, bootstrapBlock)
clusterBootBlock := selectClusterBootBlock(bootstrapBlock, sysChanLastConfigBlock)
clusterType := isClusterType(clusterBootBlock)
// Instantiate the local signer from the MSP identity
signer := localmsp.NewSigner()
clusterClientConfig := initializeClusterClientConfig(conf, clusterType, bootstrapBlock)
clusterDialer := &cluster.PredicateDialer{
ClientConfig: clusterClientConfig,
}
r := createReplicator(lf, bootstrapBlock, conf, clusterClientConfig.SecOpts, signer)
// Only clusters that are equipped with a recent config block can replicate.
if clusterType && conf.General.GenesisMethod == "file" {
r.replicateIfNeeded(bootstrapBlock)
}
logObserver := floggingmetrics.NewObserver(metricsProvider)
flogging.Global.SetObserver(logObserver)
serverConfig := initializeServerConfig(conf, metricsProvider)
// Initialize the gRPC server
grpcServer := initializeGrpcServer(conf, serverConfig)
caSupport := &comm.CredentialSupport{
AppRootCAsByChain: make(map[string]comm.CertificateBundle),
OrdererRootCAsByChainAndOrg: make(comm.OrgRootCAs),
ClientRootCAs: serverConfig.SecOpts.ClientRootCAs,
}
clusterServerConfig := serverConfig
clusterGRPCServer := grpcServer
if clusterType {
clusterServerConfig, clusterGRPCServer = configureClusterListener(conf, serverConfig, grpcServer, ioutil.ReadFile)
}
var servers = []*comm.GRPCServer{grpcServer}
// If we have a separate gRPC server for the cluster, we need to update its TLS
// CA certificate pool too.
if clusterGRPCServer != grpcServer {
servers = append(servers, clusterGRPCServer)
}
tlsCallback := func(bundle *channelconfig.Bundle) {
// Only needed when mutual TLS is required or the orderer node is part of a cluster
if grpcServer.MutualTLSRequired() || clusterType {
logger.Debug("Executing callback to update root CAs")
updateTrustedRoots(caSupport, bundle, servers...)
if clusterType {
updateClusterDialer(caSupport, clusterDialer, clusterClientConfig.SecOpts.ServerRootCAs)
}
}
}
// Initialize the multichannel registrar
manager := initializeMultichannelRegistrar(clusterBootBlock, r, clusterDialer, clusterServerConfig, clusterGRPCServer, conf, signer, metricsProvider, opsSystem, lf, tlsCallback)
mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
expiration := conf.General.Authentication.NoExpirationChecks
server := NewServer(manager, metricsProvider, &conf.Debug, conf.General.Authentication.TimeWindow, mutualTLS, expiration)
logger.Infof("Starting %s", metadata.GetVersionInfo())
go handleSignals(addPlatformSignals(map[os.Signal]func(){
syscall.SIGTERM: func() {
grpcServer.Stop()
if clusterGRPCServer != grpcServer {
clusterGRPCServer.Stop()
}
},
}))
if clusterGRPCServer != grpcServer {
logger.Info("Starting cluster listener on", clusterGRPCServer.Address())
go clusterGRPCServer.Start()
}
initializeProfilingService(conf)
// Register the AtomicBroadcast service on the gRPC server
ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)
logger.Info("Beginning to serve requests")
// Start serving
grpcServer.Start()
}
Initializing the multichannel registrar
func initializeMultichannelRegistrar(
bootstrapBlock *cb.Block,
ri *replicationInitiator,
clusterDialer *cluster.PredicateDialer,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
conf *localconfig.TopLevel,
signer crypto.LocalSigner,
metricsProvider metrics.Provider,
healthChecker healthChecker,
lf blockledger.Factory,
callbacks ...channelconfig.BundleActor,
) *multichannel.Registrar {
genesisBlock := extractBootstrapBlock(conf)
// Check whether any channel already exists; if not, bootstrap one from the genesis block.
if len(lf.ChainIDs()) == 0 {
initializeBootstrapChannel(genesisBlock, lf)
} else {
logger.Info("Not bootstrapping because of existing channels")
}
consenters := make(map[string]consensus.Consenter)
registrar := multichannel.NewRegistrar(*conf, lf, signer, metricsProvider, callbacks...)
// Consensus implementations: solo, kafka, etcdraft.
consenters["solo"] = solo.New()
var kafkaMetrics *kafka.Metrics
consenters["kafka"], kafkaMetrics = kafka.New(conf.Kafka, metricsProvider, healthChecker)
go kafkaMetrics.PollGoMetricsUntilStop(time.Minute, nil)
if isClusterType(bootstrapBlock) {
initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, ri, srvConf, srv, registrar, metricsProvider)
}
// Initialize the registrar
registrar.Initialize(consenters)
return registrar
}
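Every value placed in the consenters map implements the consensus.Consenter interface, and the Chain it returns is what actually drives ordering for a single channel. An abridged paraphrase of that contract is shown below (the real ConsenterSupport interface is larger; the comments are mine):

```go
package consensus

import cb "github.com/hyperledger/fabric/protos/common"

// ConsenterSupport is abridged here; the real interface also exposes the
// block cutter, block writer, message processor, shared config and signer.
type ConsenterSupport interface {
	ChainID() string
}

// Consenter defines a backing ordering mechanism (solo, kafka, etcdraft).
type Consenter interface {
	// HandleChain returns a Chain for the given channel, resuming from the
	// consenter-specific metadata stored in the channel's last block.
	HandleChain(support ConsenterSupport, metadata *cb.Metadata) (Chain, error)
}

// Chain is how the rest of the orderer injects messages for ordering.
type Chain interface {
	Order(env *cb.Envelope, configSeq uint64) error         // enqueue a normal transaction
	Configure(config *cb.Envelope, configSeq uint64) error  // enqueue a config transaction
	WaitReady() error         // block until the chain can accept new messages
	Errored() <-chan struct{} // closed when the consenter hits an error (e.g. lost connectivity)
	Start()                   // allocate resources and begin processing
	Halt()                    // release resources
}
```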
Initializing the registrar
func (r *Registrar) Initialize(consenters map[string]consensus.Consenter) {
r.consenters = consenters
// Get the chain IDs stored locally
existingChains := r.ledgerFactory.ChainIDs()
for _, chainID := range existingChains {
// Get a ledger reader for this chain from the ledger factory
rl, err := r.ledgerFactory.GetOrCreate(chainID)
if err != nil {
logger.Panicf("Ledger factory reported chainID %s but could not retrieve it: %s", chainID, err)
}
// Get the latest config transaction
configTx := configTx(rl)
if configTx == nil {
logger.Panic("Programming error, configTx should never be nil here")
}
// Bind the config transaction to the ledger object
ledgerResources := r.newLedgerResources(configTx)
chainID := ledgerResources.ConfigtxValidator().ChainID()
if _, ok := ledgerResources.ConsortiumsConfig(); ok {
if r.systemChannelID != "" {
logger.Panicf("There appear to be two system chains %s and %s", r.systemChannelID, chainID)
}
// Instantiate the ChainSupport for the system channel
chain := newChainSupport(
r,
ledgerResources,
r.consenters,
r.signer,
r.blockcutterMetrics,
)
r.templator = msgprocessor.NewDefaultTemplator(chain)
chain.Processor = msgprocessor.NewSystemChannel(chain, r.templator, msgprocessor.CreateSystemChannelFilters(r, chain, r.config))
// Retrieve genesis block to log its hash. See FAB-5450 for the purpose
iter, pos := rl.Iterator(&ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}})
defer iter.Close()
if pos != uint64(0) {
logger.Panicf("Error iterating over system channel: '%s', expected position 0, got %d", chainID, pos)
}
genesisBlock, status := iter.Next()
if status != cb.Status_SUCCESS {
logger.Panicf("Error reading genesis block of system channel '%s'", chainID)
}
logger.Infof("Starting system channel '%s' with genesis block hash %x and orderer type %s",
chainID, genesisBlock.Header.Hash(), chain.SharedConfig().ConsensusType())
r.chains[chainID] = chain
r.systemChannelID = chainID
r.systemChannel = chain
// Start the system chain last (deferred), because it may attempt to copy and replace the chains map via newChain before the map is fully built
defer chain.start()
} else {
logger.Debugf("Starting chain: %s", chainID)
chain := newChainSupport(
r,
ledgerResources,
r.consenters,
r.signer,
r.blockcutterMetrics,
)
// Register the application (normal) chain and start it
r.chains[chainID] = chain
chain.start()
}
}
if r.systemChannelID == "" {
logger.Panicf("No system chain found. If bootstrapping, does your system channel contain a consortiums group definition?")
}
}
func newChainSupport(
registrar *Registrar,
ledgerResources *ledgerResources,
consenters map[string]consensus.Consenter,
signer crypto.LocalSigner,
blockcutterMetrics *blockcutter.Metrics,
) *ChainSupport {
// Read in the last block and metadata for the channel
lastBlock := blockledger.GetBlock(ledgerResources, ledgerResources.Height()-1)
// Get the orderer metadata from the block
metadata, err := utils.GetMetadataFromBlock(lastBlock, cb.BlockMetadataIndex_ORDERER)
// Assuming a block created with cb.NewBlock(), this should not
// error even if the orderer metadata is an empty byte slice
if err != nil {
logger.Fatalf("[channel: %s] Error extracting orderer metadata: %s", ledgerResources.ConfigtxValidator().ChainID(), err)
}
// Construct limited support needed as a parameter for additional support
cs := &ChainSupport{
ledgerResources: ledgerResources,
LocalSigner: signer,
cutter: blockcutter.NewReceiverImpl(
ledgerResources.ConfigtxValidator().ChainID(),
ledgerResources,
blockcutterMetrics,
),
}
// Set up the msgprocessor
cs.Processor = msgprocessor.NewStandardChannel(cs, msgprocessor.CreateStandardChannelFilters(cs, registrar.config))
// Set up the block writer
cs.BlockWriter = newBlockWriter(lastBlock, registrar, cs)
// Look up the consensus type the orderer uses from the channel config
consenterType := ledgerResources.SharedConfig().ConsensusType()
consenter, ok := consenters[consenterType]
if !ok {
logger.Panicf("Error retrieving consenter of type: %s", consenterType)
}
// Have the consenter implementation take over the chain
cs.Chain, err = consenter.HandleChain(cs, metadata)
if err != nil {
logger.Panicf("[channel: %s] Error creating consenter: %s", cs.ChainID(), err)
}
logger.Debugf("[channel: %s] Done creating channel support resources", cs.ChainID())
return cs
}
func (consenter *consenterImpl) HandleChain(support consensus.ConsenterSupport, metadata *cb.Metadata) (consensus.Chain, error) {
lastOffsetPersisted, lastOriginalOffsetProcessed, lastResubmittedConfigOffset := getOffsets(metadata.Value, support.ChainID())
// Instantiate the chain
ch, err := newChain(consenter, support, lastOffsetPersisted, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)
if err != nil {
return nil, err
}
consenter.healthChecker.RegisterChecker(ch.channel.String(), ch)
return ch, nil
}
func newChain(
consenter commonConsenter,
support consensus.ConsenterSupport,
lastOffsetPersisted int64,
lastOriginalOffsetProcessed int64,
lastResubmittedConfigOffset int64,
) (*chainImpl, error) {
lastCutBlockNumber := getLastCutBlockNumber(support.Height())
logger.Infof("[channel: %s] Starting chain with last persisted offset %d and last recorded block [%d]",
support.ChainID(), lastOffsetPersisted, lastCutBlockNumber)
doneReprocessingMsgInFlight := make(chan struct{})
// In either one of following cases, we should unblock ingress messages:
// - lastResubmittedConfigOffset == 0, where we've never resubmitted any config messages
// - lastResubmittedConfigOffset == lastOriginalOffsetProcessed, where the latest config message we resubmitted
// has been processed already
// - lastResubmittedConfigOffset < lastOriginalOffsetProcessed, where we've processed one or more resubmitted
// normal messages after the latest resubmitted config message. (we advance `lastResubmittedConfigOffset` for
// config messages, but not normal messages)
if lastResubmittedConfigOffset == 0 || lastResubmittedConfigOffset <= lastOriginalOffsetProcessed {
// If we've already caught up with the reprocessing resubmitted messages, close the channel to unblock broadcast
close(doneReprocessingMsgInFlight)
}
consenter.Metrics().LastOffsetPersisted.With("channel", support.ChainID()).Set(float64(lastOffsetPersisted))
return &chainImpl{
consenter: consenter,
ConsenterSupport: support,
channel: newChannel(support.ChainID(), defaultPartition),
lastOffsetPersisted: lastOffsetPersisted,
lastOriginalOffsetProcessed: lastOriginalOffsetProcessed,
lastResubmittedConfigOffset: lastResubmittedConfigOffset,
lastCutBlockNumber: lastCutBlockNumber,
haltChan: make(chan struct{}),
startChan: make(chan struct{}),
doneReprocessingMsgInFlight: doneReprocessingMsgInFlight,
}, nil
}
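Note the doneReprocessingMsgInFlight channel: closing a channel in Go releases every current and future receiver at once, which is why a single close() here is enough to "unblock broadcast". A tiny standalone illustration of the idiom (the names are mine, not Fabric's):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	done := make(chan struct{}) // stays open while reprocessing is in flight

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-done // every waiter blocks here until the channel is closed
			fmt.Printf("waiter %d unblocked\n", id)
		}(i)
	}

	time.Sleep(100 * time.Millisecond)
	close(done) // a single close wakes all waiters, present and future
	wg.Wait()
}
```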
Instantiating the server
type server struct {
bh *broadcast.Handler
dh *deliver.Handler
debug *localconfig.Debug
*multichannel.Registrar
}
func NewServer(
r *multichannel.Registrar,
metricsProvider metrics.Provider,
debug *localconfig.Debug,
timeWindow time.Duration,
mutualTLS bool,
expirationCheckDisabled bool,
) ab.AtomicBroadcastServer {
s := &server{
dh: deliver.NewHandler(
deliverSupport{Registrar: r},
timeWindow,
mutualTLS,
deliver.NewMetrics(metricsProvider),
expirationCheckDisabled,
),
bh: &broadcast.Handler{
SupportRegistrar: broadcastSupport{Registrar: r},
Metrics: broadcast.NewMetrics(metricsProvider),
},
debug: debug,
Registrar: r,
}
return s
}
// Broadcast receives a stream of messages from the client for ordering
func (s *server) Broadcast(srv ab.AtomicBroadcast_BroadcastServer) error {
logger.Debugf("Starting new Broadcast handler")
defer func() {
if r := recover(); r != nil {
logger.Criticalf("Broadcast client triggered panic: %s\n%s", r, debug.Stack())
}
logger.Debugf("Closing Broadcast stream")
}()
return s.bh.Handle(&broadcastMsgTracer{
AtomicBroadcast_BroadcastServer: srv,
msgTracer: msgTracer{
debug: s.debug,
function: "Broadcast",
},
})
}
// Handle reads requests from a Broadcast stream, processes them, and returns the responses on the stream
func (bh *Handler) Handle(srv ab.AtomicBroadcast_BroadcastServer) error {
addr := util.ExtractRemoteAddress(srv.Context())
logger.Debugf("Starting new broadcast loop for %s", addr)
for {
msg, err := srv.Recv()
if err == io.EOF {
logger.Debugf("Received EOF from %s, hangup", addr)
return nil
}
if err != nil {
logger.Warningf("Error reading from %s: %s", addr, err)
return err
}
resp := bh.ProcessMessage(msg, addr)
err = srv.Send(resp)
if resp.Status != cb.Status_SUCCESS {
return err
}
if err != nil {
logger.Warningf("Error sending to %s: %s", addr, err)
return err
}
}
}
// ProcessMessage validates a single message and enqueues it for ordering
func (bh *Handler) ProcessMessage(msg *cb.Envelope, addr string) (resp *ab.BroadcastResponse) {
tracker := &MetricsTracker{
ChannelID: "unknown",
TxType: "unknown",
Metrics: bh.Metrics,
}
defer func() {
// This looks a little unnecessary, but if done directly as
// a defer, resp gets the (always nil) current state of resp
// and not the return value
tracker.Record(resp)
}()
tracker.BeginValidate()
chdr, isConfig, processor, err := bh.SupportRegistrar.BroadcastChannelSupport(msg)
if chdr != nil {
tracker.ChannelID = chdr.ChannelId
tracker.TxType = cb.HeaderType(chdr.Type).String()
}
if err != nil {
logger.Warningf("[channel: %s] Could not get message processor for serving %s: %s", tracker.ChannelID, addr, err)
return &ab.BroadcastResponse{Status: cb.Status_BAD_REQUEST, Info: err.Error()}
}
if !isConfig {
logger.Debugf("[channel: %s] Broadcast is processing normal message from %s with txid '%s' of type %s", chdr.ChannelId, addr, chdr.TxId, cb.HeaderType_name[chdr.Type])
configSeq, err := processor.ProcessNormalMsg(msg)
if err != nil {
logger.Warningf("[channel: %s] Rejecting broadcast of normal message from %s because of error: %s", chdr.ChannelId, addr, err)
return &ab.BroadcastResponse{Status: ClassifyError(err), Info: err.Error()}
}
tracker.EndValidate()
tracker.BeginEnqueue()
if err = processor.WaitReady(); err != nil {
logger.Warningf("[channel: %s] Rejecting broadcast of message from %s with SERVICE_UNAVAILABLE: rejected by Consenter: %s", chdr.ChannelId, addr, err)
return &ab.BroadcastResponse{Status: cb.Status_SERVICE_UNAVAILABLE, Info: err.Error()}
}
err = processor.Order(msg, configSeq)
if err != nil {
logger.Warningf("[channel: %s] Rejecting broadcast of normal message from %s with SERVICE_UNAVAILABLE: rejected by Order: %s", chdr.ChannelId, addr, err)
return &ab.BroadcastResponse{Status: cb.Status_SERVICE_UNAVAILABLE, Info: err.Error()}
}
} else { // isConfig
logger.Debugf("[channel: %s] Broadcast is processing config update message from %s", chdr.ChannelId, addr)
config, configSeq, err := processor.ProcessConfigUpdateMsg(msg)
if err != nil {
logger.Warningf("[channel: %s] Rejecting broadcast of config message from %s because of error: %s", chdr.ChannelId, addr, err)
return &ab.BroadcastResponse{Status: ClassifyError(err), Info: err.Error()}
}
tracker.EndValidate()
tracker.BeginEnqueue()
if err = processor.WaitReady(); err != nil {
logger.Warningf("[channel: %s] Rejecting broadcast of message from %s with SERVICE_UNAVAILABLE: rejected by Consenter: %s", chdr.ChannelId, addr, err)
return &ab.BroadcastResponse{Status: cb.Status_SERVICE_UNAVAILABLE, Info: err.Error()}
}
err = processor.Configure(config, configSeq)
if err != nil {
logger.Warningf("[channel: %s] Rejecting broadcast of config message from %s with SERVICE_UNAVAILABLE: rejected by Configure: %s", chdr.ChannelId, addr, err)
return &ab.BroadcastResponse{Status: cb.Status_SERVICE_UNAVAILABLE, Info: err.Error()}
}
}
logger.Debugf("[channel: %s] Broadcast has successfully enqueued message of type %s from %s", chdr.ChannelId, cb.HeaderType_name[chdr.Type], addr)
return &ab.BroadcastResponse{Status: cb.Status_SUCCESS}
}
// Deliver sends a stream of blocks to the client after ordering
func (s *server) Deliver(srv ab.AtomicBroadcast_DeliverServer) error {
logger.Debugf("Starting new Deliver handler")
defer func() {
if r := recover(); r != nil {
logger.Criticalf("Deliver client triggered panic: %s\n%s", r, debug.Stack())
}
logger.Debugf("Closing Deliver stream")
}()
policyChecker := func(env *cb.Envelope, channelID string) error {
chain := s.GetChain(channelID)
if chain == nil {
return errors.Errorf("channel %s not found", channelID)
}
// In maintenance mode, we typically require the signature of /Channel/Orderer/Readers.
// This will block Deliver requests from peers (which normally satisfy /Channel/Readers).
sf := msgprocessor.NewSigFilter(policies.ChannelReaders, policies.ChannelOrdererReaders, chain)
return sf.Apply(env)
}
deliverServer := &deliver.Server{
PolicyChecker: deliver.PolicyCheckerFunc(policyChecker),
Receiver: &deliverMsgTracer{
Receiver: srv,
msgTracer: msgTracer{
debug: s.debug,
function: "Deliver",
},
},
ResponseSender: &responseSender{
AtomicBroadcast_DeliverServer: srv,
},
}
return s.dh.Handle(srv.Context(), deliverServer)
}
func (h *Handler) Handle(ctx context.Context, srv *Server) error {
addr := util.ExtractRemoteAddress(ctx)
logger.Debugf("Starting new deliver loop for %s", addr)
h.Metrics.StreamsOpened.Add(1)
defer h.Metrics.StreamsClosed.Add(1)
for {
logger.Debugf("Attempting to read seek info message from %s", addr)
envelope, err := srv.Recv()
if err == io.EOF {
logger.Debugf("Received EOF from %s, hangup", addr)
return nil
}
if err != nil {
logger.Warningf("Error reading from %s: %s", addr, err)
return err
}
status, err := h.deliverBlocks(ctx, srv, envelope)
if err != nil {
return err
}
err = srv.SendStatusResponse(status)
if status != cb.Status_SUCCESS {
return err
}
if err != nil {
logger.Warningf("Error sending to %s: %s", addr, err)
return err
}
logger.Debugf("Waiting for new SeekInfo from %s", addr)
}
}
func (h *Handler) deliverBlocks(ctx context.Context, srv *Server, envelope *cb.Envelope) (status cb.Status, err error) {
addr := util.ExtractRemoteAddress(ctx)
// Validate the envelope
payload, err := utils.UnmarshalPayload(envelope.Payload)
if err != nil {
logger.Warningf("Received an envelope from %s with no payload: %s", addr, err)
return cb.Status_BAD_REQUEST, nil
}
if payload.Header == nil {
logger.Warningf("Malformed envelope received from %s with bad header", addr)
return cb.Status_BAD_REQUEST, nil
}
chdr, err := utils.UnmarshalChannelHeader(payload.Header.ChannelHeader)
if err != nil {
logger.Warningf("Failed to unmarshal channel header from %s: %s", addr, err)
return cb.Status_BAD_REQUEST, nil
}
err = h.validateChannelHeader(ctx, chdr)
if err != nil {
logger.Warningf("Rejecting deliver for %s due to envelope validation error: %s", addr, err)
return cb.Status_BAD_REQUEST, nil
}
// Get the chain object
chain := h.ChainManager.GetChain(chdr.ChannelId)
if chain == nil {
// Note, we log this at DEBUG because SDKs will poll waiting for channels to be created
// So we would expect our log to be somewhat flooded with these
logger.Debugf("Rejecting deliver for %s because channel %s not found", addr, chdr.ChannelId)
return cb.Status_NOT_FOUND, nil
}
labels := []string{
"channel", chdr.ChannelId,
"filtered", strconv.FormatBool(isFiltered(srv)),
}
h.Metrics.RequestsReceived.With(labels...).Add(1)
defer func() {
labels := append(labels, "success", strconv.FormatBool(status == cb.Status_SUCCESS))
h.Metrics.RequestsCompleted.With(labels...).Add(1)
}()
seekInfo := &ab.SeekInfo{}
if err = proto.Unmarshal(payload.Data, seekInfo); err != nil {
logger.Warningf("[channel: %s] Received a signed deliver request from %s with malformed seekInfo payload: %s", chdr.ChannelId, addr, err)
return cb.Status_BAD_REQUEST, nil
}
// Watch for consenter errors
erroredChan := chain.Errored()
if seekInfo.ErrorResponse == ab.SeekInfo_BEST_EFFORT {
// In a 'best effort' delivery of blocks, we should ignore consenter errors
// and continue to deliver blocks according to the client's request.
erroredChan = nil
}
select {
case <-erroredChan:
logger.Warningf("[channel: %s] Rejecting deliver request for %s because of consenter error", chdr.ChannelId, addr)
return cb.Status_SERVICE_UNAVAILABLE, nil
default:
}
accessControl, err := NewSessionAC(chain, envelope, srv.PolicyChecker, chdr.ChannelId, h.ExpirationCheckFunc)
if err != nil {
logger.Warningf("[channel: %s] failed to create access control object due to %s", chdr.ChannelId, err)
return cb.Status_BAD_REQUEST, nil
}
if err := accessControl.Evaluate(); err != nil {
logger.Warningf("[channel: %s] Client authorization revoked for deliver request from %s: %s", chdr.ChannelId, addr, err)
return cb.Status_FORBIDDEN, nil
}
if seekInfo.Start == nil || seekInfo.Stop == nil {
logger.Warningf("[channel: %s] Received seekInfo message from %s with missing start or stop %v, %v", chdr.ChannelId, addr, seekInfo.Start, seekInfo.Stop)
return cb.Status_BAD_REQUEST, nil
}
logger.Debugf("[channel: %s] Received seekInfo (%p) %v from %s", chdr.ChannelId, seekInfo, seekInfo, addr)
cursor, number := chain.Reader().Iterator(seekInfo.Start)
defer cursor.Close()
var stopNum uint64
switch stop := seekInfo.Stop.Type.(type) {
case *ab.SeekPosition_Oldest:
stopNum = number
case *ab.SeekPosition_Newest:
stopNum = chain.Reader().Height() - 1
case *ab.SeekPosition_Specified:
stopNum = stop.Specified.Number
if stopNum < number {
logger.Warningf("[channel: %s] Received invalid seekInfo message from %s: start number %d greater than stop number %d", chdr.ChannelId, addr, number, stopNum)
return cb.Status_BAD_REQUEST, nil
}
}
for {
if seekInfo.Behavior == ab.SeekInfo_FAIL_IF_NOT_READY {
if number > chain.Reader().Height()-1 {
return cb.Status_NOT_FOUND, nil
}
}
var block *cb.Block
var status cb.Status
iterCh := make(chan struct{})
go func() {
block, status = cursor.Next()
close(iterCh)
}()
select {
case <-ctx.Done():
logger.Debugf("Context canceled, aborting wait for next block")
return cb.Status_INTERNAL_SERVER_ERROR, errors.Wrapf(ctx.Err(), "context finished before block retrieved")
case <-erroredChan:
// TODO, today, the only user of the errorChan is the orderer consensus implementations. If the peer ever reports
// this error, we will need to update this error message, possibly finding a way to signal what error text to return.
logger.Warningf("Aborting deliver for request because the backing consensus implementation indicates an error")
return cb.Status_SERVICE_UNAVAILABLE, nil
case <-iterCh:
// Iterator has set the block and status vars
}
if status != cb.Status_SUCCESS {
logger.Errorf("[channel: %s] Error reading from channel, cause was: %v", chdr.ChannelId, status)
return status, nil
}
// increment block number to support FAIL_IF_NOT_READY deliver behavior
number++
if err := accessControl.Evaluate(); err != nil {
logger.Warningf("[channel: %s] Client authorization revoked for deliver request from %s: %s", chdr.ChannelId, addr, err)
return cb.Status_FORBIDDEN, nil
}
logger.Debugf("[channel: %s] Delivering block for (%p) for %s", chdr.ChannelId, seekInfo, addr)
if err := srv.SendBlockResponse(block); err != nil {
logger.Warningf("[channel: %s] Error sending to %s: %s", chdr.ChannelId, addr, err)
return cb.Status_INTERNAL_SERVER_ERROR, err
}
h.Metrics.BlocksSent.With(labels...).Add(1)
if stopNum == block.Header.Number {
break
}
}
logger.Debugf("[channel: %s] Done delivering to %s for (%p)", chdr.ChannelId, addr, seekInfo)
return cb.Status_SUCCESS, nil
}
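For reference, a Deliver request is itself just a signed envelope whose payload is a SeekInfo message. A minimal sketch of building one that streams every block from the oldest available onward (the channel name and signer are assumptions; CreateSignedEnvelope is the protos/utils helper):

```go
package example

import (
	"github.com/hyperledger/fabric/common/crypto"
	cb "github.com/hyperledger/fabric/protos/common"
	ab "github.com/hyperledger/fabric/protos/orderer"
	"github.com/hyperledger/fabric/protos/utils"
)

// newSeekEnvelope builds a signed Deliver request for the given channel that
// asks for blocks from the oldest available block to the newest, and keeps
// the stream open waiting for new blocks (BLOCK_UNTIL_READY).
func newSeekEnvelope(channelID string, signer crypto.LocalSigner) (*cb.Envelope, error) {
	seekInfo := &ab.SeekInfo{
		Start:    &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}},
		Stop:     &ab.SeekPosition{Type: &ab.SeekPosition_Newest{Newest: &ab.SeekNewest{}}},
		Behavior: ab.SeekInfo_BLOCK_UNTIL_READY,
	}
	return utils.CreateSignedEnvelope(cb.HeaderType_DELIVER_SEEK_INFO, channelID, signer, seekInfo, 0, 0)
}
```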
Starting the Kafka consenter
func (chain *chainImpl) Start() {
go startThread(chain)
}
func startThread(chain *chainImpl) {
var err error
// Create the topic if it does not already exist.
// In Fabric, the channel is the key construct connecting the participants, and the Kafka topic is effectively an extension of the channel: the orderer publishes blocks' messages to the topic, and the consumers subscribed to that topic write the results into the local orderer ledger.
err = setupTopicForChannel(chain.consenter.retryOptions(), chain.haltChan, chain.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.consenter.topicDetail(), chain.channel)
if err != nil {
// log for now and fallback to auto create topics setting for broker
logger.Infof("[channel: %s]: failed to create Kafka topic = %s", chain.channel.topic(), err)
}
// Create the message producer and consumers according to the configuration.
// Set up the producer
chain.producer, err = setupProducerForChannel(chain.consenter.retryOptions(), chain.haltChan, chain.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.channel)
if err != nil {
logger.Panicf("[channel: %s] Cannot set up producer = %s", chain.channel.topic(), err)
}
logger.Infof("[channel: %s] Producer set up successfully", chain.ChainID())
// Have the producer post the CONNECT message
if err = sendConnectMessage(chain.consenter.retryOptions(), chain.haltChan, chain.producer, chain.channel); err != nil {
logger.Panicf("[channel: %s] Cannot post CONNECT message = %s", chain.channel.topic(), err)
}
logger.Infof("[channel: %s] CONNECT message posted successfully", chain.channel.topic())
// Set up the parent consumer
chain.parentConsumer, err = setupParentConsumerForChannel(chain.consenter.retryOptions(), chain.haltChan, chain.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.channel)
if err != nil {
logger.Panicf("[channel: %s] Cannot set up parent consumer = %s", chain.channel.topic(), err)
}
logger.Infof("[channel: %s] Parent consumer set up successfully", chain.channel.topic())
// Set up the channel consumer
chain.channelConsumer, err = setupChannelConsumerForChannel(chain.consenter.retryOptions(), chain.haltChan, chain.parentConsumer, chain.channel, chain.lastOffsetPersisted+1)
if err != nil {
logger.Panicf("[channel: %s] Cannot set up channel consumer = %s", chain.channel.topic(), err)
}
logger.Infof("[channel: %s] Channel consumer set up successfully", chain.channel.topic())
chain.replicaIDs, err = getHealthyClusterReplicaInfo(chain.consenter.retryOptions(), chain.haltChan, chain.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.channel)
if err != nil {
logger.Panicf("[channel: %s] failed to get replica IDs = %s", chain.channel.topic(), err)
}
chain.doneProcessingMessagesToBlocks = make(chan struct{})
chain.errorChan = make(chan struct{}) // Deliver requests will also go through
close(chain.startChan) // Broadcast requests will now go through
logger.Infof("[channel: %s] Start phase completed successfully", chain.channel.topic())
// Listen for and process incoming messages
chain.processMessagesToBlocks() // Keep up to date with the channel
}
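To make the producer/consumer wiring above concrete, here is a standalone sarama sketch of the two roles. The broker address and topic name are made up; the real code derives them from the consenter config and the channel, and exchanges serialized KafkaMessage protos rather than plain strings.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	brokers := []string{"kafka0:9092"} // assumed broker address
	topic := "mychannel"               // in Fabric the topic name is derived from the channel

	// Producer side: the orderer posts messages (CONNECT, NORMAL, TIME-TO-CUT) to the channel's topic.
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required for a SyncProducer
	producer, err := sarama.NewSyncProducer(brokers, cfg)
	if err != nil {
		log.Fatalf("create producer: %v", err)
	}
	defer producer.Close()
	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder([]byte("a serialized KafkaMessage would go here")),
	}); err != nil {
		log.Fatalf("send: %v", err)
	}

	// Consumer side: the orderer reads the same topic back, in Fabric starting at lastOffsetPersisted+1.
	consumer, err := sarama.NewConsumer(brokers, nil)
	if err != nil {
		log.Fatalf("create consumer: %v", err)
	}
	defer consumer.Close()
	pc, err := consumer.ConsumePartition(topic, 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatalf("consume partition: %v", err)
	}
	defer pc.Close()
	msg := <-pc.Messages()
	log.Printf("consumed offset %d: %s", msg.Offset, msg.Value)
}
```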
func (chain *chainImpl) processMessagesToBlocks() ([]uint64, error) {
counts := make([]uint64, 11) // For metrics and tests
msg := new(ab.KafkaMessage)
defer func() {
// notify that we are not processing messages to blocks
close(chain.doneProcessingMessagesToBlocks)
}()
defer func() { // When Halt() is called
select {
case <-chain.errorChan: // If already closed, don't do anything
default:
close(chain.errorChan)
}
}()
...
// Handle the three Kafka message types
switch msg.Type.(type) {
// Effectively a no-op
case *ab.KafkaMessage_Connect:
_ = chain.processConnect(chain.ChainID())
counts[indexProcessConnectPass]++
// Time-to-cut (timeout) handling
case *ab.KafkaMessage_TimeToCut:
if err := chain.processTimeToCut(msg.GetTimeToCut(), in.Offset); err != nil {
logger.Warningf("[channel: %s] %s", chain.ChainID(), err)
logger.Criticalf("[channel: %s] Consenter for channel exiting", chain.ChainID())
counts[indexProcessTimeToCutError]++
return counts, err // TODO Revisit whether we should indeed stop processing the chain at this point
}
counts[indexProcessTimeToCutPass]++
case *ab.KafkaMessage_Regular:
if err := chain.processRegular(msg.GetRegular(), in.Offset); err != nil {
logger.Warningf("[channel: %s] Error when processing incoming message of type REGULAR = %s", chain.ChainID(), err)
counts[indexProcessRegularError]++
} else {
counts[indexProcessRegularPass]++
}
}
}
Handling REGULAR messages
switch regularMessage.Class {
case ab.KafkaMessageRegular_UNKNOWN:
logger.Panicf("[channel: %s] Kafka message of type UNKNOWN should have been processed already", chain.ChainID())
// NORMAL message handling
case ab.KafkaMessageRegular_NORMAL:
// This is a message that has been re-validated and re-ordered
if regularMessage.OriginalOffset != 0 {
logger.Debugf("[channel: %s] Received re-submitted normal message with original offset %d", chain.ChainID(), regularMessage.OriginalOffset)
// But we've reprocessed it already
if regularMessage.OriginalOffset <= chain.lastOriginalOffsetProcessed {
logger.Debugf(
"[channel: %s] OriginalOffset(%d) <= LastOriginalOffsetProcessed(%d), message has been consumed already, discard",
chain.ChainID(), regularMessage.OriginalOffset, chain.lastOriginalOffsetProcessed)
return nil
}
logger.Debugf(
"[channel: %s] OriginalOffset(%d) > LastOriginalOffsetProcessed(%d), "+
"this is the first time we receive this re-submitted normal message",
chain.ChainID(), regularMessage.OriginalOffset, chain.lastOriginalOffsetProcessed)
// In case we haven't reprocessed the message, there's no need to differentiate it from those
// messages that will be processed for the first time.
}
// The config sequence has advanced
if regularMessage.ConfigSeq < seq {
logger.Debugf("[channel: %s] Config sequence has advanced since this normal message got validated, re-validating", chain.ChainID())
configSeq, err := chain.ProcessNormalMsg(env)
if err != nil {
return fmt.Errorf("discarding bad normal message because = %s", err)
}
logger.Debugf("[channel: %s] Normal message is still valid, re-submit", chain.ChainID())
// For both messages that are ordered for the first time or re-ordered, we set original offset
// to current received offset and re-order it.
if err := chain.order(env, configSeq, receivedOffset); err != nil {
return fmt.Errorf("error re-submitting normal message because = %s", err)
}
return nil
}
// Any messages coming in here may or may not have been re-validated
// and re-ordered, BUT they are definitely valid here
// advance lastOriginalOffsetProcessed iff message is re-validated and re-ordered
offset := regularMessage.OriginalOffset
if offset == 0 {
offset = chain.lastOriginalOffsetProcessed
}
commitNormalMsg(env, offset)
// CONFIG message handling
case ab.KafkaMessageRegular_CONFIG:
// This is a message that is re-validated and re-ordered
if regularMessage.OriginalOffset != 0 {
logger.Debugf("[channel: %s] Received re-submitted config message with original offset %d", chain.ChainID(), regularMessage.OriginalOffset)
// But we've reprocessed it already
if regularMessage.OriginalOffset <= chain.lastOriginalOffsetProcessed {
logger.Debugf(
"[channel: %s] OriginalOffset(%d) <= LastOriginalOffsetProcessed(%d), message has been consumed already, discard",
chain.ChainID(), regularMessage.OriginalOffset, chain.lastOriginalOffsetProcessed)
return nil
}
logger.Debugf(
"[channel: %s] OriginalOffset(%d) > LastOriginalOffsetProcessed(%d), "+
"this is the first time we receive this re-submitted config message",
chain.ChainID(), regularMessage.OriginalOffset, chain.lastOriginalOffsetProcessed)
if regularMessage.OriginalOffset == chain.lastResubmittedConfigOffset && // This is very last resubmitted config message
regularMessage.ConfigSeq == seq { // AND we don't need to resubmit it again
logger.Debugf("[channel: %s] Config message with original offset %d is the last in-flight resubmitted message"+
"and it does not require revalidation, unblock ingress messages now", chain.ChainID(), regularMessage.OriginalOffset)
chain.reprocessConfigComplete() // Therefore, we could finally unblock broadcast
}
// Somebody resubmitted message at offset X, whereas we didn't. This is due to non-determinism where
// that message was considered invalid by us during revalidation, however somebody else deemed it to
// be valid, and resubmitted it. We need to advance lastResubmittedConfigOffset in this case in order
// to enforce consistency across the network.
if chain.lastResubmittedConfigOffset < regularMessage.OriginalOffset {
chain.lastResubmittedConfigOffset = regularMessage.OriginalOffset
}
}
// The config sequence has advanced
if regularMessage.ConfigSeq < seq {
logger.Debugf("[channel: %s] Config sequence has advanced since this config message got validated, re-validating", chain.ChainID())
configEnv, configSeq, err := chain.ProcessConfigMsg(env)
if err != nil {
return fmt.Errorf("rejecting config message because = %s", err)
}
// For both messages that are ordered for the first time or re-ordered, we set original offset
// to current received offset and re-order it.
if err := chain.configure(configEnv, configSeq, receivedOffset); err != nil {
return fmt.Errorf("error re-submitting config message because = %s", err)
}
logger.Debugf("[channel: %s] Resubmitted config message with offset %d, block ingress messages", chain.ChainID(), receivedOffset)
chain.lastResubmittedConfigOffset = receivedOffset // Keep track of last resubmitted message offset
chain.reprocessConfigPending() // Begin blocking ingress messages
return nil
}
// Any messages coming in here may or may not have been re-validated
// and re-ordered, BUT they are definitely valid here
// advance lastOriginalOffsetProcessed iff message is re-validated and re-ordered
offset := regularMessage.OriginalOffset
if offset == 0 {
offset = chain.lastOriginalOffsetProcessed
}
commitConfigMsg(env, offset)
default:
return errors.Errorf("unsupported regular kafka message type: %v", regularMessage.Class.String())
}
//receivedOffset: the consumer's current consumption position
//newOffset: the offset of the newly received message
//LastOffsetPersisted: the offset of the last message committed to the ledger
//LastOriginalOffsetProcessed: the offset of the latest re-submitted (original) message that has been processed
commitNormalMsg := func(message *cb.Envelope, newOffset int64) {
batches, pending := chain.BlockCutter().Ordered(message)
logger.Debugf("[channel: %s] Ordering results: items in batch = %d, pending = %v", chain.ChainID(), len(batches), pending)
switch {
case chain.timer != nil && !pending:
// Timer is already running but there are no messages pending, stop the timer
chain.timer = nil
case chain.timer == nil && pending:
// Timer is not already running and there are messages pending, so start it
chain.timer = time.After(chain.SharedConfig().BatchTimeout())
logger.Debugf("[channel: %s] Just began %s batch timer", chain.ChainID(), chain.SharedConfig().BatchTimeout().String())
default:
// Do nothing when:
// 1. Timer is already running and there are messages pending
// 2. Timer is not set and there are no messages pending
}
if len(batches) == 0 {
// If no block is cut, we update the `lastOriginalOffsetProcessed`, start the timer if necessary and return
chain.lastOriginalOffsetProcessed = newOffset
return
}
offset := receivedOffset
// Either messages are still pending, or a single oversized message was isolated into its own second batch
if pending || len(batches) == 2 {
// If the newest envelope is not encapsulated into the first batch,
// the `LastOffsetPersisted` should be `receivedOffset` - 1.
offset--
} else {
// We are just cutting exactly one block, so it is safe to update
// `lastOriginalOffsetProcessed` with `newOffset` here, and then
// encapsulate it into this block. Otherwise, if we are cutting two
// blocks, the first one should use current `lastOriginalOffsetProcessed`
// and the second one should use `newOffset`, which is also used to
// update `lastOriginalOffsetProcessed`
chain.lastOriginalOffsetProcessed = newOffset
}
// Commit the first block
block := chain.CreateNextBlock(batches[0])
metadata := &ab.KafkaMetadata{
LastOffsetPersisted: offset,
LastOriginalOffsetProcessed: chain.lastOriginalOffsetProcessed,
LastResubmittedConfigOffset: chain.lastResubmittedConfigOffset,
}
chain.WriteBlock(block, metadata)
chain.lastCutBlockNumber++
logger.Debugf("[channel: %s] Batch filled, just cut block [%d] - last persisted offset is now %d", chain.ChainID(), chain.lastCutBlockNumber, offset)
// If a second block exists, commit it as well
if len(batches) == 2 {
chain.lastOriginalOffsetProcessed = newOffset
offset++
block := chain.CreateNextBlock(batches[1])
metadata := &ab.KafkaMetadata{
LastOffsetPersisted: offset,
LastOriginalOffsetProcessed: newOffset,
LastResubmittedConfigOffset: chain.lastResubmittedConfigOffset,
}
chain.WriteBlock(block, metadata)
chain.lastCutBlockNumber++
logger.Debugf("[channel: %s] Batch filled, just cut block [%d] - last persisted offset is now %d", chain.ChainID(), chain.lastCutBlockNumber, offset)
}
}
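A worked example of this bookkeeping, with assumed numbers: suppose receivedOffset is 10 and the incoming envelope is oversized, so the cutter returns two batches. The first block holds the previously pending envelopes, and its KafkaMetadata records LastOffsetPersisted = 9 (the offset-- branch) together with the old lastOriginalOffsetProcessed; the second block holds only the new envelope, so offset is incremented back to 10 and lastOriginalOffsetProcessed advances to newOffset. Because startThread resumes the channel consumer at lastOffsetPersisted + 1 after a restart, the oversized envelope is neither lost nor committed into a block twice.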
Handling time-to-cut messages
// The orderers form a cluster: the block is cut and written in only one place, and the other members synchronize with it
func (chain *chainImpl) processTimeToCut(ttcMessage *ab.KafkaMessageTimeToCut, receivedOffset int64) error {
ttcNumber := ttcMessage.GetBlockNumber()
logger.Debugf("[channel: %s] It's a time-to-cut message for block [%d]", chain.ChainID(), ttcNumber)
if ttcNumber == chain.lastCutBlockNumber+1 {
chain.timer = nil
logger.Debugf("[channel: %s] Nil'd the timer", chain.ChainID())
batch := chain.BlockCutter().Cut()
if len(batch) == 0 {
return fmt.Errorf("got right time-to-cut message (for block [%d]),"+
" no pending requests though; this might indicate a bug", chain.lastCutBlockNumber+1)
}
block := chain.CreateNextBlock(batch)
metadata := &ab.KafkaMetadata{
LastOffsetPersisted: receivedOffset,
LastOriginalOffsetProcessed: chain.lastOriginalOffsetProcessed,
}
chain.WriteBlock(block, metadata)
chain.lastCutBlockNumber++
logger.Debugf("[channel: %s] Proper time-to-cut received, just cut block [%d]", chain.ChainID(), chain.lastCutBlockNumber)
return nil
} else if ttcNumber > chain.lastCutBlockNumber+1 {
return fmt.Errorf("got larger time-to-cut message (%d) than allowed/expected (%d)"+
" - this might indicate a bug", ttcNumber, chain.lastCutBlockNumber+1)
}
logger.Debugf("[channel: %s] Ignoring stale time-to-cut-message for block [%d]", chain.ChainID(), ttcNumber)
return nil
}
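For context, the TIME-TO-CUT message consumed above is produced when a chain's batch timer expires. A hedged sketch of that producer side follows: postTimeToCut is an illustrative helper (not the actual Fabric function), but the KafkaMessage payload it marshals is the real proto.

```go
package example

import (
	"github.com/Shopify/sarama"
	"github.com/golang/protobuf/proto"
	ab "github.com/hyperledger/fabric/protos/orderer"
)

// postTimeToCut publishes a TIME-TO-CUT message for the next expected block
// number. Every orderer in the cluster consumes it from the topic, but
// processTimeToCut (above) cuts the block for that number exactly once,
// which keeps the cluster members in sync.
func postTimeToCut(producer sarama.SyncProducer, topic string, nextBlockNumber uint64) error {
	payload, err := proto.Marshal(&ab.KafkaMessage{
		Type: &ab.KafkaMessage_TimeToCut{
			TimeToCut: &ab.KafkaMessageTimeToCut{BlockNumber: nextBlockNumber},
		},
	})
	if err != nil {
		return err
	}
	_, _, err = producer.SendMessage(&sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder(payload),
	})
	return err
}
```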
Block cutting
func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, pending bool) {
if len(r.pendingBatch) == 0 {
// We are beginning a new batch, mark the time
r.PendingBatchStartTime = time.Now()
}
// Load the orderer configuration
ordererConfig, ok := r.sharedConfigFetcher.OrdererConfig()
if !ok {
logger.Panicf("Could not retrieve orderer config to query batch parameters, block cutting is not possible")
}
// Get the configured batch-size limits
batchSize := ordererConfig.BatchSize()
// Compute the size of the transaction payload
messageSizeBytes := messageSizeBytes(msg)
// If the message is larger than the preferred maximum batch size, it is oversized and must be isolated into its own block
if messageSizeBytes > batchSize.PreferredMaxBytes {
logger.Debugf("The current message, with %v bytes, is larger than the preferred batch size of %v bytes and will be isolated.", messageSizeBytes, batchSize.PreferredMaxBytes)
// Cut the pending batch first, if it contains any messages
if len(r.pendingBatch) > 0 {
messageBatch := r.Cut()
messageBatches = append(messageBatches, messageBatch)
}
// Create a new batch containing just this single message
messageBatches = append(messageBatches, []*cb.Envelope{msg})
// Record that this batch took no time to fill
r.Metrics.BlockFillDuration.With("channel", r.ChannelID).Observe(0)
return
}
// Check whether adding the current transaction would push the pending batch over the preferred size
messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes > batchSize.PreferredMaxBytes
// Handle the overflow case
if messageWillOverflowBatchSizeBytes {
logger.Debugf("The current message, with %v bytes, will overflow the pending batch of %v bytes.", messageSizeBytes, r.pendingBatchSizeBytes)
logger.Debugf("Pending batch would overflow if current message is added, cutting batch now.")
messageBatch := r.Cut()
r.PendingBatchStartTime = time.Now()
messageBatches = append(messageBatches, messageBatch)
}
logger.Debugf("Enqueuing message into batch")
r.pendingBatch = append(r.pendingBatch, msg)
r.pendingBatchSizeBytes += messageSizeBytes
pending = true
// Cut the batch if the pending queue has reached the maximum message count
if uint32(len(r.pendingBatch)) >= batchSize.MaxMessageCount {
logger.Debugf("Batch size met, cutting batch")
messageBatch := r.Cut()
messageBatches = append(messageBatches, messageBatch)
pending = false
}
return
}
// Cut returns the current batch and starts a new one
func (r *receiver) Cut() []*cb.Envelope {
r.Metrics.BlockFillDuration.With("channel", r.ChannelID).Observe(time.Since(r.PendingBatchStartTime).Seconds())
r.PendingBatchStartTime = time.Time{}
batch := r.pendingBatch
r.pendingBatch = nil
r.pendingBatchSizeBytes = 0
return batch
}
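To summarize the cutting rules, here is a small self-contained simulation. It is not the Fabric receiver type, just the same three rules: isolate oversized messages, cut when adding a message would overflow PreferredMaxBytes, and cut when MaxMessageCount is reached.

```go
package main

import "fmt"

type cutter struct {
	maxMessageCount   int
	preferredMaxBytes int
	pending           []int // sizes of pending messages, in bytes
	pendingBytes      int
}

func (c *cutter) cut() []int {
	b := c.pending
	c.pending, c.pendingBytes = nil, 0
	return b
}

// ordered mirrors receiver.Ordered: it returns zero, one or two batches plus
// whether messages are still pending afterwards.
func (c *cutter) ordered(size int) (batches [][]int, pending bool) {
	if size > c.preferredMaxBytes { // rule 1: an oversized message is isolated
		if len(c.pending) > 0 {
			batches = append(batches, c.cut())
		}
		return append(batches, []int{size}), false
	}
	if c.pendingBytes+size > c.preferredMaxBytes { // rule 2: would overflow, cut the pending batch first
		batches = append(batches, c.cut())
	}
	c.pending = append(c.pending, size)
	c.pendingBytes += size
	pending = true
	if len(c.pending) >= c.maxMessageCount { // rule 3: message-count limit reached
		batches = append(batches, c.cut())
		pending = false
	}
	return batches, pending
}

func main() {
	c := &cutter{maxMessageCount: 3, preferredMaxBytes: 100}
	for _, size := range []int{40, 40, 150, 40, 40, 40} {
		batches, pending := c.ordered(size)
		fmt.Printf("msg %3dB -> %d batch(es) cut, pending=%v\n", size, len(batches), pending)
	}
}
```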