On the Orderer in Fabric

As before, we start from the main function under /home/yjm/go/src/github.com/hyperledger/fabric/orderer.

logger.Infof("Starting %s", metadata.GetVersionInfo())
conf := config.Load()
initializeLoggingLevel(conf)
initializeProfilingService(conf)
grpcServer := initializeGrpcServer(conf)
initializeLocalMsp(conf)
signer := localmsp.NewSigner()
manager := initializeMultiChainManager(conf, signer)
server := NewServer(manager, signer)
ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)
logger.Info("Beginning to serve requests")
grpcServer.Start()

The first step is loading the configuration file, which relies mainly on viper. A viper instance is created and passed into the InitViper method of the /fabric/core/config module; InitViper mainly adds FABRIC_CFG_PATH to viper's search paths and sets the config name, so the file can be located and read later:

var altPath = os.Getenv("FABRIC_CFG_PATH")
	if altPath != "" {
		// If the user has overridden the path with an envvar, its the only path
		// we will consider
		addConfigPath(v, altPath)
	}
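
Strung together, the viper flow amounts to something like the following standalone sketch. Note that the OrdererConfig struct and its fields are hypothetical placeholders here, not fabric's actual config.TopLevel:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

// OrdererConfig is a hypothetical placeholder for fabric's config.TopLevel.
type OrdererConfig struct {
	General struct {
		LedgerType    string
		GenesisMethod string
	}
}

func main() {
	v := viper.New()
	// Mirror InitViper: if FABRIC_CFG_PATH is set, it is the only path considered.
	if altPath := os.Getenv("FABRIC_CFG_PATH"); altPath != "" {
		v.AddConfigPath(altPath)
	} else {
		v.AddConfigPath(".")
	}
	v.SetConfigName("orderer") // resolves orderer.yaml (or .json, ...) on the search paths

	// ReadInConfig walks the search paths until a matching file is found.
	if err := v.ReadInConfig(); err != nil {
		fmt.Println("cannot read config:", err)
		return
	}
	var conf OrdererConfig
	if err := v.Unmarshal(&conf); err != nil {
		fmt.Println("cannot unmarshal config:", err)
		return
	}
	fmt.Println("LedgerType:", conf.General.LedgerType)
}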

Moving on, the configuration file is read by ReadInConfig, where we can see

file, err := ioutil.ReadFile(v.getConfigFile())

Following v.getConfigFile() down the call chain, we find that viper ultimately walks all of its configured search paths to locate the matching file.

So which configuration file does FABRIC_CFG_PATH actually point to for us? A closer look shows it is set in the orderer's Docker build file at /hyperledger/fabric/images/orderer/Dockerfile.in, whose content is as follows:

# Copyright Greg Haskins All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
FROM _BASE_NS_/fabric-baseos:_BASE_TAG_
ENV FABRIC_CFG_PATH /etc/hyperledger/fabric
RUN mkdir -p /var/hyperledger/production $FABRIC_CFG_PATH
COPY payload/orderer /usr/local/bin
ADD payload/sampleconfig.tar.bz2 $FABRIC_CFG_PATH/
EXPOSE 7050
CMD ["orderer"]
initializeLoggingLevel(conf)
initializeProfilingService(conf)

We skip over these two methods here, and initializeGrpcServer(conf) will get its own article later. Our focus is on

manager := initializeMultiChainManager(conf, signer) 

① First the ledger factory is created; file, json, and ram ledger types are supported, selected roughly as in the sketch below. ② The ledger location is taken from the configuration file, and New() under /fabric/orderer/ledger/file instantiates the file-backed ledger. ③ initializeBootstrapChannel(conf, lf) is then called.
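
For steps ① and ②, the selection logic condenses to roughly the following. This is a sketch modeled on the orderer source; the package aliases and config field names should be treated as approximate:

// Sketch of the ledger-type selection (modeled on the orderer source; approximate).
import (
	"github.com/hyperledger/fabric/orderer/ledger"
	fileledger "github.com/hyperledger/fabric/orderer/ledger/file"
	jsonledger "github.com/hyperledger/fabric/orderer/ledger/json"
	ramledger "github.com/hyperledger/fabric/orderer/ledger/ram"
	config "github.com/hyperledger/fabric/orderer/localconfig"
)

func createLedgerFactory(conf *config.TopLevel) ledger.Factory {
	switch conf.General.LedgerType {
	case "file":
		return fileledger.New(conf.FileLedger.Location) // ② location from the config file
	case "json":
		return jsonledger.New(conf.FileLedger.Location)
	case "ram":
		fallthrough
	default:
		return ramledger.New(int(conf.RAMLedger.HistorySize))
	}
}

With the factory in hand, initializeBootstrapChannel performs the bootstrap proper: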

func initializeBootstrapChannel(conf *config.TopLevel, lf ledger.Factory) {
	var genesisBlock *cb.Block

	// Select the bootstrapping mechanism
	switch conf.General.GenesisMethod {
	case "provisional":
		genesisBlock = provisional.New(genesisconfig.Load(conf.General.GenesisProfile)).GenesisBlock()
	case "file":
		genesisBlock = file.New(conf.General.GenesisFile).GenesisBlock()
	default:
		logger.Panic("Unknown genesis method:", conf.General.GenesisMethod)
	}

	chainID, err := utils.GetChainIDFromBlock(genesisBlock)
	if err != nil {
		logger.Fatal("Failed to parse chain ID from genesis block:", err)
	}
	gl, err := lf.GetOrCreate(chainID)
	if err != nil {
		logger.Fatal("Failed to create the system chain:", err)
	}

	err = gl.Append(genesisBlock)
	if err != nil {
		logger.Fatal("Could not write genesis block to ledger:", err)
	}
}

Here we can see that, depending on the configured genesis method, the genesisBlock we generated at the very start is loaded, and the chain information is then extracted from that block. Next, via

gl, err := lf.GetOrCreate(chainID)

lf is the ledger factory we obtained earlier, and GetOrCreate() returns the chain's ReadWriter interface:

// Reader allows the caller to inspect the ledger
type Reader interface {
	// Iterator returns an Iterator, as specified by a cb.SeekInfo message, and
	// its starting block number
	Iterator(startType *ab.SeekPosition) (Iterator, uint64)
	// Height returns the number of blocks on the ledger
	Height() uint64
}

// Writer allows the caller to modify the ledger
type Writer interface {
	// Append a new block to the ledger
	Append(block *cb.Block) error
}

// ReadWriter encapsulates the read/write functions of the ledger
type ReadWriter interface {
	Reader
	Writer
}

To briefly recap the whole flow: initialize and load the configuration, create and instantiate the ledger factory, read the genesis block, extract the chain ID from it, obtain the corresponding ReadWriter from the factory by chainID, and append the genesis block.
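
Condensed into one place, and reusing only the calls we have already seen (error handling elided), the bootstrap path is roughly:

// Sketch: the bootstrap flow end-to-end (error handling elided).
func bootstrap(conf *config.TopLevel, lf ledger.Factory) {
	genesisBlock := file.New(conf.General.GenesisFile).GenesisBlock() // load the genesis block
	chainID, _ := utils.GetChainIDFromBlock(genesisBlock)             // chain ID from the block
	gl, _ := lf.GetOrCreate(chainID)                                  // ReadWriter for that chain
	_ = gl.Append(genesisBlock)                                       // persist block 0
}

The file-based ledger implements the Writer side as follows: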

// Append adds a new block to the file ledger and, on success, notifies any
// waiting readers by closing and then recreating the signal channel.
func (fl *fileLedger) Append(block *cb.Block) error {
	err := fl.blockStore.AddBlock(block)
	if err == nil {
		// Closing the channel wakes every goroutine blocked on it; a fresh
		// channel is then installed for the next round of waiters.
		close(fl.signal)
		fl.signal = make(chan struct{})
	}
	return err
}
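
The close-then-recreate handling of fl.signal is a standard Go broadcast idiom: closing a channel releases every goroutine blocked on receiving from it at once, which is how readers waiting for the next block learn that one has been appended. A minimal standalone illustration:

package main

import (
	"fmt"
	"sync"
	"time"
)

// Demonstrates the broadcast idiom used by fileLedger.signal: closing a
// channel releases every goroutine waiting on it simultaneously.
func main() {
	signal := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-signal // blocks until the channel is closed
			fmt.Printf("reader %d woke up\n", id)
		}(i)
	}
	time.Sleep(100 * time.Millisecond) // let the readers block (demo only)
	close(signal)                      // the "new block appended" broadcast
	wg.Wait()
}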

The BlockStore it delegates to is defined as follows, in fabric/common/ledger/blkstorage/blockstorage.go:

type BlockStore interface {
	AddBlock(block *common.Block) error
	GetBlockchainInfo() (*common.BlockchainInfo, error)
	RetrieveBlocks(startNum uint64) (ledger.ResultsIterator, error)
	RetrieveBlockByHash(blockHash []byte) (*common.Block, error)
	RetrieveBlockByNumber(blockNum uint64) (*common.Block, error) // blockNum of math.MaxUint64 will return last block
	RetrieveTxByID(txID string) (*common.Envelope, error)
	RetrieveTxByBlockNumTranNum(blockNum uint64, tranNum uint64) (*common.Envelope, error)
	RetrieveBlockByTxID(txID string) (*common.Block, error)
	RetrieveTxValidationCodeByTxID(txID string) (peer.TxValidationCode, error)
	Shutdown()
}

Next, let's look at how a block is actually written:

func (mgr *blockfileMgr) addBlock(block *common.Block) error {
	if block.Header.Number != mgr.getBlockchainInfo().Height {
		return fmt.Errorf("Block number should have been %d but was %d", mgr.getBlockchainInfo().Height, block.Header.Number)
	}
	blockBytes, info, err := serializeBlock(block)
	if err != nil {
		return fmt.Errorf("Error while serializing block: %s", err)
	}
	blockHash := block.Header.Hash()
	//Get the location / offset where each transaction starts in the block and where the block ends
	txOffsets := info.txOffsets
	currentOffset := mgr.cpInfo.latestFileChunksize
	blockBytesLen := len(blockBytes)
	blockBytesEncodedLen := proto.EncodeVarint(uint64(blockBytesLen))
	totalBytesToAppend := blockBytesLen + len(blockBytesEncodedLen)

	//Determine if we need to start a new file since the size of this block
	//exceeds the amount of space left in the current file
	if currentOffset+totalBytesToAppend > mgr.conf.maxBlockfileSize {
		mgr.moveToNextFile()
		currentOffset = 0
	}
	//append blockBytesEncodedLen to the file
	err = mgr.currentFileWriter.append(blockBytesEncodedLen, false)
	if err == nil {
		//append the actual block bytes to the file
		err = mgr.currentFileWriter.append(blockBytes, true)
	}
	if err != nil {
		truncateErr := mgr.currentFileWriter.truncateFile(mgr.cpInfo.latestFileChunksize)
		if truncateErr != nil {
			panic(fmt.Sprintf("Could not truncate current file to known size after an error during block append: %s", err))
		}
		return fmt.Errorf("Error while appending block to file: %s", err)
	}

	//Update the checkpoint info with the results of adding the new block
	currentCPInfo := mgr.cpInfo
	newCPInfo := &checkpointInfo{
		latestFileChunkSuffixNum: currentCPInfo.latestFileChunkSuffixNum,
		latestFileChunksize:      currentCPInfo.latestFileChunksize + totalBytesToAppend,
		isChainEmpty:             false,
		lastBlockNumber:          block.Header.Number}
	//save the checkpoint information in the database
	if err = mgr.saveCurrentInfo(newCPInfo, false); err != nil {
		truncateErr := mgr.currentFileWriter.truncateFile(currentCPInfo.latestFileChunksize)
		if truncateErr != nil {
			panic(fmt.Sprintf("Error in truncating current file to known size after an error in saving checkpoint info: %s", err))
		}
		return fmt.Errorf("Error while saving current file info to db: %s", err)
	}

	//Index block file location pointer updated with file suffix and offset for the new block
	blockFLP := &fileLocPointer{fileSuffixNum: newCPInfo.latestFileChunkSuffixNum}
	blockFLP.offset = currentOffset
	// shift the txoffset because we prepend length of bytes before block bytes
	for _, txOffset := range txOffsets {
		txOffset.loc.offset += len(blockBytesEncodedLen)
	}
	//save the index in the database
	mgr.index.indexBlock(&blockIdxInfo{
		blockNum: block.Header.Number, blockHash: blockHash,
		flp: blockFLP, txOffsets: txOffsets, metadata: block.Metadata})

	//update the checkpoint info (for storage) and the blockchain info (for APIs) in the manager
	mgr.updateCheckpoint(newCPInfo)
	mgr.updateBlockchainInfo(blockHash, block)
	return nil
}
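
Note the on-disk layout this produces: each block is written as a varint length prefix followed by the serialized block bytes, appended one after another to the current block file. A small standalone illustration of that framing (illustrative only, not fabric code):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// frame reproduces the layout addBlock writes: varint(len) || blockBytes.
func frame(blockBytes []byte) []byte {
	prefix := proto.EncodeVarint(uint64(len(blockBytes)))
	return append(prefix, blockBytes...)
}

func main() {
	data := frame([]byte("fake-block-bytes"))
	// DecodeVarint returns the length value and how many bytes the varint used.
	length, n := proto.DecodeVarint(data)
	fmt.Printf("payload of %d bytes starts at offset %d\n", length, n)
	fmt.Println(string(data[n : n+int(length)]))
}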

Tracing further down, we can see that the block index entries are persisted through leveldbhelper's DBHandle().
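
The usage pattern of that helper looks roughly like this; it is a sketch against fabric's common/ledger/util/leveldbhelper package, and the exact signatures should be treated as approximate:

package main

import (
	"fmt"

	"github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
)

// Sketch of the leveldbhelper usage pattern (signatures approximate).
func main() {
	provider := leveldbhelper.NewProvider(&leveldbhelper.Conf{DBPath: "/tmp/index-demo"})
	defer provider.Close()

	db := provider.GetDBHandle("blockIndex") // one logical DB per name
	if err := db.Put([]byte("blockNum-0"), []byte("file-0:offset-0"), true); err != nil {
		fmt.Println("put failed:", err)
		return
	}
	val, err := db.Get([]byte("blockNum-0"))
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println(string(val))
}

Next, look at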

multichain.NewManagerImpl(lf, consenters, signer)

It takes three required parameters: the ledger factory, the consenters, and the signer. So what does NewManagerImpl() actually do? ① Using the ledger factory it fetches all existing chains, iterates over them, obtains each chain's ReadWriter, reads the latest configuration block through it, and then multiLedger loads the ledger resources from that configuration block:

ledgerResources := ml.newLedgerResources(configTx)

The chain ID is then obtained from those ledger resources, and a ChainSupport object is instantiated. Next the ChainSupport object's start function is called, which in practice calls the start function of the underlying Chain object, i.e. the start function of one of the two consensus implementations Fabric currently supports: solo and Kafka. solo is intended mainly for development and testing; its start function ultimately runs the following loop:

func (ch *chain) main() {
	var timer <-chan time.Time

	for {
		select {
		case msg := <-ch.sendChan:
			batches, committers, ok := ch.support.BlockCutter().Ordered(msg)
			if ok && len(batches) == 0 && timer == nil {
				timer = time.After(ch.batchTimeout)
				continue
			}
			for i, batch := range batches {
				block := ch.support.CreateNextBlock(batch)
				ch.support.WriteBlock(block, committers[i], nil)
			}
			if len(batches) > 0 {
				timer = nil
			}
		case <-timer:
			//clear the timer
			timer = nil

			batch, committers := ch.support.BlockCutter().Cut()
			if len(batch) == 0 {
				logger.Warningf("Batch timer expired with no pending requests, this might indicate a bug")
				continue
			}
			logger.Debugf("Batch timer expired, creating block")
			block := ch.support.CreateNextBlock(batch)
			ch.support.WriteBlock(block, committers, nil)
		case <-ch.exitChan:
			logger.Debugf("Exiting")
			return
		}
	}
}
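
Both solo and Kafka plug into the multichain manager through the same small contract. Paraphrased from the multichain package (exact method sets vary across Fabric versions), it looks roughly like this:

// Paraphrased sketch of the consensus plug-in contract (approximate).
type Chain interface {
	// Enqueue accepts a message and returns false if the chain is shutting
	// down. For solo this simply pushes into ch.sendChan, feeding the loop above.
	Enqueue(env *cb.Envelope) bool
	// Start allocates the resources the chain needs (for solo: go ch.main()).
	Start()
	// Halt releases them and stops the main loop (for solo: close(ch.exitChan)).
	Halt()
}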

The Kafka implementation is considerably more complex and will be covered in a separate article. As for why ml.newLedgerResources(configTx) goes back and loads the ledger resources again: the main reason is to let the configtx machinery initialize the filter policies that distinguish the system chain from ordinary chains, so each can be started appropriately. Finally, the orderer's gRPC service is wired up:

// NewServer creates an ab.AtomicBroadcastServer based on the broadcast target and ledger Reader
func NewServer(ml multichain.Manager, signer crypto.LocalSigner) ab.AtomicBroadcastServer {
	s := &server{
		dh: deliver.NewHandlerImpl(deliverSupport{Manager: ml}),
		bh: broadcast.NewHandlerImpl(broadcastSupport{
			Manager:               ml,
			ConfigUpdateProcessor: configupdate.New(ml.SystemChannelID(), configUpdateSupport{Manager: ml}, signer),
		}),
	}
	return s
}

To sum up: the orderer takes the messages that all peer nodes send via the Broadcast client, packs them (according to the configured batch size) one after another into blocks, and writes them into its own ledger; each peer's gossip service then consumes this ledger through the Deliver client to synchronize its own copy. A rough sketch of such a Deliver consumer closes this article.
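
This sketch uses the public AtomicBroadcast gRPC client from protos/orderer; building and signing the SeekInfo envelope is deliberately omitted and assumed to be supplied by the caller:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	cb "github.com/hyperledger/fabric/protos/common"
	ab "github.com/hyperledger/fabric/protos/orderer"
)

// consumeBlocks sketches the Deliver side: send one signed Envelope whose
// payload is a SeekInfo, then receive DeliverResponses until an error.
func consumeBlocks(addr string, seekEnvelope *cb.Envelope) error {
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	stream, err := ab.NewAtomicBroadcastClient(conn).Deliver(context.Background())
	if err != nil {
		return err
	}
	if err := stream.Send(seekEnvelope); err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err
		}
		if block := resp.GetBlock(); block != nil {
			fmt.Println("received block", block.Header.Number)
		}
	}
}

func main() {
	// seekEnvelope construction (SeekInfo marshaling + signing) is omitted here.
	_ = consumeBlocks
}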

Reposted from: https://my.oschina.net/wdt/blog/1590088
