IPFS Bitswap architecture

Block diagram

Take a look at the UML block diagram, copied from the go-bitswap repository on GitHub:
(UML block diagram from the go-bitswap repo)

Bitswap data structure & initialization

// Bitswap instances implement the bitswap protocol.
type Bitswap struct {
	// the wantlist tracks global wants for bitswap
	wm *bswm.WantManager
	// the provider query manager manages requests to find providers
	pqm *bspqm.ProviderQueryManager
	// the engine is the bit of logic that decides who to send which blocks to
	engine *decision.Engine
	// network delivers messages on behalf of the session
	network bsnet.BitSwapNetwork
	// blockstore is the local database
	// NB: ensure threadsafety
	blockstore blockstore.Blockstore
	// manages channels of outgoing blocks for sessions
	notif notifications.PubSub
	// newBlocks is a channel for newly added blocks to be provided to the
	// network.  blocks pushed down this channel get buffered and fed to the
	// provideKeys channel later on to avoid too much network activity
	newBlocks chan cid.Cid
	// provideKeys directly feeds provide workers
	provideKeys chan cid.Cid
	// the sessionmanager manages tracking sessions
	sm *bssm.SessionManager
	...
}

wm -> WantManager
sm -> SessionManager
pqm -> ProviderQueryManager

When Bitswap is initialized, the wm, pqm, bitswap workers (bs.startWorkers) and decision-engine workers are started as well. Moreover, the provideCollector and provideWorker are started when providing is enabled (the ProvideEnabled option).

	// start want manager
	bs.wm.Startup()
	// start provide query manager
	bs.pqm.Startup()
	// set stream handler for bitswap protocol. v0, v1 and no version
	// also register a network notifiee for swarm notification
	network.SetDelegate(bs)

	// Start up bitswaps async worker routines
	bs.startWorkers(ctx, px)
	engine.StartWorkers(ctx, px)

	// bind the context and process.
	// do it over here to avoid closing before all setup is done.
	go func() {
		<-px.Closing() // process closes first
		cancelFunc()
		notif.Shutdown()
	}()
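
For context, all of this wiring happens inside the package constructor. A minimal caller-side sketch of constructing Bitswap (the import paths are assumptions from the era of this code; the network delegate wraps a libp2p host plus a content router such as the DHT):

import (
	"context"

	bitswap "github.com/ipfs/go-bitswap"
	bsnet "github.com/ipfs/go-bitswap/network"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	exchange "github.com/ipfs/go-ipfs-exchange-interface"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/routing"
)

// newBitswap is a hypothetical helper: build the network delegate from a
// libp2p host and a content router, then let bitswap.New start the wm, pqm
// and worker goroutines described above.
func newBitswap(ctx context.Context, h host.Host, r routing.ContentRouting, bstore blockstore.Blockstore) exchange.Interface {
	network := bsnet.NewFromIpfsHost(h, r)
	return bitswap.New(ctx, network, bstore)
}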

Bitswap taskWorker

The worker waits on bs.engine.Outbox(), extracts the Message/Blocks from each envelope, and finally calls bs.sendBlocks(ctx, envelope)…


func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
	for {
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
				// update the BS ledger to reflect sent message
				// TODO: Should only track *useful* messages in ledger
				outgoing := bsmsg.New(false)
				for _, block := range envelope.Message.Blocks() {
					outgoing.AddBlock(block)
				}
				bs.engine.MessageSent(envelope.Peer, outgoing)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
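
The excerpt above only shows the ledger update; the send itself is elided. A hedged sketch of what bs.sendBlocks(ctx, envelope) amounts to, built from the Envelope fields shown later in nextEnvelope (Peer, Message, Sent), not a verbatim copy of the real function:

func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) {
	// Tell the engine the task is finished so it can schedule more work.
	defer env.Sent()

	// Re-package the envelope's blocks into an outgoing message and hand
	// it to the network layer for delivery to the target peer.
	msg := bsmsg.New(false)
	for _, block := range env.Message.Blocks() {
		msg.AddBlock(block)
	}
	if err := bs.network.SendMessage(ctx, env.Peer, msg); err != nil {
		log.Infof("failed to send blocks to %s: %s", env.Peer, err)
	}
}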

Message

(gogoproto.nullable) = false --> generate the field as a value rather than a pointer, so it is always present (never nil)
syntax = "proto3";
package bitswap.message.pb;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
message Message {
  message Wantlist {
    message Entry {
      bytes block = 1;    // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0)
      int32 priority = 2; // the priority (normalized). default to 1
      bool cancel = 3;    // whether this revokes an entry
    }
    repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries
    bool full = 2;                                             // whether this is the full wantlist. default to false
  }
  message Block {
    bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length))
    bytes data = 2;
  }
  Wantlist wantlist = 1 [(gogoproto.nullable) = false];
  repeated bytes blocks = 2;                                 // used to send Blocks in bitswap 1.0.0
  repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0
}
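
The Block message in bitswap 1.1.0 carries only a CID prefix rather than the full CID: the receiver can recompute the CID by hashing the data with the parameters encoded in the prefix. A minimal sketch of that reconstruction (assuming go-cid and go-block-format; this helper is illustrative, not copied from go-bitswap):

import (
	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

// blockFromPayload rebuilds a block from one Message.Block payload entry:
// parse the prefix, hash the data with it to recover the CID, then wrap
// the data in a block carrying that CID.
func blockFromPayload(prefix, data []byte) (blocks.Block, error) {
	pref, err := cid.PrefixFromBytes(prefix)
	if err != nil {
		return nil, err
	}
	c, err := pref.Sum(data)
	if err != nil {
		return nil, err
	}
	return blocks.NewBlockWithCid(data, c)
}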

handleNewStream

func (bsnet *impl) handleNewStream(s network.Stream) {
	defer s.Close()
	if bsnet.receiver == nil {
		_ = s.Reset()
		return
	}
	reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax)
	for {
		received, err := bsmsg.FromMsgReader(reader)
		if err != nil {
			if err != io.EOF {
				_ = s.Reset()
				go bsnet.receiver.ReceiveError(err)
				log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err)
			}
			return
		}

		p := s.Conn().RemotePeer()
		ctx := context.Background()
		log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer())
		bsnet.receiver.ReceiveMessage(ctx, p, received)
		atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1)
	}
}

// ReceiveMessage is called by the network interface when a new message is
// received.
func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) {
	bs.counterLk.Lock()
	bs.counters.messagesRecvd++
	bs.counterLk.Unlock()

	// This call records changes to wantlists, blocks received,
	// and number of bytes transfered.
	bs.engine.MessageReceived(ctx, p, incoming)
	// TODO: this is bad, and could be easily abused.
	// Should only track *useful* messages in ledger

	iblocks := incoming.Blocks()

	if len(iblocks) == 0 {
		return
	}

	bs.updateReceiveCounters(iblocks)
	for _, b := range iblocks {
		log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p)
	}

	// Process blocks
	err := bs.receiveBlocksFrom(ctx, p, iblocks)
	if err != nil {
		log.Warningf("ReceiveMessage recvBlockFrom error: %s", err)
		return
	}
}

receiveBlocksFrom() processes the received blocks.
bs.engine.MessageReceived(ctx, p, incoming) processes the wantlist carried in the message.

MessageReceived handles the wantlist from the peer:

  • Create a ledger for the peer
  • Put the wantlist into the ledger
  • peerRequestQueue.PushBlock(p, activeEntries…)
  • signalNewWork to drive the engine's taskWorker (sketched after the excerpt below)
	l := e.findOrCreate(p)
	l.lk.Lock()
	var msgSize int
	var activeEntries []peertask.Task
	for _, entry := range m.Wantlist() {
		if entry.Cancel {
			log.Debugf("%s cancel %s", p, entry.Cid)
			l.CancelWant(entry.Cid)
			e.peerRequestQueue.Remove(entry.Cid, p)
		} else {
			log.Debugf("wants %s - %d", entry.Cid, entry.Priority)
			l.Wants(entry.Cid, entry.Priority)
			blockSize, ok := blockSizes[entry.Cid]
			if ok {
				// we have the block
				newWorkExists = true
				if msgSize+blockSize > maxMessageSize {
					e.peerRequestQueue.PushBlock(p, activeEntries...)
					activeEntries = []peertask.Task{}
					msgSize = 0
				}
				activeEntries = append(activeEntries, peertask.Task{Identifier: entry.Cid, Priority: entry.Priority})
				msgSize += blockSize
			}
		}
	}
	if len(activeEntries) > 0 {
		e.peerRequestQueue.PushBlock(p, activeEntries...)
	}
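
The signalNewWork mentioned above is not shown in the excerpt; it is essentially a non-blocking send on the engine's workSignal channel (the same channel nextEnvelope selects on below). A minimal sketch:

func (e *Engine) signalNewWork() {
	// Non-blocking send: if a signal is already pending, that is enough
	// to wake the taskWorker, so the caller never blocks.
	select {
	case e.workSignal <- struct{}{}:
	default:
	}
}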

Engine taskWorker

// nextEnvelope runs in the taskWorker goroutine. Returns an error if the
// context is cancelled before the next Envelope can be created.
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
	for {
		nextTask := e.peerRequestQueue.PopBlock()
		for nextTask == nil {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-e.workSignal:
				nextTask = e.peerRequestQueue.PopBlock()
			case <-e.ticker.C:
				e.peerRequestQueue.ThawRound()
				nextTask = e.peerRequestQueue.PopBlock()
			}
		}

		// with a task in hand, we're ready to prepare the envelope...
		blockCids := cid.NewSet()
		for _, t := range nextTask.Tasks {
			blockCids.Add(t.Identifier.(cid.Cid))
		}
		blks, err := e.bsm.getBlocks(ctx, blockCids.Keys())
		if err != nil {
			// we're dropping the envelope but that's not an issue in practice.
			return nil, err
		}

		msg := bsmsg.New(true)
		for _, b := range blks {
			msg.AddBlock(b)
		}

		if msg.Empty() {
			// If we don't have the block, don't hold that against the peer
			// make sure to update that the task has been 'completed'
			nextTask.Done(nextTask.Tasks)
			continue
		}

		return &Envelope{
			Peer:    nextTask.Target,
			Message: msg,
			Sent: func() {
				nextTask.Done(nextTask.Tasks)
				select {
				case e.workSignal <- struct{}{}:
					// work completing may mean that our queue will provide new
					// work to be done.
				default:
				}
			},
		}, nil
	}
}

Tasks are popped from the queue and handled by e.bsm.getBlocks(ctx, blockCids.Keys()). An Envelope is then returned along with the requested block data.

BlockstoreManager of engine

The blockstoreManager (bsm) is started by the engine. The default number of workers is 128!

func (bsm *blockstoreManager) start(px process.Process) {
	bsm.px = px

	// Start up workers
	for i := 0; i < bsm.workerCount; i++ {
		px.Go(func(px process.Process) {
			bsm.worker()
		})
	}
}

For each CID of the requested blocks, bsm adds a job that calls bsm.bs.Get(cid), so the lookups run in parallel (up to 128 requests can be handled concurrently).

func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[cid.Cid]blocks.Block, error) {
	res := make(map[cid.Cid]blocks.Block)
	if len(ks) == 0 {
		return res, nil
	}
	var lk sync.Mutex
	return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) {
		blk, err := bsm.bs.Get(c)
		if err != nil {
			if err != bstore.ErrNotFound {
				// Note: this isn't a fatal error. We shouldn't abort the request
				log.Errorf("blockstore.Get(%s) error: %s", c, err)
			}
		} else {
			lk.Lock()
			res[c] = blk
			lk.Unlock()
		}
	})
}
func (bsm *blockstoreManager) worker() {
	for {
		select {
		case <-bsm.px.Closing():
			return
		case job := <-bsm.jobs:
			job()
		}
	}
}
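
jobPerKey itself is not shown above. A minimal sketch of what it could look like (hypothetical code, assuming bsm.jobs is the channel drained by worker() and bsm.px is the goprocess handle): push one closure per key onto bsm.jobs and block on a WaitGroup until every job has run.

func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) error {
	var wg sync.WaitGroup
	for _, k := range ks {
		k := k // capture the loop variable for the closure
		wg.Add(1)
		select {
		case bsm.jobs <- func() {
			defer wg.Done()
			jobFn(k)
		}:
		case <-ctx.Done():
			wg.Done()
			return ctx.Err()
		case <-bsm.px.Closing():
			wg.Done()
			return errors.New("blockstore manager shutting down")
		}
	}
	wg.Wait()
	return nil
}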

WantManager

The WantManager is simple; its run loop is shown below. It takes messages from wm.wantMessages and handles them one by one.

func (wm *WantManager) run() {
	// NOTE: Do not open any streams or connections from anywhere in this
	// event loop. Really, just don't do anything likely to block.
	for {
		select {
		case message := <-wm.wantMessages:
			message.handle(wm)
		case <-wm.ctx.Done():
			return
		}
	}
}

Available messages:

  • wantSet
  • currentBroadcastWantsMessage
  • currentWantsMessage
  • wantCountMessage
  • connectedMessage
  • disconnectedMessage
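
Each of these is a small value implementing a handle method that runs on the single run() goroutine, so the WantManager's state never needs a mutex. A minimal sketch of the pattern, using wantCountMessage as the example (the field names here are assumptions):

// wantMessage is the interface every message type satisfies.
type wantMessage interface {
	handle(wm *WantManager)
}

// wantCountMessage asks the run loop how many entries are currently in the
// wantlist and delivers the answer on a response channel.
type wantCountMessage struct {
	resp chan<- int
}

func (m *wantCountMessage) handle(wm *WantManager) {
	m.resp <- wm.wl.Len()
}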

SessionManager & Session

SessionManager is responsible for managing the session lifecycle: NewSession/removeSession.

A session is created by bitswap.NewSession() whenever we want to request blocks.


// Session holds state for an individual bitswap transfer operation.
// This allows bitswap to make smarter decisions about who to send wantlist
// info to, and who to request blocks from.
type Session struct {
	// dependencies
	ctx context.Context
	wm  WantManager
	pm  PeerManager
	srs RequestSplitter

	sw sessionWants

	// channels
	incoming      chan op
	latencyReqs   chan chan time.Duration
	tickDelayReqs chan time.Duration

	// do not touch outside run loop
	idleTick            *time.Timer
	periodicSearchTimer *time.Timer
	baseTickDelay       time.Duration
	latTotal            time.Duration
	fetchcnt            int
	consecutiveTicks    int
	initialSearchDelay  time.Duration
	periodicSearchDelay delay.D
	// identifiers
	notif notifications.PubSub
	uuid  logging.Loggable
	id    uint64
}

type SessionManager struct {
	ctx                    context.Context
	sessionFactory         SessionFactory
	peerManagerFactory     PeerManagerFactory
	requestSplitterFactory RequestSplitterFactory
	notif                  notifications.PubSub

	// Sessions
	sessLk   sync.RWMutex
	sessions []sesTrk

	// Session Index
	sessIDLk sync.Mutex
	sessID   uint64
}

peerManagerFactory & requestSplitterFactory are used in NewSession. So, whenever a session is created, a peerManager and a requestSplitter are created as well. Together, the session, peerMgr and splitter are grouped into a tracked session (sesTrk).

Also, a goroutine is created to safeguard against context cancellation.

// NewSession initializes a session with the given context, and adds to the
// session manager.
func (sm *SessionManager) NewSession(ctx context.Context,
	provSearchDelay time.Duration,
	rebroadcastDelay delay.D) exchange.Fetcher {
	id := sm.GetNextSessionID()
	sessionctx, cancel := context.WithCancel(ctx)

	pm := sm.peerManagerFactory(sessionctx, id)
	srs := sm.requestSplitterFactory(sessionctx)
	session := sm.sessionFactory(sessionctx, id, pm, srs, sm.notif, provSearchDelay, rebroadcastDelay)
	tracked := sesTrk{session, pm, srs}
	sm.sessLk.Lock()
	sm.sessions = append(sm.sessions, tracked)
	sm.sessLk.Unlock()
	go func() {
		defer cancel()
		select {
		case <-sm.ctx.Done():
			sm.removeSession(tracked)
		case <-ctx.Done():
			sm.removeSession(tracked)
		}
	}()
	return session
}

SessionPeerManager

A session creates a SessionPeerManager for managing its peers.

// SessionPeerManager tracks and manages peers for a session, and provides
// the best ones to the session
type SessionPeerManager struct {
	ctx            context.Context
	tagger         PeerTagger
	providerFinder PeerProviderFinder
	tag            string
	id             uint64

	peerMessages chan peerMessage

	// do not touch outside of run loop
	activePeers         map[peer.ID]*peerData
	unoptimizedPeersArr []peer.ID
	optimizedPeersArr   []peer.ID
	broadcastLatency    *latencyTracker
	timeoutDuration     time.Duration
}

SessionPeerManager starts a loop to receive messages:

func (spm *SessionPeerManager) run(ctx context.Context) {
	for {
		select {
		case pm := <-spm.peerMessages:
			pm.handle(spm)
		case <-ctx.Done():
			spm.handleShutdown()
			return
		}
	}
}

Message types:

  • peerFoundMessage
  • peerResponseMessage
  • peerRequestMessage
  • getPeersMessage
  • cancelMessage
  • peerTimeoutMessage
  • broadcastTimeoutMessage
  • setTimeoutMessage

peerManagerFactory & requestSplitterFactory

These two factories look like this:

sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.PeerManager {
	return bsspm.New(ctx, id, network.ConnectionManager(), pqm)
}
sessionRequestSplitterFactory := func(ctx context.Context) bssession.RequestSplitter {
	return bssrs.New(ctx)
}

GetBlock message flow

Session.GetBlocks -> getter.AsyncGetBlocks: subscribe, then send the want request and wait for incoming blocks in a goroutine.

notif.Subscribe(ctx, keys...)
s.incoming <- op{op: opWant, keys: keys}

Session.GetBlocks waits for the requested blocks, unless the operation is cancelled or the context expires.
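
For context, here is a minimal caller-side sketch (assuming an already constructed *Bitswap bs, a context ctx and a slice of CIDs keys): create a session, then range over the returned channel as blocks arrive.

session := bs.NewSession(ctx) // returns an exchange.Fetcher
blkCh, err := session.GetBlocks(ctx, keys)
if err != nil {
	return err
}
for blk := range blkCh {
	fmt.Printf("got %s (%d bytes)\n", blk.Cid(), len(blk.RawData()))
}

AsyncGetBlocks itself looks like this: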

func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub,
	want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) {

	// Use a PubSub notifier to listen for incoming blocks for each key
	remaining := cid.NewSet()
	promise := notif.Subscribe(ctx, keys...)
	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", k)
		remaining.Add(k)
	}

	// Send the want request for the keys to the network
	want(ctx, keys)

	out := make(chan blocks.Block)
	go handleIncoming(ctx, sessctx, remaining, promise, out, cwants)
	return out, nil
}

want():

		func(ctx context.Context, keys []cid.Cid) {
			select {
			case s.incoming <- op{op: opWant, keys: keys}:
			case <-ctx.Done():
			case <-s.ctx.Done():
			}
		},

opWant

The opWant message is handled by Session.wantBlocks.

func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) {
	// Given the want limit and any newly received blocks, get as many wants as
	// we can to send out
	ks := s.sw.GetNextWants(s.wantLimit(), newks)
	if len(ks) == 0 {
		return
	}

	peers := s.pm.GetOptimizedPeers()
	if len(peers) > 0 {
		splitRequests := s.srs.SplitRequest(peers, ks)
		for _, splitRequest := range splitRequests {
			s.pm.RecordPeerRequests(splitRequest.Peers, splitRequest.Keys)
			s.wm.WantBlocks(ctx, splitRequest.Keys, splitRequest.Peers, s.id)
		}
	} else {
		s.pm.RecordPeerRequests(nil, ks)
		s.wm.WantBlocks(ctx, ks, nil, s.id)
	}
}

Basically, the request is handled like this:

s.pm.RecordPeerRequests(nil, ks)
s.wm.WantBlocks(ctx, ks, nil, s.id)

SessionPeerManager records the peer request: peerRequestMessage -> s.pm
WantManager adds the new entries: wantSet -> wm

wantSet

Each CID is treated as an entry and inserted into either the broadcast wantlist or the regular wantlist, depending on whether len(ws.targets) == 0, i.e. whether the SessionPeerManager returned any optimized peers (see wantBlocks above).

The wantlist is keyed first by the CID itself, then by the session ID, a unique integer assigned at session creation. As a result, the same CID wanted by different sessions is tracked separately.

type SessionTrackedWantlist struct {
	set map[cid.Cid]*sessionTrackedEntry
}
type sessionTrackedEntry struct {
	Entry
	sesTrk map[uint64]struct{}
}
func (ws *wantSet) handle(wm *WantManager) {
	// is this a broadcast or not?
	brdc := len(ws.targets) == 0

	// add changes to our wantlist
	for _, e := range ws.entries {
		if e.Cancel {
			if brdc {
				wm.bcwl.Remove(e.Cid, ws.from)
			}

			if wm.wl.Remove(e.Cid, ws.from) {
				wm.wantlistGauge.Dec()
			}
		} else {
			if brdc {
				wm.bcwl.AddEntry(e.Entry, ws.from)
			}
			if wm.wl.AddEntry(e.Entry, ws.from) {
				wm.wantlistGauge.Inc()
			}
		}
	}

	// broadcast those wantlist changes
	wm.peerHandler.SendMessage(ws.entries, ws.targets, ws.from)
}
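
wm.wl.AddEntry above returns true only when the CID is new to the wantlist, which is what drives the gauge. A minimal sketch of how such an AddEntry could work on the session-tracked structure (illustrative, not copied from go-bitswap):

func (w *SessionTrackedWantlist) AddEntry(e Entry, ses uint64) bool {
	if existing, ok := w.set[e.Cid]; ok {
		// Already wanted by some session; just record this session too.
		existing.sesTrk[ses] = struct{}{}
		return false
	}
	w.set[e.Cid] = &sessionTrackedEntry{
		Entry:  e,
		sesTrk: map[uint64]struct{}{ses: {}},
	}
	return true
}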

In the end, WantManager triggers peerHandler for further processing…

PeerHandler

If len(targets) == 0, broadcast to every known peer queue; otherwise, send only to the peers specified by targets.

func (pm *PeerManager) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) {
	if len(targets) == 0 {
		for _, p := range pm.peerQueues {
			p.pq.AddMessage(entries, from)
		}
	} else {
		for _, t := range targets {
			pqi := pm.getOrCreate(t)
			pqi.pq.AddMessage(entries, from)
		}
	}
}
// entries will be added to MessageQueue's wantlist, then kick outgoing channel... 
func (mq *MessageQueue) AddMessage(entries []bsmsg.Entry, ses uint64) {
	if !mq.addEntries(entries, ses) {
		return
	}
	select {
	case mq.outgoingWork <- struct{}{}:
	default:
	}
}

PeerQueue

func (mq *MessageQueue) runQueue() {
	for {
		select {
		case <-mq.rebroadcastTimer.C:
			mq.rebroadcastWantlist()
		case <-mq.outgoingWork:
			mq.sendMessage()
		case <-mq.done:
			if mq.sender != nil {
				mq.sender.Close()
			}
			return
		case <-mq.ctx.Done():
			if mq.sender != nil {
				_ = mq.sender.Reset()
			}
			return
		}
	}
}
// Make a sender 
func (mq *MessageQueue) sendMessage() {
	message := mq.extractOutgoingMessage()
	if message == nil || message.Empty() {
		return
	}
	err := mq.initializeSender()
	if err != nil {
		log.Infof("cant open message sender to peer %s: %s", mq.p, err)
		// TODO: cant connect, what now?
		return
	}

	for i := 0; i < maxRetries; i++ { // try to send this message until we fail.
		if mq.attemptSendAndRecovery(message) {
			return
		}
	}
}

Making a sender means opening a bitswap stream to the peer.
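
A minimal sketch of what that amounts to, modeled on MessageQueue.initializeSender (mq.network is the narrow network interface the queue holds; the exact details are assumptions): connect to the peer, open a message sender once, and cache it on the queue.

func (mq *MessageQueue) initializeSender() error {
	if mq.sender != nil {
		return nil // bitswap stream already open
	}
	// Make sure we have a connection, then open a bitswap stream.
	if err := mq.network.ConnectTo(mq.ctx, mq.p); err != nil {
		return err
	}
	nsender, err := mq.network.NewMessageSender(mq.ctx, mq.p)
	if err != nil {
		return err
	}
	mq.sender = nsender
	return nil
}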

Session idle Query

The session queries the DHT for more block providers on an idle-ticker basis.

s.pm.FindMorePeers(ctx, live[0])

An idle ticker periodically re-broadcasts the live list of wanted blocks.

func (s *Session) handleIdleTick(ctx context.Context) {
	live := s.sw.PrepareBroadcast()
	// Broadcast these keys to everyone we're connected to
	s.pm.RecordPeerRequests(nil, live)
	s.wm.WantBlocks(ctx, live, nil, s.id)

	// do not find providers on consecutive ticks
	// -- just rely on periodic search widening
	if len(live) > 0 && (s.consecutiveTicks == 0) {
		s.pm.FindMorePeers(ctx, live[0])
	}
	s.resetIdleTick()
	if s.sw.HasLiveWants() {
		s.consecutiveTicks++
	}
}

ProviderQueryManager

s.pm.FindMorePeers() then invokes the PQM to find providers, by sending a message to the ProviderQueryManager.

Msg = newProvideQueryMessage
func (spm *SessionPeerManager) FindMorePeers(ctx context.Context, c cid.Cid) {
	go func(k cid.Cid) {
		for p := range spm.providerFinder.FindProvidersAsync(ctx, k) {

			select {
			case spm.peerMessages <- &peerFoundMessage{p}:
			case <-ctx.Done():
			case <-spm.ctx.Done():
			}
		}
	}(c)
}

// FindProvidersAsync finds providers for the given block.
func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid) <-chan peer.ID {
	inProgressRequestChan := make(chan inProgressRequest)

	select {
	case pqm.providerQueryMessages <- &newProvideQueryMessage{
		k:                     k,
		inProgressRequestChan: inProgressRequestChan,
	}:
	case <-pqm.ctx.Done():
		ch := make(chan peer.ID)
		close(ch)
		return ch
	case <-sessionCtx.Done():
		ch := make(chan peer.ID)
		close(ch)
		return ch
	}

	// DO NOT select on sessionCtx. We only want to abort here if we're
	// shutting down because we can't actually _cancel_ the request till we
	// get to receiveProviders.
	var receivedInProgressRequest inProgressRequest
	select {
	case <-pqm.ctx.Done():
		ch := make(chan peer.ID)
		close(ch)
		return ch
	case receivedInProgressRequest = <-inProgressRequestChan:
	}

	return pqm.receiveProviders(sessionCtx, k, receivedInProgressRequest)
}

PQM internal

PQM is started when bitswap is initialized.

	...
	bs.wm.Startup()
	bs.pqm.Startup()
	network.SetDelegate(bs)

	// Start up bitswaps async worker routines
	bs.startWorkers(ctx, px)
	engine.StartWorkers(ctx, px)

PQM is a message-driven system, similar to WantManager and SessionPeerManager.
PQM starts a providerRequestBufferWorker for buffering outgoing requests.

func (pqm *ProviderQueryManager) run() {
	defer pqm.cleanupInProcessRequests()

	go pqm.providerRequestBufferWorker()
	for i := 0; i < maxInProcessRequests; i++ {
		go pqm.findProviderWorker()
	}

	for {
		select {
		case nextMessage := <-pqm.providerQueryMessages:
			log.Debug(nextMessage.debugMessage())
			nextMessage.handle(pqm)
		case <-pqm.ctx.Done():
			return
		}
	}
}

Message types:

  • newProvideQueryMessage
  • receivedProviderMessage
  • finishedProviderQueryMessage
  • cancelRequestMessage

Procedure:
  1. newProvideQueryMessage is sent when SessionPeerManager.FindMorePeers is invoked externally
  2. When handling newProvideQueryMessage, the PQM run loop sends a findProviderRequest message to providerRequestBufferWorker()
  3. providerRequestBufferWorker queues the requests, pumping findProviderRequest messages to the findProviderWorkers
  4. Each findProviderWorker performs the DHT lookup FindProvidersAsync(findProviderCtx, k, maxProviders) (a sketch follows this list)
  5. PQM tries to connect to each provider returned by the DHT
  6. After each successful connection, a receivedProviderMessage is sent back to the PQM
  7. In the end, a finishedProviderQueryMessage is sent to the PQM
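
A simplified, hypothetical sketch of what one findProviderWorker does per request (the channel and struct field names are assumptions; the real worker also handles cancellation and de-duplication):

func (pqm *ProviderQueryManager) findProviderWorker() {
	// requests is the queue fed by providerRequestBufferWorker
	// (hypothetical channel name).
	for fpr := range pqm.requests {
		k := fpr.k
		providers := pqm.network.FindProvidersAsync(pqm.ctx, k, maxProviders)

		var wg sync.WaitGroup
		for p := range providers {
			wg.Add(1)
			go func(p peer.ID) {
				defer wg.Done()
				// Only report providers we can actually connect to.
				if err := pqm.network.ConnectTo(pqm.ctx, p); err != nil {
					return
				}
				pqm.providerQueryMessages <- &receivedProviderMessage{k: k, p: p}
			}(p)
		}
		wg.Wait()
		pqm.providerQueryMessages <- &finishedProviderQueryMessage{k: k}
	}
}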

As I understand it, FindProviders could take a long time. In order to keep the query service responsive, PQM puts a buffering mechanism between itself and the DHT. This is why it creates 6 findProviderWorkers (maxInProcessRequests) and a providerRequestBufferWorker that queues all incoming newProvideQueryMessages…

Eventually, in FindMorePeers(), a peerFoundMessage carrying the peer ID is sent to the SessionPeerManager.

In SessionPeerManager, new peers are added to spm.activePeers and to the unoptimized peer array:

func (pfm *peerFoundMessage) handle(spm *SessionPeerManager) {
	p := pfm.p
	if _, ok := spm.activePeers[p]; !ok {
		spm.activePeers[p] = newPeerData()
		spm.insertPeer(p, spm.activePeers[p])
		spm.tagPeer(p, spm.activePeers[p])
	}
}

Session.GetBlock Flow

[Sequence diagram: Session, WantManager, PeerHandler, MessageQueue, SPM, PQM. The session receives opWant [cids] and calls wm.WantBlocks, which becomes a wantSet [cids]; WantManager calls peerHandler.SendMessage, which either (1) broadcasts to all connected peers or (2) talks only to the peers specified for the cids; the MessageQueue opens a bitswap stream (NewStream) and sends the bitswap Message, after which the actual transfer happens. On the idleTicker the session calls FindMorePeers; the SPM sends newProvideQueryMessage to the PQM, which answers with receivedProviderMessage and finishedProviderQueryMessage; the SPM then gets peerFoundMessage for the cids and updates activePeers[].]

SPM = SessionPeerManager
PQM = ProviderQueryManager
