Ethereum Source Code Analysis (37): The eth Ethereum Protocol
The node package defines the Service interface; eth is essentially an implementation of such a service.
type Service interface {
// Protocols retrieves the P2P protocols the service wishes to start.
Protocols() []p2p.Protocol
// APIs retrieves the list of RPC descriptors the service provides
APIs() []rpc.API
// Start is called after all services have been constructed and the networking
// layer was also initialized to spawn any goroutines required by the service.
Start(server *p2p.Server) error
// Stop terminates all goroutines belonging to the service, blocking until they
// are all terminated.
Stop() error
}
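To make the interface concrete, here is a minimal sketch of a do-nothing service that satisfies it; noopService is purely illustrative and not part of go-ethereum.

package demo

import (
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

// noopService offers no protocols, no APIs, and nothing to start or stop.
type noopService struct{}

func (s *noopService) Protocols() []p2p.Protocol      { return nil }
func (s *noopService) APIs() []rpc.API                { return nil }
func (s *noopService) Start(server *p2p.Server) error { return nil }
func (s *noopService) Stop() error                    { return nil }

// compile-time check that noopService really implements node.Service
var _ node.Service = (*noopService)(nil)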
The eth directory of go-ethereum implements the Ethereum service. The Ethereum protocol is injected into the node via its Register method.
// RegisterEthService adds an Ethereum client to the stack.
func RegisterEthService(stack *node.Node, cfg *eth.Config) {
var err error
if cfg.SyncMode == downloader.LightSync {
err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return les.New(ctx, cfg)
})
} else {
err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
fullNode, err := eth.New(ctx, cfg)
if fullNode != nil && cfg.LightServ > 0 {
ls, _ := les.NewLesServer(fullNode, cfg)
fullNode.AddLesServer(ls)
}
return fullNode, err
})
}
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
}
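The same Register mechanism works for any service. A hedged sketch, reusing the hypothetical noopService from the earlier example (error handling kept minimal):

// registerNoopService wires the do-nothing service into the node stack,
// mirroring the shape of RegisterEthService above.
func registerNoopService(stack *node.Node) error {
	return stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
		return &noopService{}, nil
	})
}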
The data structure of the Ethereum protocol:
// Ethereum implements the Ethereum full node service.
type Ethereum struct {
config *Config // the configuration
chainConfig *params.ChainConfig // the chain configuration
// Channel for shutting down the service
shutdownChan chan bool // Channel for shutting down the ethereum
stopDbUpgrade func() error // stop chain db sequential key upgrade
// Handlers
txPool *core.TxPool // the transaction pool
blockchain *core.BlockChain // the blockchain
protocolManager *ProtocolManager // the protocol manager
lesServer LesServer // the light client server
// DB interfaces
chainDb ethdb.Database // Block chain database
eventMux *event.TypeMux
engine consensus.Engine // consensus engine; as I understand it, this is the PoW part
accountManager *accounts.Manager // account manager
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
bloomIndexer *core.ChainIndexer // Bloom indexer operating during block imports (its exact role is not covered here)
ApiBackend *EthApiBackend // API backend used by the RPC services
miner *miner.Miner // the miner
gasPrice *big.Int // minimal gas price accepted by this node; transactions with a lower gas price are rejected
etherbase common.Address // the miner's beneficiary address
networkId uint64 // network ID used to tell networks apart (mainnet is 1, the Ropsten testnet is 3)
netRPCService *ethapi.PublicNetAPI // the net RPC service
lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
}
Creating the Ethereum protocol: New. For now we will not go into the core package; it is only sketched here, and its contents will be analysed later.
// New creates a new Ethereum object (including the
// initialisation of the common Ethereum object)
func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if config.SyncMode == downloader.LightSync {
return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum")
}
if !config.SyncMode.IsValid() {
return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode)
}
// Create the LevelDB database: open or create the chaindata directory
chainDb, err := CreateDB(ctx, config, "chaindata")
if err != nil {
return nil, err
}
// Upgrade the database format if needed
stopDbUpgrade := upgradeDeduplicateData(chainDb)
// Set up the genesis block. If the database already contains one (e.g. a private chain), load it from there; otherwise fall back to the defaults compiled into the code.
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}
log.Info("Initialised chain configuration", "config", chainConfig)
eth := &Ethereum{
config: config,
chainDb: chainDb,
chainConfig: chainConfig,
eventMux: ctx.EventMux,
accountManager: ctx.AccountManager,
engine: CreateConsensusEngine(ctx, config, chainConfig, chainDb), // consensus engine; as I understand it, PoW here
shutdownChan: make(chan bool),
stopDbUpgrade: stopDbUpgrade,
networkId: config.NetworkId, // network ID used to tell networks apart (mainnet is 1, the Ropsten testnet is 3)
gasPrice: config.GasPrice, // minimal gas price this client accepts (configurable via --gasprice); transactions below it are dropped by the node
etherbase: config.Etherbase, // beneficiary of the mining rewards
bloomRequests: make(chan chan *bloombits.Retrieval), // channel for bloom data retrieval requests
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks),
}
log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId)
if !config.SkipBcVersionCheck { // check that the BlockChainVersion stored in the database matches the client's BlockChainVersion
bcVersion := core.GetBlockChainVersion(chainDb)
if bcVersion != core.BlockChainVersion && bcVersion != 0 {
return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d). Run geth upgradedb.\n", bcVersion, core.BlockChainVersion)
}
core.WriteBlockChainVersion(chainDb, core.BlockChainVersion)
}
vmConfig := vm.Config{EnablePreimageRecording: config.EnablePreimageRecording}
// Create the blockchain on top of the database
eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.engine, vmConfig)
if err != nil {
return nil, err
}
// Rewind the chain in case of an incompatible config upgrade.
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
eth.blockchain.SetHead(compat.RewindTo)
core.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
// The bloomIndexer is not examined in detail here; not much of it is involved at this point, so we set it aside for now.
eth.bloomIndexer.Start(eth.blockchain.CurrentHeader(), eth.blockchain.SubscribeChainEvent)
if config.TxPool.Journal != "" {
config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal)
}
// Create the transaction pool. It stores transactions created locally or received from the network.
eth.txPool = core.NewTxPool(config.TxPool, eth.chainConfig, eth.blockchain)
// Create the protocol manager
if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil {
return nil, err
}
// Create the miner
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine)
eth.miner.SetExtra(makeExtraData(config.ExtraData))
// ApiBackend provides the backend for RPC calls
eth.ApiBackend = &EthApiBackend{eth, nil}
// gpoParams: GPO stands for Gas Price Oracle. It estimates the current gas price from recent transactions; callers can use the result as a fee suggestion when sending later transactions.
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.GasPrice
}
eth.ApiBackend.gpo = gasprice.NewOracle(eth.ApiBackend, gpoParams)
return eth, nil
}
EthApiBackend is defined in api_backend.go and wraps a number of helper functions.
// EthApiBackend implements ethapi.Backend for full nodes
type EthApiBackend struct {
eth *Ethereum
gpo *gasprice.Oracle
}
func (b *EthApiBackend) SetHead(number uint64) {
b.eth.protocolManager.downloader.Cancel()
b.eth.blockchain.SetHead(number)
}
Apart from the core-package calls, the New method creates a ProtocolManager object that is quite important for the Ethereum protocol. Ethereum itself is a protocol, and the ProtocolManager in turn manages several Ethereum sub-protocols.
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network.
func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkId: networkId,
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
chaindb: chaindb,
chainconfig: config,
peers: newPeerSet(),
newPeerCh: make(chan *peer),
noMorePeers: make(chan struct{}),
txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
}
// Figure out whether to allow fast sync or not
if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
log.Warn("Blockchain not empty, fast sync disabled")
mode = downloader.FullSync
}
if mode == downloader.FastSync {
manager.fastSync = uint32(1)
}
// Initiate a sub-protocol for every implemented version we can handle
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
for i, version := range ProtocolVersions {
// Skip protocol version if incompatible with the mode of operation
if mode == downloader.FastSync && version < eth63 {
continue
}
// Compatible; initialise the sub-protocol
version := version // Closure for the run
manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
Name: ProtocolName,
Version: version,
Length: ProtocolLengths[i],
// Remember the Protocol type in the p2p package? Run is called once a p2p peer connection has been established.
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(int(version), p, rw)
select {
case manager.newPeerCh <- peer:
manager.wg.Add(1)
defer manager.wg.Done()
return manager.handle(peer)
case <-manager.quitSync:
return p2p.DiscQuitting
}
},
NodeInfo: func() interface{} {
return manager.NodeInfo()
},
PeerInfo: func(id discover.NodeID) interface{} {
if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info()
}
return nil
},
})
}
if len(manager.SubProtocols) == 0 {
return nil, errIncompatibleConfig
}
// Construct the different synchronisation mechanisms
// The downloader is responsible for syncing our own data from other peers.
// It is the full-chain synchronisation tool.
manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)
// validator is a function that uses the consensus engine to verify block headers
validator := func(header *types.Header) error {
return engine.VerifyHeader(blockchain, header, true)
}
// heighter is a function returning the current block height
heighter := func() uint64 {
return blockchain.CurrentBlock().NumberU64()
}
// While fast sync is running, inserter discards propagated blocks instead of importing them.
inserter := func(blocks types.Blocks) (int, error) {
// If fast sync is running, deny importing weird blocks
if atomic.LoadUint32(&manager.fastSync) == 1 {
log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
return 0, nil
}
// mark that we now accept transactions
atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
// insert the blocks into the chain
return manager.blockchain.InsertChain(blocks)
}
// Create the fetcher.
// The fetcher accumulates block announcements from the various peers and schedules them for retrieval.
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
return manager, nil
}
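As an aside, the version := version line above matters: every sub-protocol's Run closure must capture its own copy of the loop variable. A tiny standalone program (unrelated to go-ethereum) illustrating the pitfall:

package main

import "fmt"

func main() {
	fns := make([]func() int, 0, 3)
	for _, v := range []int{62, 63, 64} {
		v := v // per-iteration copy, just like `version := version` above
		fns = append(fns, func() int { return v })
	}
	for _, f := range fns {
		// prints 62, 63, 64; without the re-declaration (before Go 1.22)
		// every closure would see the final value 64
		fmt.Println(f())
	}
}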
The service's APIs() method returns the RPC APIs the service exposes.
// APIs returns the collection of RPC services the ethereum package offers.
// NOTE, some of these services probably need to be moved to somewhere else.
func (s *Ethereum) APIs() []rpc.API {
apis := ethapi.GetAPIs(s.ApiBackend)
// Append any APIs exposed explicitly by the consensus engine
apis = append(apis, s.engine.APIs(s.BlockChain())...)
// Append all the local APIs and return
return append(apis, []rpc.API{
{
Namespace: "eth",
Version: "1.0",
Service: NewPublicEthereumAPI(s),
Public: true,
},
...
, {
Namespace: "net",
Version: "1.0",
Service: s.netRPCService,
Public: true,
},
}...)
}
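The rpc.API entries follow a simple convention: the exported methods of Service are exposed as RPC methods under Namespace. A hedged sketch of adding one more namespace inside package eth; the demo namespace and the DemoAPI type are made up for illustration:

// DemoAPI is a made-up API type; when included in the slice returned by
// APIs(), its exported methods become demo_* RPC methods.
type DemoAPI struct {
	e *Ethereum
}

// NetworkId would be callable as demo_networkId over RPC.
func (api *DemoAPI) NetworkId() uint64 { return api.e.networkId }

func demoAPIs(e *Ethereum) []rpc.API {
	return []rpc.API{{
		Namespace: "demo",
		Version:   "1.0",
		Service:   &DemoAPI{e},
		Public:    true,
	}}
}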
The service's Protocols method returns the p2p protocols the service provides: all of the protocol manager's SubProtocols, plus the lesServer's protocols if a lesServer is present. As you can see, all networking functionality is exposed through Protocols.
// Protocols implements node.Service, returning all the currently configured
// network protocols to start.
func (s *Ethereum) Protocols() []p2p.Protocol {
if s.lesServer == nil {
return s.protocolManager.SubProtocols
}
return append(s.protocolManager.SubProtocols, s.lesServer.Protocols()...)
}
After the Ethereum service has been created, its Start method is called. Let's take a look at it.
// Start implements node.Service, starting all internal goroutines needed by the
// Ethereum protocol implementation.
func (s *Ethereum) Start(srvr *p2p.Server) error {
// Start the bloom bits servicing goroutines
// start the goroutines that handle bloom filter requests (TODO)
s.startBloomHandlers()
// Start the RPC service
// create the net RPC API
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.NetVersion())
// Figure out a max peers count based on the server limits
maxPeers := srvr.MaxPeers
if s.config.LightServ > 0 {
maxPeers -= s.config.LightPeers
if maxPeers < srvr.MaxPeers/2 {
maxPeers = srvr.MaxPeers / 2
}
}
// Start the networking layer and the light server if requested
// start the protocol manager
s.protocolManager.Start(maxPeers)
if s.lesServer != nil {
// if lesServer is not nil, start it as well
s.lesServer.Start(srvr)
}
return nil
}
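The maxPeers arithmetic above reserves connection slots for light clients while still guaranteeing the full-node side at least half of the server's total. The same logic pulled out into a standalone helper for clarity (not part of go-ethereum):

// fullNodePeerBudget mirrors the calculation in Start: subtract the light
// client allowance, but never drop below half of the server's MaxPeers.
func fullNodePeerBudget(serverMaxPeers, lightServ, lightPeers int) int {
	maxPeers := serverMaxPeers
	if lightServ > 0 {
		maxPeers -= lightPeers
		if maxPeers < serverMaxPeers/2 {
			maxPeers = serverMaxPeers / 2
		}
	}
	return maxPeers
}

For example, with MaxPeers = 25, LightServ > 0 and LightPeers = 20, the helper returns 12 (half of 25, rounded down) rather than 5.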
The protocol manager's data structure:
type ProtocolManager struct {
networkId uint64
fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
txpool txPool
blockchain *core.BlockChain
chaindb ethdb.Database
chainconfig *params.ChainConfig
maxPeers int
downloader *downloader.Downloader
fetcher *fetcher.Fetcher
peers *peerSet
SubProtocols []p2p.Protocol
eventMux *event.TypeMux
txCh chan core.TxPreEvent
txSub event.Subscription
minedBlockSub *event.TypeMuxSubscription
// channels for fetcher, syncer, txsyncLoop
newPeerCh chan *peer
txsyncCh chan *txsync
quitSync chan struct{}
noMorePeers chan struct{}
// wait group is used for graceful shutdowns during downloading
// and processing
wg sync.WaitGroup
}
The protocol manager's Start method. It launches a number of goroutines to handle various tasks, which suggests that this type is the main workhorse of the Ethereum service.
func (pm *ProtocolManager) Start(maxPeers int) {
pm.maxPeers = maxPeers
// broadcast transactions
// Channel for broadcasting transactions. txCh is registered as the txpool's TxPreEvent subscription channel: whenever the txpool emits such an event it is delivered on txCh, and the broadcasting goroutine relays the transaction to the network.
pm.txCh = make(chan core.TxPreEvent, txChanSize)
// the subscription handle
pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh)
// start the transaction broadcasting goroutine
go pm.txBroadcastLoop()
// broadcast mined blocks
// Subscribe to mined-block events: an event is produced whenever a new block is mined. Note that this subscription uses a different mechanism from the one above; this TypeMux style is marked as deprecated.
pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
// mined-block broadcasting goroutine: a freshly mined block should be propagated to the network as quickly as possible
go pm.minedBroadcastLoop()
// start sync handlers
// The syncer periodically synchronises with the network, downloading hashes and blocks and driving the announcement handler.
go pm.syncer()
// txsyncLoop handles the initial transaction sync for each new connection: when a new peer appears, we relay all currently pending transactions. To minimise egress bandwidth we send them in small packs, one peer at a time.
go pm.txsyncLoop()
}
When the p2p server starts, it actively dials nodes to connect to and also accepts connections from other nodes. A connection first performs the encrypted-channel handshake, then the protocol handshake, and finally a goroutine is started per protocol to execute the Run method, handing control to the protocol itself. This Run method first creates a peer object and then calls handle to take care of that peer:
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(int(version), p, rw)
select {
case manager.newPeerCh <- peer: // hand the peer over on the newPeerCh channel
manager.wg.Add(1)
defer manager.wg.Done()
return manager.handle(peer) // call the handle method
case <-manager.quitSync:
return p2p.DiscQuitting
}
},
The handle method:
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
if pm.peers.Len() >= pm.maxPeers {
return p2p.DiscTooManyPeers
}
p.Log().Debug("Ethereum peer connected", "name", p.Name())
// Execute the Ethereum handshake
td, head, genesis := pm.blockchain.Status()
// td is the total difficulty, head is the current head block hash, and genesis identifies the genesis block. The handshake only succeeds if both sides share the same genesis block.
if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
p.Log().Debug("Ethereum handshake failed", "err", err)
return err
}
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version)
}
// Register the peer locally
if err := pm.peers.Register(p); err != nil {
p.Log().Error("Ethereum peer registration failed", "err", err)
return err
}
defer pm.removePeer(p.id)
// Register the peer in the downloader. If the downloader considers it banned, we disconnect
if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
return err
}
// Propagate existing transactions. new transactions appearing
// after this will be sent via broadcasts.
// Send the currently pending transactions to the peer; this only happens right after the connection has been established.
pm.syncTransactions(p)
// If we're DAO hard-fork aware, validate any remote peer with regard to the hard-fork
if daoBlock := pm.chainconfig.DAOForkBlock; daoBlock != nil {
// Request the peer's DAO fork header for extra-data validation
if err := p.RequestHeadersByNumber(daoBlock.Uint64(), 1, 0, false); err != nil {
return err
}
// Start a timer to disconnect if the peer doesn't reply in time
// If no reply arrives within the DAO challenge timeout (15 seconds), drop the connection.
p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
p.Log().Debug("Timed out DAO fork-check, dropping")
pm.removePeer(p.id)
})
// Make sure it's cleaned up if the peer dies off
defer func() {
if p.forkDrop != nil {
p.forkDrop.Stop()
p.forkDrop = nil
}
}()
}
// main loop. handle incoming messages.
for {
if err := pm.handleMsg(p); err != nil {
p.Log().Debug("Ethereum message handling failed", "err", err)
return err
}
}
}
The Handshake method:
// Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash) error {
// Send out own handshake in a new thread
// the error channel has a capacity of 2 so both goroutines below can report their result without blocking
errc := make(chan error, 2)
var status statusData // safe to read after two values have been received from errc
go func() {
errc <- p2p.Send(p.rw, StatusMsg, &statusData{
ProtocolVersion: uint32(p.version),
NetworkId: network,
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
})
}()
go func() {
errc <- p.readStatus(network, &status, genesis)
}()
timeout := time.NewTimer(handshakeTimeout)
defer timeout.Stop()
// If either operation (send or receive) returns an error, or the timeout fires, the connection is dropped.
for i := 0; i < 2; i++ {
select {
case err := <-errc:
if err != nil {
return err
}
case <-timeout.C:
return p2p.DiscReadTimeout
}
}
p.td, p.head = status.TD, status.CurrentBlock
return nil
}
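The buffered errc channel of capacity 2 is the heart of this function: the send and the read run concurrently, and the loop then collects exactly two results or gives up on timeout. A generic sketch of the pattern with stand-in send/recv functions (not the real peer methods):

package sketch

import (
	"errors"
	"time"
)

// handshake runs send and recv concurrently and waits for both to finish,
// mirroring the errc/timeout structure of peer.Handshake above.
func handshake(send, recv func() error, timeout time.Duration) error {
	errc := make(chan error, 2) // buffered: neither goroutine blocks when reporting
	go func() { errc <- send() }()
	go func() { errc <- recv() }()
	t := time.NewTimer(timeout)
	defer t.Stop()
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				return err
			}
		case <-t.C:
			return errors.New("handshake timed out")
		}
	}
	return nil
}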
readStatus checks the various fields of the status message returned by the remote peer:
func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash) (err error) {
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
if msg.Code != StatusMsg {
return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
}
if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
// Decode the handshake and make sure everything matches
if err := msg.Decode(&status); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
if status.GenesisBlock != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
}
if status.NetworkId != network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network)
}
if int(status.ProtocolVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
}
return nil
}
Register simply adds the peer to the peerSet's peers map.
// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
func (ps *peerSet) Register(p *peer) error {
ps.lock.Lock()
defer ps.lock.Unlock()
if ps.closed {
return errClosed
}
if _, ok := ps.peers[p.id]; ok {
return errAlreadyRegistered
}
ps.peers[p.id] = p
return nil
}
After this series of checks and the handshake, handleMsg is called in a loop as the event loop. The method is long; it mainly implements the reaction to each kind of incoming message.
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *ProtocolManager) handleMsg(p *peer) error {
// Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
defer msg.Discard()
// Handle the message depending on its contents
switch {
case msg.Code == StatusMsg:
// Status messages should never arrive after the handshake
// StatusMsg should only be received during the handshake; once the handshake is done, this message must never arrive again.
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
// Block header query, collect the requested headers and reply
case msg.Code == GetBlockHeadersMsg:
// Decode the complex header query
var query getBlockHeadersData
if err := msg.Decode(&query); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
hashMode := query.Origin.Hash != (common.Hash{})
// Gather headers until the fetch or network limits is reached
var (
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
// Retrieve the next header satisfying the query
var origin *types.Header
if hashMode {
origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
} else {
origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
number := origin.Number.Uint64()
headers = append(headers, origin)
bytes += estHeaderRlpSize
// Advance to the next header of the query
switch {
case query.Origin.Hash != (common.Hash{}) && query.Reverse:
// Hash based traversal towards the genesis block
// Walk from the block identified by the hash towards the genesis block, i.e. traverse in reverse, looking headers up by hash.
for i := 0; i < int(query.Skip)+1; i++ {
if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil { // look up the header by hash and number, then move to its parent
query.Origin.Hash = header.ParentHash
number--
} else {
unknown = true
break // break exits the inner for loop; unknown is what terminates the outer header-gathering loop
}
}
case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
// Hash based traversal towards the leaf block
// look headers up by hash
var (
current = origin.Number.Uint64()
next = current + query.Skip + 1
)
if next <= current { // forward traversal, yet next is no larger than current: guard against an integer overflow attack
infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ")
p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
unknown = true
} else {
if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
// the header exists and lies on the same chain as origin
query.Origin.Hash = header.Hash()
} else {
unknown = true
}
} else {
unknown = true
}
}
case query.Reverse: // look up by number
// Number based traversal towards the genesis block
// query.Origin.Hash == (common.Hash{})
if query.Origin.Number >= query.Skip+1 {
query.Origin.Number -= (query.Skip + 1)
} else {
unknown = true
}
case !query.Reverse: // look up by number
// Number based traversal towards the leaf block
query.Origin.Number += (query.Skip + 1)
}
}
return p.SendBlockHeaders(headers)
case msg.Code == BlockHeadersMsg: // a reply to one of our earlier GetBlockHeadersMsg requests arrived
// A batch of headers arrived to one of our previous requests
var headers []*types.Header
if err := msg.Decode(&headers); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// If no headers were received, but we're expending a DAO fork check, maybe it's that
// If the peer returned no headers and forkDrop is non-nil, this is most likely the reply to the DAO check: we requested the DAO fork header right after the handshake.
if len(headers) == 0 && p.forkDrop != nil {
// Possibly an empty reply to the fork header checks, sanity check TDs
verifyDAO := true
// If we already have a DAO header, we can check the peer's TD against it. If
// the peer's ahead of this, it too must have a reply to the DAO check
if daoHeader := pm.blockchain.GetHeaderByNumber(pm.chainconfig.DAOForkBlock.Uint64()); daoHeader != nil {
if _, td := p.Head(); td.Cmp(pm.blockchain.GetTd(daoHeader.Hash(), daoHeader.Number.Uint64())) >= 0 {
// Check whether the peer's total difficulty already exceeds the TD of the DAO fork block. If it does, the peer must have that header yet replied with nothing, so the verification fails. Nothing is done here; if the peer still does not send the header, it will be dropped by the timeout.
verifyDAO = false
}
}
// If we're seemingly on the same chain, disable the drop timer
if verifyDAO { // verification passed: stop the timer and return
p.Log().Debug("Seems to be on the same side of the DAO fork")
p.forkDrop.Stop()
p.forkDrop = nil
return nil
}
}
// Filter out any explicitly requested headers, deliver the rest to the downloader
// filter is true when exactly one header was returned
filter := len(headers) == 1
if filter {
// If it's a potential DAO fork check, validate against the rules
if p.forkDrop != nil && pm.chainconfig.DAOForkBlock.Cmp(headers[0].Number) == 0 { // DAO fork check
// Disable the fork drop timer
p.forkDrop.Stop()
p.forkDrop = nil
// Validate the header and either drop the peer or continue
if err := misc.VerifyDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
p.Log().Debug("Verified to be on the other side of the DAO fork, dropping")
return err
}
p.Log().Debug("Verified to be on the same side of the DAO fork")
return nil
}
// Irrelevant of the fork checks, send the header to the fetcher just in case
// If this is not the DAO reply, pass the headers through the fetcher's filter. The filter returns the headers that still need handling, and those are then delivered to the downloader.
headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
}
if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers)
if err != nil {
log.Debug("Failed to deliver headers", "err", err)
}
}
case msg.Code == GetBlockBodiesMsg:
// A request for block bodies. This one is simple: fetch the bodies from the blockchain and return them.
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather blocks until the fetch or network limits is reached
var (
hash common.Hash
bytes int
bodies []rlp.RawValue
)
for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
// Retrieve the hash of the next block
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested block body, stopping if enough was found
if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
bodies = append(bodies, data)
bytes += len(data)
}
}
return p.SendBlockBodiesRLP(bodies)
case msg.Code == BlockBodiesMsg:
// A batch of block bodies arrived to one of our previous requests
var request blockBodiesData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver them all to the downloader for queuing
trasactions := make([][]*types.Transaction, len(request))
uncles := make([][]*types.Header, len(request))
for i, body := range request {
trasactions[i] = body.Transactions
uncles[i] = body.Uncles
}
// Filter out any explicitly requested bodies, deliver the rest to the downloader
filter := len(trasactions) > 0 || len(uncles) > 0
if filter {
trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now())
}
if len(trasactions) > 0 || len(uncles) > 0 || !filter {
err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
if err != nil {
log.Debug("Failed to deliver bodies", "err", err)
}
}
case p.version >= eth63 && msg.Code == GetNodeDataMsg:
// the peer speaks at least eth63 and is requesting node data
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather state data until the fetch or network limits is reached
var (
hash common.Hash
bytes int
data [][]byte
)
for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
// Retrieve the hash of the next state entry
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested state entry, stopping if enough was found
// any hash the peer asks for is looked up and returned
if entry, err := pm.chaindb.Get(hash.Bytes()); err == nil {
data = append(data, entry)
bytes += len(entry)
}
}
return p.SendNodeData(data)
case p.version >= eth63 && msg.Code == NodeDataMsg:
// A batch of node state data arrived to one of our previous requests
var data [][]byte
if err := msg.Decode(&data); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver all to the downloader
if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
log.Debug("Failed to deliver node state data", "err", err)
}
case p.version >= eth63 && msg.Code == GetReceiptsMsg:
// a request for receipts
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
// Gather state data until the fetch or network limits is reached
var (
hash common.Hash
bytes int
receipts []rlp.RawValue
)
for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
// Retrieve the hash of the next block
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested block's receipts, skipping if unknown to us
results := core.GetBlockReceipts(pm.chaindb, hash, core.GetBlockNumber(pm.chaindb, hash))
if results == nil {
if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
continue
}
}
// If known, encode and queue for response packet
if encoded, err := rlp.EncodeToBytes(results); err != nil {
log.Error("Failed to encode receipt", "err", err)
} else {
receipts = append(receipts, encoded)
bytes += len(encoded)
}
}
return p.SendReceiptsRLP(receipts)
case p.version >= eth63 && msg.Code == ReceiptsMsg:
// A batch of receipts arrived to one of our previous requests
var receipts [][]*types.Receipt
if err := msg.Decode(&receipts); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver all to the downloader
if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
log.Debug("Failed to deliver receipts", "err", err)
}
case msg.Code == NewBlockHashesMsg:
// received a NewBlockHashesMsg announcement
var announces newBlockHashesData
if err := msg.Decode(&announces); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
// Mark the hashes as present at the remote node
for _, block := range announces {
p.MarkBlock(block.Hash)
}
// Schedule all the unknown hashes for retrieval
unknown := make(newBlockHashesData, 0, len(announces))
for _, block := range announces {
if !pm.blockchain.HasBlock(block.Hash, block.Number) {
unknown = append(unknown, block)
}
}
for _, block := range unknown {
// notify the fetcher that there is a potential block to download
pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
}
case msg.Code == NewBlockMsg:
// Retrieve and decode the propagated block
var request newBlockData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
request.Block.ReceivedAt = msg.ReceivedAt
request.Block.ReceivedFrom = p
// Mark the peer as owning the block and schedule it for import
p.MarkBlock(request.Block.Hash())
pm.fetcher.Enqueue(p.id, request.Block)
// Assuming the block is importable by the peer, but possibly not yet done so,
// calculate the head hash and TD that the peer truly must have.
var (
trueHead = request.Block.ParentHash()
trueTD = new(big.Int).Sub(request.TD, request.Block.Difficulty())
)
// Update the peers total difficulty if better than the previous
if _, td := p.Head(); trueTD.Cmp(td) > 0 {
// If the peer's true head and TD differ from what we have recorded, update our record of the peer's head and TD.
p.SetHead(trueHead, trueTD)
// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
// a singe block (as the true TD is below the propagated block), however this
// scenario should easily be covered by the fetcher.
// If the peer's true TD is higher than ours, request a synchronisation with this peer.
currentBlock := pm.blockchain.CurrentBlock()
if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
go pm.synchronise(p)
}
}
case msg.Code == TxMsg:
// Transactions arrived, make sure we have a valid and fresh chain to handle them
// Transactions arrived. We do not accept transactions until the initial sync has completed.
if atomic.LoadUint32(&pm.acceptTxs) == 0 {
break
}
// Transactions can be processed, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
for i, tx := range txs {
// Validate and mark the remote transaction
if tx == nil {
return errResp(ErrDecode, "transaction %d is nil", i)
}
p.MarkTransaction(tx.Hash())
}
// add them to the txpool
pm.txpool.AddRemotes(txs)
default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
return nil
}
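To make the GetBlockHeadersMsg traversal above easier to picture, here is a small illustrative helper (not part of go-ethereum) that lists the block numbers a number-based query visits, given its origin, amount, skip and direction:

// headerQueryNumbers mirrors the number-based branches of the traversal:
// each step moves skip+1 blocks, towards the genesis block when reverse is
// set and towards the chain head otherwise.
func headerQueryNumbers(origin, amount, skip uint64, reverse bool) []uint64 {
	numbers := make([]uint64, 0, amount)
	for i := uint64(0); i < amount; i++ {
		numbers = append(numbers, origin)
		if reverse {
			if origin < skip+1 {
				break // would step past the genesis block
			}
			origin -= skip + 1
		} else {
			origin += skip + 1
		}
	}
	return numbers
}

For instance, headerQueryNumbers(100, 3, 4, false) yields [100 105 110], while headerQueryNumbers(100, 3, 4, true) yields [100 95 90].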
The synchronise method. It is called when, as above, the remote node turns out to be more up to date than we are:
// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
// Short circuit if no peers are available
if peer == nil {
return
}
// Make sure the peer's TD is higher than our own
currentBlock := pm.blockchain.CurrentBlock()
td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
pHead, pTd := peer.Head()
if pTd.Cmp(td) <= 0 {
return
}
// Otherwise try to sync with the downloader
mode := downloader.FullSync
if atomic.LoadUint32(&pm.fastSync) == 1 { // fast sync was explicitly requested
// Fast sync was explicitly requested, and explicitly granted
mode = downloader.FastSync
} else if currentBlock.NumberU64() == 0 && pm.blockchain.CurrentFastBlock().NumberU64() > 0 { // the block database looks empty while the fast block is ahead
// The database seems empty as the current block is the genesis. Yet the fast
// block is ahead, so fast sync was enabled for this node at a certain point.
// The only scenario where this can happen is if the user manually (or via a
// bad block) rolled back a fast sync node below the sync point. In this case
// however it's safe to reenable fast sync.
atomic.StoreUint32(&pm.fastSync, 1)
mode = downloader.FastSync
}
// Run the sync cycle, and disable fast sync if we've went past the pivot block
err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode)
if atomic.LoadUint32(&pm.fastSync) == 1 {
// Disable fast sync if we indeed have something in our chain
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
log.Info("Fast sync complete, auto disabling")
atomic.StoreUint32(&pm.fastSync, 0)
}
}
if err != nil {
return
}
atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
// initial sync done, start accepting transactions
if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
// We've completed a sync cycle, notify all peers of new state. This path is
// essential in star-topology networks where a gateway node needs to notify
// all its out-of-date peers of the availability of a new block. This failure
// scenario will most often crop up in private and hackathon networks with
// degenerate connectivity, but it should be healthy for the mainnet too to
// more reliably update peers or the local TD state.
// tell all our peers about our new state
go pm.BroadcastBlock(head, false)
}
}
Transaction broadcasting. txBroadcastLoop is a goroutine started in Start. Whenever the txpool accepts a valid transaction it writes an event to txCh, and the loop broadcasts the transaction to all peers.
func (self *ProtocolManager) txBroadcastLoop() {
for {
select {
case event := <-self.txCh:
self.BroadcastTx(event.Tx.Hash(), event.Tx)
// Err() channel will be closed when unsubscribing.
case <-self.txSub.Err():
return
}
}
}
Mined-block broadcasting: when the subscription delivers an event, the freshly mined block is broadcast.
// Mined broadcast loop
func (self *ProtocolManager) minedBroadcastLoop() {
// automatically stops if unsubscribe
for obj := range self.minedBlockSub.Chan() {
switch ev := obj.Data.(type) {
case core.NewMinedBlockEvent:
self.BroadcastBlock(ev.Block, true) // First propagate block to peers
self.BroadcastBlock(ev.Block, false) // Only then announce to the rest
}
}
}
The syncer is responsible for periodically synchronising with the network:
// syncer is responsible for periodically synchronising with the network, both
// downloading hashes and blocks as well as handling the announcement handler.
func (pm *ProtocolManager) syncer() {
// Start and ensure cleanup of sync mechanisms
pm.fetcher.Start()
defer pm.fetcher.Stop()
defer pm.downloader.Terminate()
// Wait for different events to fire synchronisation operations
forceSync := time.NewTicker(forceSyncCycle)
defer forceSync.Stop()
for {
select {
case <-pm.newPeerCh: // when a new peer joins, trigger a sync; this may also lead to a block broadcast
// Make sure we have peers to select from, then sync
if pm.peers.Len() < minDesiredPeerCount {
break
}
go pm.synchronise(pm.peers.BestPeer())
case <-forceSync.C:
// fires periodically (every 10 seconds)
// Force a sync even if not enough peers are present
// BestPeer() picks the peer with the highest total difficulty.
go pm.synchronise(pm.peers.BestPeer())
case <-pm.noMorePeers: // quit signal
return
}
}
}
txsyncLoop is responsible for sending the pending transactions to newly established connections.
// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() {
var (
pending = make(map[discover.NodeID]*txsync)
sending = false // whether a send is active
pack = new(txsync) // the pack that is being sent
done = make(chan error, 1) // result of the send
)
// send starts a sending a pack of transactions from the sync.
send := func(s *txsync) {
// Fill pack with transactions up to the target size.
size := common.StorageSize(0)
pack.p = s.p
pack.txs = pack.txs[:0]
for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
pack.txs = append(pack.txs, s.txs[i])
size += s.txs[i].Size()
}
// Remove the transactions that will be sent.
s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
if len(s.txs) == 0 {
delete(pending, s.p.ID())
}
// Send the pack in the background.
s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
sending = true
go func() { done <- pack.p.SendTransactions(pack.txs) }()
}
// pick chooses the next pending sync.
// pick a random pending txsync to send next
pick := func() *txsync {
if len(pending) == 0 {
return nil
}
n := rand.Intn(len(pending)) + 1
for _, s := range pending {
if n--; n == 0 {
return s
}
}
return nil
}
for {
select {
case s := <-pm.txsyncCh: // receive sync requests from txsyncCh here
pending[s.p.ID()] = s
if !sending {
send(s)
}
case err := <-done:
sending = false
// Stop tracking peers that cause send failures.
if err != nil {
pack.p.Log().Debug("Transaction send failed", "err", err)
delete(pending, pack.p.ID())
}
// Schedule the next send.
if s := pick(); s != nil {
send(s)
}
case <-pm.quitSync:
return
}
}
}
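The pick closure above selects the n-th entry of a map iteration for a uniformly random n, which gives every pending sync the same chance of being chosen. The same count-down trick as a standalone sketch:

package sketch

import "math/rand"

// pickRandomKey returns a uniformly random key of the map, or false if the
// map is empty, using the same count-down trick as pick() above.
func pickRandomKey(pending map[string]int) (string, bool) {
	if len(pending) == 0 {
		return "", false
	}
	n := rand.Intn(len(pending)) + 1
	for key := range pending {
		if n--; n == 0 {
			return key, true
		}
	}
	return "", false // unreachable
}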
The producer side of the txsyncCh queue: syncTransactions is called from the handle method, once, right after a new connection has been established.
// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
var txs types.Transactions
pending, _ := pm.txpool.Pending()
for _, batch := range pending {
txs = append(txs, batch...)
}
if len(txs) == 0 {
return
}
select {
case pm.txsyncCh <- &txsync{p, txs}:
case <-pm.quitSync:
}
}
To summarise, these are the main flows we have seen so far.
Block synchronisation
1. Blocks we mined ourselves are broadcast by the minedBroadcastLoop() goroutine.
2. Blocks announced by other peers (NewBlockHashesMsg/NewBlockMsg) go through the fetcher; whether the fetcher in turn notifies other peers is left as a TODO here.
3. The syncer() goroutine periodically synchronises with BestPeer().
Transaction synchronisation
1. When a connection is newly established, the pending transactions are sent to that peer.
2. When a transaction is submitted locally or received from another peer, the txpool emits an event onto the txCh channel. The txBroadcastLoop() goroutine picks it up and forwards the transaction to every peer that does not yet know about it.