
Posted by 尹成 on 2018-05-20
# Fabric 1.0 Source Code Notes: consenter (Consensus Plugin)

## 1. consenter Overview

The consenter, i.e. the consensus plugin, is responsible for accepting transaction messages for ordering, and for cutting the ordered transactions into batches that are packed into blocks.
The orderer ships with three consensus plugins:
* solo: single-node ordering, intended for experimentation only.
* kafka: ordering backed by a Kafka cluster, usable in production.
* SBFT: a Byzantine-fault-tolerant ordering implementation, still under development.

The consenter code is spread across the orderer/multichain, orderer/solo, orderer/kafka, orderer/common/blockcutter, and orderer/common/filter directories, laid out as follows:

* orderer/multichain:
    * chainsupport.go: definitions of the Consenter and Chain interfaces.
* orderer/solo: the solo consensus plugin.
* orderer/kafka: the kafka consensus plugin.
* orderer/common/blockcutter: the block cutter, i.e. the definition and implementation of the Receiver interface.
* orderer/common/filter: the message filters.

## 2. The Consenter and Chain Interface Definitions

```go
type Consenter interface { // consensus plugin interface
    // Obtain the Chain instance backed by this consensus plugin
    HandleChain(support ConsenterSupport, metadata *cb.Metadata) (Chain, error)
}

type Chain interface {
    // Accept a message for ordering
    Enqueue(env *cb.Envelope) bool
    // Channel that is closed when the chain becomes unavailable
    Errored() <-chan struct{}
    Start() // start servicing the chain
    Halt()  // stop servicing the chain
}
// Source: orderer/multichain/chainsupport.go
```
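
To make the contract concrete, here is a minimal, hypothetical consenter that satisfies both interfaces; the solo and kafka plugins described below follow exactly this shape. The noopConsenter and noopChain names are illustrative only and do not appear in the Fabric source:

```go
// A hypothetical do-nothing consenter, for illustration only.
type noopConsenter struct{}

func (nc *noopConsenter) HandleChain(support ConsenterSupport, metadata *cb.Metadata) (Chain, error) {
    return &noopChain{exitChan: make(chan struct{})}, nil
}

type noopChain struct {
    exitChan chan struct{}
}

func (c *noopChain) Enqueue(env *cb.Envelope) bool { return true } // accept and discard
func (c *noopChain) Errored() <-chan struct{}      { return c.exitChan }
func (c *noopChain) Start()                        {}
func (c *noopChain) Halt()                         { close(c.exitChan) }
```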

## 3. The solo Consensus Plugin

### 3.1 Consenter Interface Implementation

```go
type consenter struct{}

// Construct a consenter
func New() multichain.Consenter {
    return &consenter{}
}

// Obtain the Chain instance backed by the solo plugin
func (solo *consenter) HandleChain(support multichain.ConsenterSupport, metadata *cb.Metadata) (multichain.Chain, error) {
    return newChain(support), nil
}
// Source: orderer/solo/consensus.go
```

### 3.2 Chain Interface Implementation

```go
type chain struct {
    support  multichain.ConsenterSupport
    sendChan chan *cb.Envelope // channel for incoming transactions
    exitChan chan struct{}     // exit signal
}

// Construct a chain
func newChain(support multichain.ConsenterSupport) *chain
// Spawn the processing goroutine: go ch.main()
func (ch *chain) Start()
// Signal shutdown: close(ch.exitChan)
func (ch *chain) Halt()
// Write the Envelope into ch.sendChan
func (ch *chain) Enqueue(env *cb.Envelope) bool
// Return ch.exitChan
func (ch *chain) Errored() <-chan struct{}
// The processing goroutine
func (ch *chain) main()
// Source: orderer/solo/consensus.go
```
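
The method bodies are short. The following sketch, reconstructed to match the comments above (treat it as an approximation of orderer/solo/consensus.go rather than a verbatim copy), shows how they fit together:

```go
func newChain(support multichain.ConsenterSupport) *chain {
    return &chain{
        support:  support,
        sendChan: make(chan *cb.Envelope),
        exitChan: make(chan struct{}),
    }
}

func (ch *chain) Start() {
    go ch.main() // all ordering work happens in the main() goroutine
}

func (ch *chain) Halt() {
    select {
    case <-ch.exitChan: // already halted
    default:
        close(ch.exitChan)
    }
}

func (ch *chain) Enqueue(env *cb.Envelope) bool {
    select {
    case ch.sendChan <- env: // hand the envelope to main()
        return true
    case <-ch.exitChan: // refuse new messages once halted
        return false
    }
}

func (ch *chain) Errored() <-chan struct{} {
    return ch.exitChan
}
```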

### 3.3 The main() Implementation

```go
func (ch *chain) main() {
    var timer <-chan time.Time // batch timeout channel

    for {
        select {
        case msg := <-ch.sendChan: // receive a transaction message
            batches, committers, ok, _ := ch.support.BlockCutter().Ordered(msg)
            if ok && len(batches) == 0 && timer == nil {
                timer = time.After(ch.support.SharedConfig().BatchTimeout()) // start the batch timer
                continue
            }
            for i, batch := range batches {
                block := ch.support.CreateNextBlock(batch)       // create one block per batch
                ch.support.WriteBlock(block, committers[i], nil) // write the block
            }
            if len(batches) > 0 {
                timer = nil
            }
        case <-timer:
            // clear the timer
            timer = nil

            batch, committers := ch.support.BlockCutter().Cut()
            if len(batch) == 0 {
                logger.Warningf("Batch timer expired with no pending requests, this might indicate a bug")
                continue
            }
            logger.Debugf("Batch timer expired, creating block")
            block := ch.support.CreateNextBlock(batch)
            ch.support.WriteBlock(block, committers, nil)
        case <-ch.exitChan: // exit signal
            logger.Debugf("Exiting")
            return
        }
    }
}
// Source: orderer/solo/consensus.go
```
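
main() leans on a standard Go idiom: receiving from a nil channel blocks forever, so setting the timer variable to nil disables its case in the select until the next call to time.After re-arms it. A self-contained illustration of the idiom, unrelated to the Fabric code base:

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    var timer <-chan time.Time // nil: this select case can never fire
    work := make(chan int)

    go func() { work <- 1 }()

    for i := 0; i < 2; i++ {
        select {
        case n := <-work:
            fmt.Println("got work", n)
            timer = time.After(10 * time.Millisecond) // arm the timer: pending work exists
        case <-timer:
            fmt.Println("timer fired, cutting batch")
            timer = nil // disarm until more work arrives
        }
    }
}
```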

## 4. The kafka Consensus Plugin

### 4.1 Consenter Interface Implementation

```go
type consenterImpl struct {
    brokerConfigVal *sarama.Config
    tlsConfigVal    localconfig.TLS
    retryOptionsVal localconfig.Retry
    kafkaVersionVal sarama.KafkaVersion
}

// Construct a consenterImpl
func New(tlsConfig localconfig.TLS, retryOptions localconfig.Retry, kafkaVersion sarama.KafkaVersion) multichain.Consenter
// Construct a chainImpl
func (consenter *consenterImpl) HandleChain(support multichain.ConsenterSupport, metadata *cb.Metadata) (multichain.Chain, error)
func (consenter *consenterImpl) brokerConfig() *sarama.Config
func (consenter *consenterImpl) retryOptions() localconfig.Retry
// Source: orderer/kafka/consenter.go
```
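
HandleChain recovers the last Kafka offset persisted into the newest block's metadata and passes it to newChain, so that consumption resumes where the previous run stopped. A sketch of the flow, assuming the getLastOffsetPersisted helper listed in section 4.2:

```go
func (consenter *consenterImpl) HandleChain(support multichain.ConsenterSupport, metadata *cb.Metadata) (multichain.Chain, error) {
    // Resume from the offset recorded in the metadata of the newest block
    lastOffsetPersisted := getLastOffsetPersisted(metadata.Value, support.ChainID())
    return newChain(consenter, support, lastOffsetPersisted)
}
```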

### 4.2 Chain Interface Implementation

```go
type chainImpl struct {
    consenter commonConsenter
    support   multichain.ConsenterSupport

    channel             channel
    lastOffsetPersisted int64
    lastCutBlockNumber  uint64

    producer        sarama.SyncProducer
    parentConsumer  sarama.Consumer
    channelConsumer sarama.PartitionConsumer

    errorChan chan struct{}
    haltChan  chan struct{}
    startChan chan struct{}
}

// Construct a chainImpl
func newChain(consenter commonConsenter, support multichain.ConsenterSupport, lastOffsetPersisted int64) (*chainImpl, error)
// Return chain.errorChan
func (chain *chainImpl) Errored() <-chan struct{}
// Spawn the processing goroutine: go startThread(chain)
func (chain *chainImpl) Start()
// Shut the chain down
func (chain *chainImpl) Halt()
// Receive an Envelope, serialize it, and post it to Kafka
func (chain *chainImpl) Enqueue(env *cb.Envelope) bool
// Goroutine body: set up the Kafka objects, then call chain.processMessagesToBlocks()
func startThread(chain *chainImpl)
// Where the actual work of the goroutine happens
func (chain *chainImpl) processMessagesToBlocks() ([]uint64, error)
func (chain *chainImpl) closeKafkaObjects() []error
func getLastCutBlockNumber(blockchainHeight uint64) uint64
func getLastOffsetPersisted(metadataValue []byte, chainID string) int64
func newConnectMessage() *ab.KafkaMessage
func newRegularMessage(payload []byte) *ab.KafkaMessage
func newTimeToCutMessage(blockNumber uint64) *ab.KafkaMessage
// Construct a sarama.ProducerMessage
func newProducerMessage(channel channel, pld []byte) *sarama.ProducerMessage
func processConnect(channelName string) error
func processRegular(regularMessage *ab.KafkaMessageRegular, support multichain.ConsenterSupport, timer *<-chan time.Time, receivedOffset int64, lastCutBlockNumber *uint64) error
func processTimeToCut(ttcMessage *ab.KafkaMessageTimeToCut, support multichain.ConsenterSupport, lastCutBlockNumber *uint64, timer *<-chan time.Time, receivedOffset int64) error
func sendConnectMessage(retryOptions localconfig.Retry, exitChan chan struct{}, producer sarama.SyncProducer, channel channel) error
func sendTimeToCut(producer sarama.SyncProducer, channel channel, timeToCutBlockNumber uint64, timer *<-chan time.Time) error
func setupChannelConsumerForChannel(retryOptions localconfig.Retry, haltChan chan struct{}, parentConsumer sarama.Consumer, channel channel, startFrom int64) (sarama.PartitionConsumer, error)
func setupParentConsumerForChannel(retryOptions localconfig.Retry, haltChan chan struct{}, brokers []string, brokerConfig *sarama.Config, channel channel) (sarama.Consumer, error)
func setupProducerForChannel(retryOptions localconfig.Retry, haltChan chan struct{}, brokers []string, brokerConfig *sarama.Config, channel channel) (sarama.SyncProducer, error)
// Source: orderer/kafka/chain.go
```
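
startThread wires the pieces together: it creates the producer, posts a CONNECT message to verify the partition is reachable, sets up the consumers starting at lastOffsetPersisted + 1, closes startChan so that Enqueue begins accepting messages, and finally enters processMessagesToBlocks. The following is an abridged sketch built from the setup helpers listed above, not a verbatim copy of orderer/kafka/chain.go:

```go
func startThread(chain *chainImpl) {
    var err error

    // Set up the producer used to post messages to the channel's Kafka partition
    chain.producer, err = setupProducerForChannel(chain.consenter.retryOptions(), chain.haltChan,
        chain.support.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.channel)
    if err != nil {
        logger.Panicf("Cannot set up producer: %s", err)
    }

    // Post a CONNECT message to verify the partition is reachable and writable
    if err = sendConnectMessage(chain.consenter.retryOptions(), chain.haltChan, chain.producer, chain.channel); err != nil {
        logger.Panicf("Cannot post CONNECT message: %s", err)
    }

    // Set up the parent consumer and the partition consumer, resuming
    // right after the last offset already written to the ledger
    chain.parentConsumer, err = setupParentConsumerForChannel(chain.consenter.retryOptions(), chain.haltChan,
        chain.support.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.channel)
    if err != nil {
        logger.Panicf("Cannot set up parent consumer: %s", err)
    }
    chain.channelConsumer, err = setupChannelConsumerForChannel(chain.consenter.retryOptions(), chain.haltChan,
        chain.parentConsumer, chain.channel, chain.lastOffsetPersisted+1)
    if err != nil {
        logger.Panicf("Cannot set up channel consumer: %s", err)
    }

    close(chain.startChan) // broadcast readiness: Enqueue may now accept messages

    chain.processMessagesToBlocks() // keep processing until Halt()
}
```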

The code for func (chain *chainImpl) Enqueue(env *cb.Envelope) bool is as follows; the outer select refuses messages until the start phase has completed, and the inner select refuses them once the chain has been halted:

```go
func (chain *chainImpl) Enqueue(env *cb.Envelope) bool {
    select {
    case <-chain.startChan: // the start phase has completed
        select {
        case <-chain.haltChan: // the chain has been halted
            return false
        default:
            marshaledEnv, err := utils.Marshal(env) // serialize the envelope
            if err != nil {
                return false
            }
            payload := utils.MarshalOrPanic(newRegularMessage(marshaledEnv))
            message := newProducerMessage(chain.channel, payload) // construct a sarama.ProducerMessage
            if _, _, err := chain.producer.SendMessage(message); err != nil { // post the message to Kafka
                return false
            }
            return true
        }
    default: // the chain has not started yet
        return false
    }
}
// Source: orderer/kafka/chain.go
```

The code for func newProducerMessage(channel channel, pld []byte) *sarama.ProducerMessage is as follows. Each channel maps to its own Kafka topic with a single partition, and the strict offset order within that partition is what defines the channel's transaction order:

```go
func newProducerMessage(channel channel, pld []byte) *sarama.ProducerMessage {
    return &sarama.ProducerMessage{
        Topic: channel.topic(),
        Key:   sarama.StringEncoder(strconv.Itoa(int(channel.partition()))),
        Value: sarama.ByteEncoder(pld),
    }
}
// Source: orderer/kafka/chain.go
```

The code for func (chain *chainImpl) processMessagesToBlocks() ([]uint64, error), the long-running loop that turns consumed Kafka messages into blocks, is as follows:

```go
func (chain *chainImpl) processMessagesToBlocks() ([]uint64, error) {
    counts := make([]uint64, 11) // for metrics and tests
    msg := new(ab.KafkaMessage)
    var timer <-chan time.Time

    defer func() { // executed on the way out, i.e. after Halt()
        select {
        case <-chain.errorChan: // already closed
        default:
            close(chain.errorChan)
        }
    }()

    for {
        select {
        case <-chain.haltChan: // exit signal
            logger.Debugf("Exiting")
            counts[indexExitChanPass]++
            return counts, nil
        case kafkaErr := <-chain.channelConsumer.Errors(): // consumption error
            logger.Errorf("Error during consumption: %s", kafkaErr)
            counts[indexRecvError]++
            select {
            case <-chain.errorChan: // already closed
            default:
                close(chain.errorChan) // mark the chain as unavailable to Deliver clients
            }
            // Post a CONNECT message to probe the connection and trigger recovery
            go sendConnectMessage(chain.consenter.retryOptions(), chain.haltChan, chain.producer, chain.channel)
        case in := <-chain.channelConsumer.Messages(): // received a message
            select {
            case <-chain.errorChan: // the chain had errored out...
                chain.errorChan = make(chan struct{}) // ...mark it as available again
            default:
            }
            if err := proto.Unmarshal(in.Value, msg); err != nil {
                logger.Criticalf("Unable to unmarshal consumed message: %s", err)
                continue
            }
            counts[indexRecvPass]++
            switch msg.Type.(type) { // dispatch on message type
            case *ab.KafkaMessage_Connect: // CONNECT message
                _ = processConnect(chain.support.ChainID())
                counts[indexProcessConnectPass]++
            case *ab.KafkaMessage_TimeToCut: // TIMETOCUT message
                if err := processTimeToCut(msg.GetTimeToCut(), chain.support, &chain.lastCutBlockNumber, &timer, in.Offset); err != nil {
                    return counts, err
                }
                counts[indexProcessTimeToCutPass]++
            case *ab.KafkaMessage_Regular: // regular (transaction) message
                if err := processRegular(msg.GetRegular(), chain.support, &timer, in.Offset, &chain.lastCutBlockNumber); err != nil {
                    logger.Warningf("Error when processing regular message: %s", err)
                }
                counts[indexProcessRegularPass]++
            }
        case <-timer: // batch timeout expired
            if err := sendTimeToCut(chain.producer, chain.channel, chain.lastCutBlockNumber+1, &timer); err != nil {
                logger.Errorf("Cannot post TIMETOCUT message: %s", err)
            }
            counts[indexSendTimeToCutPass]++
        }
    }
}
// Source: orderer/kafka/chain.go
```

The code for func processRegular(regularMessage *ab.KafkaMessageRegular, support multichain.ConsenterSupport, timer *<-chan time.Time, receivedOffset int64, lastCutBlockNumber *uint64) error is as follows:

```go
func processRegular(regularMessage *ab.KafkaMessageRegular, support multichain.ConsenterSupport, timer *<-chan time.Time, receivedOffset int64, lastCutBlockNumber *uint64) error {
    env := new(cb.Envelope)
    if err := proto.Unmarshal(regularMessage.Payload, env); err != nil { // deserialize into an Envelope
        return fmt.Errorf("unmarshal error = %s", err)
    }
    batches, committers, ok, _ := support.BlockCutter().Ordered(env)
    if ok && len(batches) == 0 && *timer == nil {
        *timer = time.After(support.SharedConfig().BatchTimeout()) // start the batch timer
        return nil
    }
    for i, batch := range batches {
        block := support.CreateNextBlock(batch)
        // Persist the offset of the consumed message into the block metadata,
        // so that a restarted orderer knows where to resume consumption
        encodedLastOffsetPersisted := utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: receivedOffset})
        support.WriteBlock(block, committers[i], encodedLastOffsetPersisted) // write the block
        *lastCutBlockNumber++
    }

    if len(batches) > 0 {
        *timer = nil
    }
    return nil
}
// Source: orderer/kafka/chain.go
```
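
The LastOffsetPersisted value written here is read back at startup by getLastOffsetPersisted, which is how a restarted orderer resumes consuming right after the last message it already committed to the ledger. A sketch, close to the version in orderer/kafka/chain.go; note the fallback of sarama.OffsetOldest - 1, chosen so that lastOffsetPersisted + 1 equals sarama.OffsetOldest, i.e. consume from the beginning:

```go
func getLastOffsetPersisted(metadataValue []byte, chainID string) int64 {
    if metadataValue != nil {
        // Extract the orderer-related metadata from the newest block of the ledger
        kafkaMetadata := &ab.KafkaMetadata{}
        if err := proto.Unmarshal(metadataValue, kafkaMetadata); err != nil {
            logger.Panicf("[channel: %s] Ledger may be corrupted: "+
                "cannot unmarshal orderer metadata in most recent block", chainID)
        }
        return kafkaMetadata.LastOffsetPersisted
    }
    return sarama.OffsetOldest - 1 // no metadata yet: start from the oldest available offset
}
```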

## 5. blockcutter

### 5.1 Receiver Interface Definition

```go
type Receiver interface {
    // Order the message into the pending batch, cutting batches as limits are reached
    Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, committers [][]filter.Committer, validTx bool, pending bool)
    // Return the current pending batch and start a new one
    Cut() ([]*cb.Envelope, []filter.Committer)
}
// Source: orderer/common/blockcutter/blockcutter.go
```

### 5.2 Receiver Interface Implementation

```go
type receiver struct {
    sharedConfigManager   config.Orderer
    filters               *filter.RuleSet
    pendingBatch          []*cb.Envelope
    pendingBatchSizeBytes uint32
    pendingCommitters     []filter.Committer
}

// Construct a receiver
func NewReceiverImpl(sharedConfigManager config.Orderer, filters *filter.RuleSet) Receiver
// Order the message into the pending batch, cutting batches as limits are reached
func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, committerBatches [][]filter.Committer, validTx bool, pending bool)
// Return the current pending batch and start a new one
func (r *receiver) Cut() ([]*cb.Envelope, []filter.Committer)
// Size of the message in bytes
func messageSizeBytes(message *cb.Envelope) uint32
// Source: orderer/common/blockcutter/blockcutter.go
```
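
Cut() hands back the pending batch and resets the accumulator state, and messageSizeBytes counts the payload plus signature bytes. A sketch of both, close to the versions in orderer/common/blockcutter/blockcutter.go:

```go
func (r *receiver) Cut() ([]*cb.Envelope, []filter.Committer) {
    batch := r.pendingBatch
    r.pendingBatch = nil
    committers := r.pendingCommitters
    r.pendingCommitters = nil
    r.pendingBatchSizeBytes = 0 // reset the running byte count
    return batch, committers
}

func messageSizeBytes(message *cb.Envelope) uint32 {
    return uint32(len(message.Payload) + len(message.Signature))
}
```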

The code for func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, committerBatches [][]filter.Committer, validTx bool, pending bool) is as follows:

```go
func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, committerBatches [][]filter.Committer, validTx bool, pending bool) {
    committer, err := r.filters.Apply(msg) // run the message through the filters
    if err != nil {
        logger.Debugf("Rejecting message: %s", err)
        return
    }

    validTx = true
    messageSizeBytes := messageSizeBytes(msg)
    // A message that asks to be isolated, or that exceeds the preferred batch
    // size on its own, is cut into a batch of its own
    if committer.Isolated() || messageSizeBytes > r.sharedConfigManager.BatchSize().PreferredMaxBytes {
        if committer.Isolated() {
            logger.Debugf("Found message which requested to be isolated, cutting into its own batch")
        } else {
            logger.Debugf("The current message, with %v bytes, is larger than the preferred batch size of %v bytes and will be isolated.", messageSizeBytes, r.sharedConfigManager.BatchSize().PreferredMaxBytes)
        }
        // Cut the pending batch, if there is one
        if len(r.pendingBatch) > 0 {
            messageBatch, committerBatch := r.Cut()
            messageBatches = append(messageBatches, messageBatch)
            committerBatches = append(committerBatches, committerBatch)
        }

        // Create a new batch containing just this message
        messageBatches = append(messageBatches, []*cb.Envelope{msg})
        committerBatches = append(committerBatches, []filter.Committer{committer})

        return
    }

    // An ordinary message within the size limit
    messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes > r.sharedConfigManager.BatchSize().PreferredMaxBytes

    if messageWillOverflowBatchSizeBytes { // adding this message would overflow the batch, so cut first
        messageBatch, committerBatch := r.Cut()
        messageBatches = append(messageBatches, messageBatch)
        committerBatches = append(committerBatches, committerBatch)
    }

    // Add the message to the pending batch
    r.pendingBatch = append(r.pendingBatch, msg)
    r.pendingBatchSizeBytes += messageSizeBytes
    r.pendingCommitters = append(r.pendingCommitters, committer)
    pending = true

    if uint32(len(r.pendingBatch)) >= r.sharedConfigManager.BatchSize().MaxMessageCount {
        // The batch has reached the maximum message count, cut it
        messageBatch, committerBatch := r.Cut()
        messageBatches = append(messageBatches, messageBatch)
        committerBatches = append(committerBatches, committerBatch)
        pending = false
    }

    return
}
// Source: orderer/common/blockcutter/blockcutter.go
```

## 6. filter (Message Filters)

For more detail on the filters, see: [Fabric 1.0 Source Code Notes: consenter (Consensus Plugin) #filter](filter.md)
