GO實現Redis:GO實現Redis叢集(5)

csgopher發表於2023-03-27

  • 採用一致性hash演算法將key分散到不同的節點,客戶端可以連線到叢集中任意一個節點
  • https://github.com/csgopher/go-redis
  • 本文涉及以下檔案:
    consistenthash:實現新增和選擇節點方法
    standalone_database:單機database
    client:客戶端
    client_pool:實現連線池
    cluster_database:對key進行路由
    com:與其他節點通訊
    router,ping,keys,del,select:各類命令的轉發具體邏輯

一致性雜湊

為什麼需要一致性 hash?
在採用分片方式建立分散式快取時,我們面臨的第一個問題是如何決定儲存資料的節點。最自然的方式是參考 hash 表的做法,假設叢集中存在 n 個節點,我們用 node = hashCode(key) % n 來決定所屬的節點。
普通 hash 演算法解決了如何選擇節點的問題,但在分散式系統中經常出現增加節點或某個節點當機的情況。若節點數 n 發生變化, 大多數 key 根據 node = hashCode(key) % n 計算出的節點都會改變。這意味著若要在 n 變化後維持系統正常運轉,需要將大多數資料在節點間進行重新分佈。這個操作會消耗大量的時間和頻寬等資源,這在生產環境下是不可接受的。
演算法原理
一致性 hash 演算法的目的是在節點數量 n 變化時, 使盡可能少的 key 需要進行節點間重新分佈。一致性 hash 演算法將資料 key 和伺服器地址 addr 雜湊到 2^32 的空間中。
我們將 2^32 個整數首尾相連形成一個環,首先計算伺服器地址 addr 的 hash 值放置在環上。然後計算 key 的 hash 值放置在環上,順時針查詢,將資料放在找到的第一個節點上。
在增加或刪除節點時只有該節點附近的資料需要重新分佈,從而解決了上述問題。
如果伺服器節點較少則比較容易出現資料分佈不均勻的問題,一般來說環上的節點越多資料分佈越均勻。我們不需要真的增加一臺伺服器,只需要將實際的伺服器節點對映為幾個虛擬節點放在環上即可。
參考:https://www.cnblogs.com/Finley/p/14038398.html


lib/consistenthash/consistenthash.go

// HashFunc is the hash function signature used by NodeMap; stdlib
// functions such as crc32.ChecksumIEEE satisfy it.
type HashFunc func(data []byte) uint32

// NodeMap places node names on a consistent-hash ring and picks the
// owning node for any key.
type NodeMap struct {
	hashFunc    HashFunc
	nodeHashs   []int          // sorted node hashes forming the ring
	nodehashMap map[int]string // node hash -> node name
}

// NewNodeMap returns a NodeMap hashing with fn, falling back to
// crc32.ChecksumIEEE when fn is nil.
func NewNodeMap(fn HashFunc) *NodeMap {
	if fn == nil {
		fn = crc32.ChecksumIEEE
	}
	return &NodeMap{
		hashFunc:    fn,
		nodehashMap: make(map[int]string),
	}
}

// IsEmpty reports whether the ring holds no nodes.
func (m *NodeMap) IsEmpty() bool {
	return len(m.nodeHashs) == 0
}

// AddNode hashes each non-empty name onto the ring and keeps the
// ring sorted so PickNode can binary-search it.
func (m *NodeMap) AddNode(keys ...string) {
	for _, key := range keys {
		if key == "" {
			continue
		}
		h := int(m.hashFunc([]byte(key)))
		m.nodeHashs = append(m.nodeHashs, h)
		m.nodehashMap[h] = key
	}
	sort.Ints(m.nodeHashs)
}

// PickNode returns the first node clockwise of key's hash on the ring,
// or "" when no nodes have been added.
func (m *NodeMap) PickNode(key string) string {
	if m.IsEmpty() {
		return ""
	}
	h := int(m.hashFunc([]byte(key)))
	// index of the first node hash >= h; sort.Search yields len() when
	// none matches, and the modulo wraps that back to index 0
	idx := sort.Search(len(m.nodeHashs), func(i int) bool {
		return m.nodeHashs[i] >= h
	})
	idx %= len(m.nodeHashs)
	return m.nodehashMap[m.nodeHashs[idx]]
}

HashFunc:hash函式定義,Go的hash函式就是這樣定義的
NodeMap:儲存所有節點和節點的hash

  • nodeHashs:各個節點的hash值,順序的
  • nodehashMap<hash, 節點>

AddNode:新增節點到一致性雜湊中
PickNode:選擇節點。使用二分查詢,如果hash比nodeHashs中最大的hash還要大,idx=0

database/standalone_database.go

// StandaloneDatabase is the single-node database engine (the former
// database/database.go), kept separate from the cluster routing layer.
type StandaloneDatabase struct {
   dbSet []*DB // the logical redis databases, selected by index
   aofHandler *aof.AofHandler // append-only-file persistence handler
}

func NewStandaloneDatabase() *StandaloneDatabase {
  ......
}

把database/database改名為database/standalone_database,再增加一個cluster_database用於對key的路由

resp/client/client.go

// Client is a pipeline mode redis client: requests are written without
// waiting for earlier replies, and the reader goroutine matches replies
// back to requests in FIFO order.
type Client struct {
   conn        net.Conn
   pendingReqs chan *request // wait to send
   waitingReqs chan *request // waiting response
   ticker      *time.Ticker // drives the periodic heartbeat
   addr        string // server address, kept for reconnecting

   working *sync.WaitGroup // its counter presents unfinished requests(pending and waiting)
}

// request is a message sends to redis server
type request struct {
   id        uint64 // reserved sequence number (not used in this snippet)
   args      [][]byte // command line, e.g. ["SET", "k", "v"]
   reply     resp.Reply // filled in by the reader goroutine
   heartbeat bool // true for internally generated PINGs
   waiting   *wait.Wait // signalled once reply or err has been set
   err       error // set when sending ultimately failed
}

const (
   chanSize = 256 // buffer size of the request channels
   maxWait  = 3 * time.Second // upper bound a caller waits for a reply
)

// MakeClient dials addr over TCP and returns a client that is ready to
// Start; no goroutines are running yet.
func MakeClient(addr string) (*Client, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		addr:        addr,
		working:     &sync.WaitGroup{},
		pendingReqs: make(chan *request, chanSize),
		waitingReqs: make(chan *request, chanSize),
	}
	return c, nil
}

// Start launches the background goroutines: the writer loop, the reader
// loop, and a heartbeat that PINGs the server every 10 seconds.
func (client *Client) Start() {
   client.ticker = time.NewTicker(10 * time.Second)
   go client.handleWrite()
   go func() {
      err := client.handleRead()
      if err != nil {
         logger.Error(err)
      }
   }()
   go client.heartbeat()
}

// Close stops the background goroutines and closes the connection.
// The order matters: stop new work first, drain in-flight requests,
// then tear the connection and channels down.
// NOTE(review): a Send that races with Close will panic on the closed
// pendingReqs channel — callers must not call Send after/while closing.
func (client *Client) Close() {
   client.ticker.Stop()
   // stop new request
   close(client.pendingReqs)

   // wait stop process
   client.working.Wait()

   // clean
   _ = client.conn.Close()
   close(client.waitingReqs)
}

// handleConnectionError tries to recover from a write error by closing
// the (possibly broken) connection and re-dialing the server. It
// returns nil once a fresh connection is installed.
func (client *Client) handleConnectionError(err error) error {
   err1 := client.conn.Close()
   if err1 != nil {
      if opErr, ok := err1.(*net.OpError); ok {
         // ignore "already closed"; any other close failure is fatal.
         // NOTE(review): matching the error string is fragile;
         // errors.Is(err1, net.ErrClosed) is the modern alternative.
         if opErr.Err.Error() != "use of closed network connection" {
            return err1
         }
      } else {
         return err1
      }
   }
   conn, err1 := net.Dial("tcp", client.addr)
   if err1 != nil {
      logger.Error(err1)
      return err1
   }
   client.conn = conn
   // restart the reader loop on the new connection
   go func() {
      _ = client.handleRead()
   }()
   return nil
}

// heartbeat sends a PING on every ticker tick; the loop ends when
// Close stops the ticker.
func (client *Client) heartbeat() {
   for range client.ticker.C {
      client.doHeartbeat()
   }
}

// handleWrite is the writer loop: it drains pendingReqs and writes each
// request to the server; it exits when Close closes the channel.
func (client *Client) handleWrite() {
   for req := range client.pendingReqs {
      client.doRequest(req)
   }
}

// Send enqueues args as one command and blocks until the reply arrives,
// the request fails, or maxWait elapses.
func (client *Client) Send(args [][]byte) resp.Reply {
   // note: this local variable intentionally shadows the request type
   request := &request{
      args:      args,
      heartbeat: false,
      waiting:   &wait.Wait{},
   }
   request.waiting.Add(1)
   client.working.Add(1)
   defer client.working.Done()
   client.pendingReqs <- request
   timeout := request.waiting.WaitWithTimeout(maxWait)
   if timeout {
      return reply.MakeErrReply("server time out")
   }
   if request.err != nil {
      return reply.MakeErrReply("request failed")
   }
   return request.reply
}

// doHeartbeat sends a single PING and waits (bounded by maxWait) for
// its reply; the reply itself is discarded — the call only keeps the
// connection alive and the pipeline in sync.
func (client *Client) doHeartbeat() {
   request := &request{
      args:      [][]byte{[]byte("PING")},
      heartbeat: true,
      waiting:   &wait.Wait{},
   }
   request.waiting.Add(1)
   client.working.Add(1)
   defer client.working.Done()
   client.pendingReqs <- request
   request.waiting.WaitWithTimeout(maxWait)
}

// doRequest writes one request to the server, retrying the write up to
// three times after attempting to re-establish the connection; on
// success the request is queued for the reader goroutine, on failure
// the waiter is released with the error recorded.
func (client *Client) doRequest(req *request) {
	if req == nil || len(req.args) == 0 {
		return
	}
	bytes := reply.MakeMultiBulkReply(req.args).ToBytes()
	_, err := client.conn.Write(bytes)
	for attempt := 0; err != nil && attempt < 3; attempt++ {
		err = client.handleConnectionError(err)
		if err == nil {
			_, err = client.conn.Write(bytes)
		}
	}
	if err != nil {
		req.err = err
		req.waiting.Done()
		return
	}
	// hand the request over to be matched with the next server reply
	client.waitingReqs <- req
}

// finishRequest pairs the given reply with the oldest waiting request
// (replies arrive in request order on a single connection) and wakes
// the goroutine blocked in Send; panics are contained so the reader
// loop survives a malformed pairing.
func (client *Client) finishRequest(reply resp.Reply) {
   defer func() {
      if err := recover(); err != nil {
         debug.PrintStack()
         logger.Error(err)
      }
   }()
   request := <-client.waitingReqs
   if request == nil {
      return
   }
   request.reply = reply
   if request.waiting != nil {
      request.waiting.Done()
   }
}

// handleRead is the reader loop: it parses replies off the connection
// and matches each to a waiting request until the stream ends.
func (client *Client) handleRead() error {
   ch := parser.ParseStream(client.conn)
   for payload := range ch {
      if payload.Err != nil {
         // surface the parse error as an error reply to the waiter
         client.finishRequest(reply.MakeErrReply(payload.Err.Error()))
         continue
      }
      client.finishRequest(payload.Data)
   }
   return nil
}

client:Redis客戶端,具體看:https://www.cnblogs.com/Finley/p/14028402.html

go.mod

require github.com/jolestar/go-commons-pool/v2 v2.1.2

key的轉發需要當前節點儲存其他節點的連線,互相作為客戶端,使用連線池將其他連線池化

cluster/client_pool.go

// connectionFactory builds pooled client connections to one peer node;
// it implements go-commons-pool's PooledObjectFactory interface.
type connectionFactory struct {
   Peer string // address of the peer this factory connects to
}

// MakeObject dials the peer, starts the client's background goroutines
// and wraps the client as a pooled object.
func (f *connectionFactory) MakeObject(ctx context.Context) (*pool.PooledObject, error) {
   c, err := client.MakeClient(f.Peer)
   if err != nil {
      return nil, err
   }
   c.Start()
   return pool.NewPooledObject(c), nil
}

// DestroyObject closes the pooled client when the pool evicts it.
func (f *connectionFactory) DestroyObject(ctx context.Context, object *pool.PooledObject) error {
   c, ok := object.Object.(*client.Client)
   if !ok {
      return errors.New("type mismatch")
   }
   c.Close()
   return nil
}

// ValidateObject reports whether a pooled connection is still usable;
// no real health check is performed here, so it always says yes.
func (f *connectionFactory) ValidateObject(ctx context.Context, object *pool.PooledObject) bool {
   // do validate
   return true
}

// ActivateObject is a no-op hook run when an object is borrowed.
func (f *connectionFactory) ActivateObject(ctx context.Context, object *pool.PooledObject) error {
   // do activate
   return nil
}

// PassivateObject is a no-op hook run when an object is returned.
func (f *connectionFactory) PassivateObject(ctx context.Context, object *pool.PooledObject) error {
   // do passivate
   return nil
}

client_pool:使用連線池的NewObjectPoolWithDefaultConfig建立連線,需要實現PooledObjectFactory介面

redis.conf

self 127.0.0.1:6379
peers 127.0.0.1:6380

配置中寫自己和其他節點的地址

cluster/cluster_database.go

// clusterDatabase routes commands to the node that owns each key.
type clusterDatabase struct {
   self           string // this node's own address
   nodes          []string // every node address (peers + self)
   peerPicker     *consistenthash.NodeMap // key -> node via consistent hashing
   peerConnection map[string]*pool.ObjectPool // peer address -> connection pool
   db             databaseface.Database // the local standalone database
}

// MakeClusterDatabase builds the cluster layer: a local standalone
// database, a consistent-hash ring containing every node (peers plus
// self), and one connection pool per peer.
func MakeClusterDatabase() *clusterDatabase {
	cluster := &clusterDatabase{
		self:           config.Properties.Self,
		db:             database.NewStandaloneDatabase(),
		peerPicker:     consistenthash.NewNodeMap(nil),
		peerConnection: make(map[string]*pool.ObjectPool),
	}
	nodes := make([]string, 0, len(config.Properties.Peers)+1)
	nodes = append(nodes, config.Properties.Peers...)
	nodes = append(nodes, config.Properties.Self)
	cluster.peerPicker.AddNode(nodes...)
	cluster.nodes = nodes
	// one lazily-filled connection pool per remote peer
	ctx := context.Background()
	for _, peer := range config.Properties.Peers {
		factory := &connectionFactory{Peer: peer}
		cluster.peerConnection[peer] = pool.NewObjectPoolWithDefaultConfig(ctx, factory)
	}
	return cluster
}

// Close shuts down the local database; the peer connection pools are
// left to the pool implementation's own lifecycle.
func (cluster *clusterDatabase) Close() {
	cluster.db.Close()
}

func (cluster *ClusterDatabase) AfterClientClose(c resp.Connection) {
	cluster.db.AfterClientClose(c)
}

type CmdFunc func(cluster *clusterDatabase, c resp.Connection, cmdAndArgs [][]byte) resp.Reply

cluster_database用於對key的路由
clusterDatabase:
nodes:所有節點
peerPicker :節點的新增和選擇
peerConnection:Map<node, 連線池>
db:單機database
CmdFunc:表示Redis的指令型別

cluster/com.go

// getPeerClient borrows a client connection to the given peer from its
// connection pool.
func (cluster *clusterDatabase) getPeerClient(peer string) (*client.Client, error) {
	objPool, ok := cluster.peerConnection[peer]
	if !ok {
		return nil, errors.New("connection factory not found")
	}
	raw, err := objPool.BorrowObject(context.Background())
	if err != nil {
		return nil, err
	}
	if conn, ok := raw.(*client.Client); ok {
		return conn, nil
	}
	return nil, errors.New("connection factory make wrong type")
}

// returnPeerClient gives a borrowed connection back to the peer's pool.
func (cluster *clusterDatabase) returnPeerClient(peer string, peerClient *client.Client) error {
   connectionFactory, ok := cluster.peerConnection[peer]
   if !ok {
      return errors.New("connection factory not found")
   }
   return connectionFactory.ReturnObject(context.Background(), peerClient)
}

// relay executes args on the given peer: locally when peer is this
// node, otherwise over a pooled client connection.
func (cluster *clusterDatabase) relay(peer string, c resp.Connection, args [][]byte) resp.Reply {
   if peer == cluster.self {
      return cluster.db.Exec(c, args)
   }
   peerClient, err := cluster.getPeerClient(peer)
   if err != nil {
      return reply.MakeErrReply(err.Error())
   }
   defer func() {
      _ = cluster.returnPeerClient(peer, peerClient)
   }()
   // the peer must operate on the same logical DB as this client.
   // NOTE(review): the SELECT reply is not checked — a failed SELECT
   // would silently execute args against the wrong database.
   peerClient.Send(utils.ToCmdLine("SELECT", strconv.Itoa(c.GetDBIndex())))
   return peerClient.Send(args)
}

// broadcast relays args to every node of the cluster (including self)
// and collects each node's reply keyed by its address.
func (cluster *clusterDatabase) broadcast(c resp.Connection, args [][]byte) map[string]resp.Reply {
	results := make(map[string]resp.Reply)
	for _, node := range cluster.nodes {
		results[node] = cluster.relay(node, c, args)
	}
	return results
}

communication:與其他節點通訊。執行模式有本地(自己執行),轉發(別人執行),群發(所有節點執行)
getPeerClient :從連線池拿一個連線
returnPeerClient :歸還連線
relay :轉發指令給其他客戶端,傳送指令之前需要先發一下選擇的db
broadcast :指令廣播給所有節點

cluster/router.go

// makeRouter maps each supported command name (lowercase) to its
// cluster handler; unknown commands are rejected by Exec.
func makeRouter() map[string]CmdFunc {
	return map[string]CmdFunc{
		"ping":     ping,
		"del":      Del,
		"exists":   defaultFunc,
		"type":     defaultFunc,
		"rename":   Rename,
		"renamenx": Rename,
		"set":      defaultFunc,
		"setnx":    defaultFunc,
		"get":      defaultFunc,
		"getset":   defaultFunc,
		"flushdb":  FlushDB,
		"select":   execSelect,
	}
}

// defaultFunc routes a single-key command: pick the node that owns
// args[1] on the hash ring and relay the whole command line there.
func defaultFunc(cluster *clusterDatabase, c resp.Connection, args [][]byte) resp.Reply {
    key := string(args[1])
    peer := cluster.peerPicker.PickNode(key)
    return cluster.relay(peer, c, args)
}

defaultFunc:轉發指令的預設實現

cluster/ping.go

// ping answers PING on the local database; no forwarding is needed.
func ping(cluster *clusterDatabase, c resp.Connection, cmdAndArgs [][]byte) resp.Reply {
   return cluster.db.Exec(c, cmdAndArgs)
}

cluster/rename.go

// Rename handles RENAME/RENAMENX in cluster mode. Both keys must hash
// to the same node, because the operation cannot span two peers.
func Rename(cluster *clusterDatabase, c resp.Connection, args [][]byte) resp.Reply {
	if len(args) != 3 {
		return reply.MakeErrReply("ERR wrong number of arguments for 'rename' command")
	}
	src, dest := string(args[1]), string(args[2])
	srcPeer := cluster.peerPicker.PickNode(src)
	destPeer := cluster.peerPicker.PickNode(dest)
	if srcPeer != destPeer {
		return reply.MakeErrReply("ERR rename must within one slot in cluster mode")
	}
	// both keys live on srcPeer; let that node do the whole rename
	return cluster.relay(srcPeer, c, args)
}

Rename:修改key的name,兩個key的hash必須在同一個節點中

cluster/keys.go

// FlushDB broadcasts the flush to every node. It returns OK only when
// all nodes succeeded, otherwise the first error reply encountered.
func FlushDB(cluster *clusterDatabase, c resp.Connection, args [][]byte) resp.Reply {
	for _, v := range cluster.broadcast(c, args) {
		if reply.IsErrorReply(v) {
			errReply := v.(reply.ErrorReply)
			return reply.MakeErrReply("error occurs: " + errReply.Error())
		}
	}
	return &reply.OkReply{}
}

cluster/del.go

// Del broadcasts DEL to every node and sums the per-node deleted
// counts; on any error it reports the first error encountered.
//
// Fixed: when a node returned a non-integer reply the original code set
// errReply but fell through to `deleted += intReply.Code` with a nil
// intReply, panicking with a nil-pointer dereference. The loop now
// stops at the first bad reply, matching the error-reply case.
func Del(cluster *clusterDatabase, c resp.Connection, args [][]byte) resp.Reply {
	replies := cluster.broadcast(c, args)
	var errReply reply.ErrorReply
	var deleted int64 = 0
	for _, v := range replies {
		if reply.IsErrorReply(v) {
			errReply = v.(reply.ErrorReply)
			break
		}
		intReply, ok := v.(*reply.IntReply)
		if !ok {
			errReply = reply.MakeErrReply("error")
			break
		}
		deleted += intReply.Code
	}

	if errReply == nil {
		return reply.MakeIntReply(deleted)
	}
	return reply.MakeErrReply("error occurs: " + errReply.Error())
}

cluster/select.go

// execSelect runs SELECT locally: switching the logical DB only changes
// this client connection's own state, so nothing is forwarded.
func execSelect(cluster *clusterDatabase, c resp.Connection, cmdAndArgs [][]byte) resp.Reply {
   return cluster.db.Exec(c, cmdAndArgs)
}

cluster/cluster_database.go

// router maps lowercase command names to their cluster handlers.
var router = makeRouter()

// Exec dispatches one command line: look up the handler by the
// lowercased command name and run it; a panic inside a handler is
// recovered and converted into an unknown-error reply.
func (cluster *clusterDatabase) Exec(c resp.Connection, cmdLine [][]byte) (result resp.Reply) {
   defer func() {
      if err := recover(); err != nil {
         logger.Warn(fmt.Sprintf("error occurs: %v\n%s", err, string(debug.Stack())))
         result = &reply.UnknownErrReply{}
      }
   }()
   cmdName := strings.ToLower(string(cmdLine[0]))
   cmdFunc, ok := router[cmdName]
   if !ok {
      return reply.MakeErrReply("ERR unknown command '" + cmdName + "', or not supported in cluster mode")
   }
   result = cmdFunc(cluster, c, cmdLine)
   return
}

resp/handler/handler.go

// MakeHandler chooses the database engine: cluster mode when both a
// self address and at least one peer are configured, otherwise a plain
// standalone database.
func MakeHandler() *RespHandler {
	var db databaseface.Database
	isCluster := config.Properties.Self != "" && len(config.Properties.Peers) > 0
	if isCluster {
		db = cluster.MakeClusterDatabase()
	} else {
		db = database.NewStandaloneDatabase()
	}
	return &RespHandler{db: db}
}

MakeHandler:判斷是單機還是叢集

測試

先go build,開啟專案資料夾找到exe檔案,把exe檔案和redis.conf放到一個資料夾裡,redis.conf改成如下,然後啟動exe檔案。再回到GoLand啟動第二個節點6379。

bind 0.0.0.0
port 6380

appendonly yes
appendfilename appendonly.aof

self 127.0.0.1:6380
peers 127.0.0.1:6379

相關文章