轉載自:etcd實現分散式鎖
當併發的訪問共享資源的時候,如果沒有加鎖的話,無法保證共享資源安全性和正確性。這個時候就需要用到鎖
1、需要具備的特性
- 需要保證互斥訪問(分散式環境需要保證不同節點、不同執行緒的互斥訪問)
- 需要有超時機制,防止鎖意外未釋放,導致其他節點無法獲取到鎖;也要保證任務能夠正常執行完成,不能超時了任務還沒結束,導致任務執行到一半鎖就被釋放
- 需要有阻塞和非阻塞兩種請求鎖的介面
2、本地鎖
當業務執行在同一個執行緒內,也就是我初始化一個本地鎖,其他請求也認這把鎖。一般是服務部署在單機環境下。
我們可以看下下面的例子,開1000個goroutine併發的給Counter做自增操作,結果會是什麼樣的呢?
package main

import (
	"fmt"
	"sync"
)

var sg sync.WaitGroup

// Counter holds a bare integer with no synchronization at all —
// this program deliberately demonstrates the resulting data race.
type Counter struct {
	count int
}

// Incr adds one to the count (intentionally unguarded).
func (c *Counter) Incr() {
	c.count++
}

// Count reports the current total.
func (c *Counter) Count() int {
	return c.count
}

func main() {
	counter := &Counter{}
	// Fire 1000 concurrent increments to simulate parallel requests.
	for i := 0; i < 1000; i++ {
		sg.Add(1)
		go func() {
			counter.Incr()
			sg.Done()
		}()
	}
	sg.Wait()
	fmt.Println(counter.Count())
}
結果是count的數量並不是預想中的1000,而是下面這樣,每次列印出的結果都不一樣,但是接近1000
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
953
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
982
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
984
出現這個問題的原因就是沒有給自增操作加鎖
下面我們修改程式碼如下,在Incr中加上go的mutex互斥鎖
package main

import (
	"fmt"
	"sync"
)

var sg sync.WaitGroup

// Counter is a concurrency-safe counter guarded by a mutex.
type Counter struct {
	count int
	mu    sync.Mutex
}

// Incr atomically adds one to the count.
func (m *Counter) Incr() {
	// Take the lock before every write and release it when done.
	m.mu.Lock()
	defer m.mu.Unlock()
	m.count++
}

// Count returns the current total. It takes the same lock as Incr:
// an unguarded read here would be a data race (flagged by `go run -race`)
// whenever Count is called while increments are still in flight.
func (m *Counter) Count() int {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.count
}

func main() {
	c := &Counter{}
	for i := 0; i < 1000; i++ {
		sg.Add(1)
		go func() {
			c.Incr()
			sg.Done()
		}()
	}
	sg.Wait()
	fmt.Println(c.Count())
}
可以看到現在count正常輸出1000了
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
1000
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
1000
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
1000
3、etcd分散式鎖
簡單部署一個etcd叢集
├── docker-compose.yml
├── etcd
│ └── Dockerfile
Dockerfile檔案內容
# Base image: Bitnami's packaged etcd (configured via ETCD_* env vars in compose).
FROM bitnami/etcd:latest
LABEL maintainer="liuyuede123 <liufutianoppo@163.com>"
Docker-compose.yml內容
version: '3.5'
# Network configuration
networks:
backend:
driver: bridge
# Service container configuration
services:
etcd1: # custom container name
build:
context: etcd # directory containing the Dockerfile used for the build
environment:
- TZ=Asia/Shanghai
- ALLOW_NONE_AUTHENTICATION=yes
- ETCD_NAME=etcd1
- ETCD_INITIAL_ADVERTISE_PEER_URLS=http://etcd1:2380
- ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
- ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
- ETCD_ADVERTISE_CLIENT_URLS=http://etcd1:2379
- ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
- ETCD_INITIAL_CLUSTER=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
- ETCD_INITIAL_CLUSTER_STATE=new
ports: # host:container port mappings
- "12379:2379"
- "12380:2380"
networks:
- backend
restart: always
etcd2: # custom container name
build:
context: etcd # directory containing the Dockerfile used for the build
environment:
- TZ=Asia/Shanghai
- ALLOW_NONE_AUTHENTICATION=yes
- ETCD_NAME=etcd2
- ETCD_INITIAL_ADVERTISE_PEER_URLS=http://etcd2:2380
- ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
- ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
- ETCD_ADVERTISE_CLIENT_URLS=http://etcd2:2379
- ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
- ETCD_INITIAL_CLUSTER=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
- ETCD_INITIAL_CLUSTER_STATE=new
ports: # host:container port mappings
- "22379:2379"
- "22380:2380"
networks:
- backend
restart: always
etcd3: # custom container name
build:
context: etcd # directory containing the Dockerfile used for the build
environment:
- TZ=Asia/Shanghai
- ALLOW_NONE_AUTHENTICATION=yes
- ETCD_NAME=etcd3
- ETCD_INITIAL_ADVERTISE_PEER_URLS=http://etcd3:2380
- ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
- ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
- ETCD_ADVERTISE_CLIENT_URLS=http://etcd3:2379
- ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
- ETCD_INITIAL_CLUSTER=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
- ETCD_INITIAL_CLUSTER_STATE=new
ports: # host:container port mappings
- "32379:2379"
- "32380:2380"
networks:
- backend
restart: always
執行docker-compose up -d
啟動etcd服務,可以看到docker中已經啟動了3個服務
實現互斥訪問
package main
import (
"fmt"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"sync"
)
var sg sync.WaitGroup
type Counter struct {
count int
}
func (m *Counter) Incr() {
m.count++
}
func (m *Counter) Count() int {
return m.count
}
func main() {
endpoints := []string{"http://127.0.0.1:12379", "http://127.0.0.1:22379", "http://127.0.0.1:32379"}
// 初始化etcd客戶端
client, err := clientv3.New(clientv3.Config{Endpoints: endpoints})
if err != nil {
fmt.Println(err)
return
}
defer client.Close()
counter := &Counter{}
sg.Add(100)
for i := 0; i < 100; i++ {
go func() {
// 這裡會生成租約,預設是60秒
session, err := concurrency.NewSession(client)
if err != nil {
panic(err)
}
defer session.Close()
locker := concurrency.NewLocker(session, "/my-test-lock")
locker.Lock()
counter.Incr()
locker.Unlock()
sg.Done()
}()
}
sg.Wait()
fmt.Println("count:", counter.Count())
}
執行結果:
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
count: 100
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
count: 100
user@userdeMacBook-Pro ~/go/src/go-demo/mutex go run main.go
count: 100
實現超時機制
當某個客戶端持有鎖時,由於某些原因導致鎖未釋放,就會導致這個客戶端一直持有這把鎖,其他客戶端一直獲取不到鎖。所以需要分散式鎖實現超時機制,當鎖未釋放時,會因為etcd的租約到期而釋放鎖。當業務正常處理時,租約到期之前會繼續續約,直到業務處理完畢釋放鎖。
package main
import (
"fmt"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"sync"
"time"
)
var sg sync.WaitGroup
type Counter struct {
count int
}
func (m *Counter) Incr() {
m.count++
}
func (m *Counter) Count() int {
return m.count
}
func main() {
endpoints := []string{"http://127.0.0.1:12379", "http://127.0.0.1:22379", "http://127.0.0.1:32379"}
client, err := clientv3.New(clientv3.Config{Endpoints: endpoints})
if err != nil {
fmt.Println(err)
return
}
defer client.Close()
counter := &Counter{}
session, err := concurrency.NewSession(client)
if err != nil {
panic(err)
}
defer session.Close()
locker := concurrency.NewLocker(session, "/my-test-lock")
fmt.Println("locking...", time.Now().Format("2006-01-02 15:04:05"))
locker.Lock()
fmt.Println("locked...", time.Now().Format("2006-01-02 15:04:05"))
// 模擬業務
time.Sleep(100 * time.Second)
counter.Incr()
locker.Unlock()
fmt.Println("released...", time.Now().Format("2006-01-02 15:04:05"))
fmt.Println("count:", counter.Count())
}
命令列開2個視窗,第一個視窗執行程式並獲取鎖,之後模擬意外退出並沒有呼叫unlock方法
go run main.go
locking... 2022-09-03 23:41:48 # 租約生成時間
locked... 2022-09-03 23:41:48
^Csignal: interrupt
第二個視窗,在第一個視窗退出之前嘗試獲取鎖,此時是阻塞狀態。第一個視窗退出之後由於租約還沒到期,第二個視窗仍處於等待獲取鎖的狀態。等到第一個視窗的租約到期(預設60秒),第二個視窗才獲取鎖成功
locking... 2022-09-03 23:41:52
locked... 2022-09-03 23:42:48 # 第一個租約60秒到期,獲取鎖成功
released... 2022-09-03 23:44:28
count: 1
實現阻塞和非阻塞介面
上面的例子中已經實現了阻塞介面,即當前有獲取到鎖的請求,則其他請求阻塞等待鎖釋放
非阻塞的方式就是嘗試獲取鎖,如果失敗立即返回。etcd的concurrency套件中實現了TryLock方法
// TryLock locks the mutex if not already locked by another session.
// If lock is held by another session, return immediately after attempting necessary cleanup
// The ctx argument is used for the sending/receiving Txn RPC.
func (m *Mutex) TryLock(ctx context.Context) error {
具體看下面的例子
package main
import (
"context"
"fmt"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
"sync"
"time"
)
var sg sync.WaitGroup
type Counter struct {
count int
}
func (m *Counter) Incr() {
m.count++
}
func (m *Counter) Count() int {
return m.count
}
func main() {
endpoints := []string{"http://127.0.0.1:12379", "http://127.0.0.1:22379", "http://127.0.0.1:32379"}
client, err := clientv3.New(clientv3.Config{Endpoints: endpoints})
if err != nil {
fmt.Println(err)
return
}
defer client.Close()
counter := &Counter{}
session, err := concurrency.NewSession(client)
if err != nil {
panic(err)
}
defer session.Close()
// 此處使用newMutex初始化
locker := concurrency.NewMutex(session, "/my-test-lock")
fmt.Println("locking...", time.Now().Format("2006-01-02 15:04:05"))
err = locker.TryLock(context.Background())
// 獲取鎖失敗就拋錯
if err != nil {
fmt.Println("lock failed", err)
return
}
fmt.Println("locked...", time.Now().Format("2006-01-02 15:04:05"))
time.Sleep(100 * time.Second)
counter.Incr()
err = locker.Unlock(context.Background())
if err != nil {
fmt.Println("unlock failed", err)
return
}
fmt.Println("released...", time.Now().Format("2006-01-02 15:04:05"))
fmt.Println("count:", counter.Count())
}
視窗1、視窗2執行結果
go run main.go
locking... 2022-09-04 00:00:21
locked... 2022-09-04 00:00:21
released... 2022-09-04 00:02:01
count: 1
go run main.go
locking... 2022-09-04 00:00:27
lock failed mutex: Locked by another session
本作品採用《CC 協議》,轉載必須註明作者和本文連結