redis.cluster/memcached.cluster/vmware esxi

小短腿電工發表於2020-07-03

1. 安裝配置redis的cluster 叢集

redis 叢集高可用

實驗環境

192.168.198.131 openvpn-server #42-Ubuntu SMP Mon Jun 8 14:14:24 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
192.168.198.132 openvpn-node1 #42-Ubuntu SMP Mon Jun 8 14:14:24 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
192.168.198.133 openvpn-node2 #42-Ubuntu SMP Mon Jun 8 14:14:24 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
192.168.198.134 openvpn-node3 #42-Ubuntu SMP Mon Jun 8 14:14:24 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux

redis 安裝

root@openvpn-server:~# apt list redis  #主節點兩張網路卡,主網路卡可以上網
正在列表... 完成
redis/focal,now 5:5.0.7-2 all [已安裝]
#apt-get download redis <node節點提示缺啥包下載啥包>
#其他節點只有內網網路卡這裡本地安裝
root@openvpn-node1:/opt# ls
libhiredis0.14_0.14.0-6_amd64.deb      liblua5.1-0_5.1.5-8.1build4_amd64.deb  offlinePackage.tar.gz      redis-server_5%3a5.0.7-2_amd64.deb
libjemalloc2_5.2.1-1ubuntu1_amd64.deb  lua-bitop_1.0.2-5_amd64.deb            redis_5%3a5.0.7-2_all.deb  redis-tools_5%3a5.0.7-2_amd64.deb

建立叢集

#allnode
mkdir -p /var/lib/redis/redis6379
mkdir -p /var/lib/redis/redis6380
chown redis:redis /var/lib/redis/ -R

#openvpn-server
root@openvpn-server:~#  cat /etc/redis/redis.conf | grep -v "#" | grep -v ";" | grep -v "^$"
bind 192.168.198.131 ::1
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis/redis-server6379.pid
loglevel notice
logfile /var/log/redis/redis-server6379.log
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error no
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /var/lib/redis/redis6379
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
root@openvpn-server:~# cat /etc/redis/redis6380.conf | grep -v "#" | grep -v ";" | grep -v "^$"
bind 192.168.198.131 ::1
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis/redis-server6380.pid
loglevel notice
logfile /var/log/redis/redis-server6380.log
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error no
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /var/lib/redis/redis6380
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly yes
appendfilename "appendonly6380.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
#openvpn-nodeall
	#配置檔案修改bind
	redis-server /etc/redis/redis6379.conf 
	redis-server /etc/redis/redis6380.conf 
	#驗證程式啟動

#修改所有配置檔案追加以下配置 重新啟動叢集
去除註釋    requirepass 123123
去除註釋    cluster-enabled yes        
去除註釋    cluster-config-file nodes-6379.conf  #6380 例項需改為 nodes-6380.conf,同一主機上兩個例項不能共用同一個 cluster-config-file
去除註釋    masterauth 123123
關閉保護模式 protected-mode no 
    redis-server /etc/redis/redis6379.conf 
	redis-server /etc/redis/redis6380.conf 
	
#新增node 到叢集
redis-cli -a 123123 --cluster create 192.168.198.131:6379 192.168.198.131:6380 192.168.198.132:6379 192.168.198.132:6380 192.168.198.133:6379 192.168.198.133:6380 --cluster-replicas 1

# #提示這個報錯需要清除key
root@openvpn-server:~# redis-cli -a 123123 --cluster create 192.168.198.131:6379 192.168.198.131:6380 192.168.198.132:6379 192.168.198.132:6380 192.168.198.133:6379 192.168.198.133:6380 --cluster-replicas 1
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
[ERR] Node 192.168.198.131:6380 is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0.
# 這個時候需要確認是否配置叢集模式如果CTRL+c 叢集初始化就會失敗,需要刪除備份檔案重新初始化

驗證叢集

root@openvpn-node1:~# redis-cli -h 192.168.198.132
192.168.198.132:6379> auth 123123
OK
192.168.198.132:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:3
cluster_stats_messages_ping_sent:161
cluster_stats_messages_pong_sent:165
cluster_stats_messages_meet_sent:1
cluster_stats_messages_sent:327
cluster_stats_messages_ping_received:161
cluster_stats_messages_pong_received:162
cluster_stats_messages_meet_received:4
cluster_stats_messages_received:327

192.168.198.132:6379> info Replication
# Replication
role:master
connected_slaves:1
slave0:ip=192.168.198.133,port=6380,state=online,offset=504,lag=0
master_replid:f990cc3e2c1e6464daf5a68f559290e9ba5ac316
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:504
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:504


動態新增節點

新增新的主節點

root@openvpn-server:~# redis-cli -a 123123 --cluster add-node  192.168.198.134:6379 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
>>> Adding node 192.168.198.134:6379 to cluster 192.168.198.134:6380
>>> Performing Cluster Check (using node 192.168.198.134:6380)
M: ac337b65315bec4f20ede868cb0ef510daee6a4c 192.168.198.134:6380
   slots:[0-16383] (16384 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.198.134:6379 to make it join the cluster.
[OK] New node added correctly.

#如果新增叢集出現插槽分配不了就需要做修復
# redis-cli -a 123123 --cluster add-node 192.168.198.134:6379 192.168.198.131:6379 
>>> Adding node 192.168.198.134:6379 to cluster 192.168.198.134:6380
>>> Performing Cluster Check (using node 192.168.198.134:6379)
M: c0767666bee76e5e0dc67f24031a3e1b574235cc 192.168.198.134:6379
   slots: (0 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[ERR] Not all 16384 slots are covered by nodes.

redis-cli -a 123123 --cluster fix 192.168.198.134:6379 
redis-cli -a 123123 --cluster fix 192.168.198.134:6380

#執行叢集新增操作
redis-cli -a 123123 --cluster add-node 192.168.198.134:6379 192.168.198.131:6379

#新增叢集檢測
root@openvpn-server:~# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.134:6379 (0495462e...) -> 0 keys | 16384 slots | 1 slaves.
[OK] 0 keys in 1 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.134:6379)
M: 0495462ec6b3ba0a8148e1d278f8e1480f07fd80 192.168.198.134:6379
   slots:[0-16383] (16384 slots) master
   1 additional replica(s)
S: ac337b65315bec4f20ede868cb0ef510daee6a4c 192.168.198.134:6380
   slots: (0 slots) slave
   replicates 0495462ec6b3ba0a8148e1d278f8e1480f07fd80
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

#給新新增的node分配slot
 redis-cli -a 123123 --cluster check 192.168.198.131:6379 #通過check查詢nodeid 
 redis-cli -a 123123 --cluster reshard 192.168.198.134:6379 #按提示輸入接收的slot插槽數和nodeid
 
 
 #驗證插槽分配
 root@openvpn-server:~# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.131:6379 (8e67a61f...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.133:6379 (6cee8692...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.134:6379 (d22fe1b5...) -> 0 keys | 4096 slots | 0 slaves.
192.168.198.132:6379 (87492444...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.131:6379)
M: 8e67a61f686172b3102004ba6ad5654e52334673 192.168.198.131:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: 7f4c6e84372f6da44c2da7596f075343cf08d2ae 192.168.198.131:6380
   slots: (0 slots) slave
   replicates 6cee8692e8a6af3d5e36022eaf894d04dae6e994
S: 959e1a09f86b444cc53386ec3fa5df18b913fbc7 192.168.198.133:6380
   slots: (0 slots) slave
   replicates 87492444a2400f19ab26446a6f7eb7acc07aea39
S: 16ead4bf4b15746e4c5ba8666b1b83d41c46a1d9 192.168.198.132:6380
   slots: (0 slots) slave
   replicates 8e67a61f686172b3102004ba6ad5654e52334673
M: 6cee8692e8a6af3d5e36022eaf894d04dae6e994 192.168.198.133:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37 192.168.198.134:6379
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: 87492444a2400f19ab26446a6f7eb7acc07aea39 192.168.198.132:6379
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

新增從節點

新增新的從節點(兩種方式)
redis-cli --cluster add-node  172.16.1.10:7008 172.16.1.10:7007 --cluster-slave   (不指定主節點)
redis-cli --cluster add-node  172.16.1.10:7008 172.16.1.10:7007 --cluster-slave --cluster-master-id 54fd24815e6e95f96201ce387ba6e31cb18f40e9

驗證
redis-cli --cluster check 192.168.198.131:6379

重新分片
redis-cli --cluster reshard 192.168.198.131:6379

刪除從節點
redis-cli -a 123123 --cluster del-node 192.168.198.132:6380 847d164cac6bde073b097de40e4897b24b8b665b

刪除主節點(需要把主節點得hash槽移動到其他主節點上)
redis-cli --cluster reshard 192.168.198.131:6379
redis-cli --cluster del-node 192.168.198.13x:6379 `節點id`

動態刪除節點

遷移master槽位到其他節點

#檢視槽位資訊
root@openvpn-node3:/etc/redis# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.131:6379 (8e67a61f...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.133:6379 (6cee8692...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.134:6379 (d22fe1b5...) -> 0 keys | 4096 slots | 0 slaves.
192.168.198.132:6379 (87492444...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.131:6379)
M: 8e67a61f686172b3102004ba6ad5654e52334673 192.168.198.131:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: 7f4c6e84372f6da44c2da7596f075343cf08d2ae 192.168.198.131:6380
   slots: (0 slots) slave
   replicates 6cee8692e8a6af3d5e36022eaf894d04dae6e994
S: 959e1a09f86b444cc53386ec3fa5df18b913fbc7 192.168.198.133:6380
   slots: (0 slots) slave
   replicates 87492444a2400f19ab26446a6f7eb7acc07aea39
S: 16ead4bf4b15746e4c5ba8666b1b83d41c46a1d9 192.168.198.132:6380
   slots: (0 slots) slave
   replicates 8e67a61f686172b3102004ba6ad5654e52334673
M: 6cee8692e8a6af3d5e36022eaf894d04dae6e994 192.168.198.133:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37 192.168.198.134:6379
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: 87492444a2400f19ab26446a6f7eb7acc07aea39 192.168.198.132:6379
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
#接下來將 192.168.198.132的槽位資訊轉移到192.168.198.131 節點
redis-cli -a 123123 --cluster reshard 192.168.198.131:6379
	#重新分配槽位 多少個:4096
	>>> Check slots coverage... [OK] All 16384 slots covered. 
	How many slots do you want to move (from 1 to 16384)? 4096 #遷移master上的多少個槽位
	#分配給誰:192.168.198.131 nodeid
	What is the receiving node ID? 8e67a61f686172b3102004ba6ad5654e52334673 #接收槽位的伺服器ID 
	Please enter all the source node IDs. Type 'all' to use all the nodes as source nodes for the hash slots. Type 'done' once you entered all the source nodes IDs.
	#分配誰的:192.168.192.132 nodeid
	Source node #1: 87492444a2400f19ab26446a6f7eb7acc07aea39 #從哪個伺服器遷移4096個槽位 
	Source node #2: done #寫done,表示沒有其他master了 
	Moving slot 5457 Do you want to proceed with the proposed reshard plan (yes/no)? yes #是否繼續

驗證槽位遷移完成

root@openvpn-server:~# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.131:6379 (8e67a61f...) -> 0 keys | 8192 slots | 1 slaves.
192.168.198.133:6379 (6cee8692...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.134:6379 (d22fe1b5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.132:6379 (87492444...) -> 0 keys | 0 slots | 0 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.131:6379)
M: 8e67a61f686172b3102004ba6ad5654e52334673 192.168.198.131:6379
   slots:[1365-5460],[6827-10922] (8192 slots) master
   1 additional replica(s)
S: 7f4c6e84372f6da44c2da7596f075343cf08d2ae 192.168.198.131:6380
   slots: (0 slots) slave
   replicates 6cee8692e8a6af3d5e36022eaf894d04dae6e994
S: 959e1a09f86b444cc53386ec3fa5df18b913fbc7 192.168.198.133:6380
   slots: (0 slots) slave
   replicates 8e67a61f686172b3102004ba6ad5654e52334673
S: 16ead4bf4b15746e4c5ba8666b1b83d41c46a1d9 192.168.198.132:6380
   slots: (0 slots) slave
   replicates d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37
M: 6cee8692e8a6af3d5e36022eaf894d04dae6e994 192.168.198.133:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37 192.168.198.134:6379
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
   1 additional replica(s)
M: 87492444a2400f19ab26446a6f7eb7acc07aea39 192.168.198.132:6379
   slots: (0 slots) master  #這裡插槽就沒了
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

從叢集刪除伺服器

root@openvpn-server:~# redis-cli -a 123123 --cluster del-node 192.168.198.132:6379 87492444a2400f19ab26446a6f7eb7acc07aea39
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
>>> Removing node 87492444a2400f19ab26446a6f7eb7acc07aea39 from cluster 192.168.198.132:6379
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.


驗證node是否刪除

root@openvpn-server:~# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.131:6379 (8e67a61f...) -> 0 keys | 8192 slots | 1 slaves.
192.168.198.133:6379 (6cee8692...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.134:6379 (d22fe1b5...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.131:6379)
M: 8e67a61f686172b3102004ba6ad5654e52334673 192.168.198.131:6379
   slots:[1365-5460],[6827-10922] (8192 slots) master
   1 additional replica(s)
S: 7f4c6e84372f6da44c2da7596f075343cf08d2ae 192.168.198.131:6380
   slots: (0 slots) slave
   replicates 6cee8692e8a6af3d5e36022eaf894d04dae6e994
S: 959e1a09f86b444cc53386ec3fa5df18b913fbc7 192.168.198.133:6380
   slots: (0 slots) slave
   replicates 8e67a61f686172b3102004ba6ad5654e52334673
S: 16ead4bf4b15746e4c5ba8666b1b83d41c46a1d9 192.168.198.132:6380
   slots: (0 slots) slave
   replicates d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37
M: 6cee8692e8a6af3d5e36022eaf894d04dae6e994 192.168.198.133:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
M: d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37 192.168.198.134:6379
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.


模擬master 當機

redis-cli -c -h 192.168.198.131 -a 123123

kill -9 2121212 #主程式

2. 安裝配置memcached 高可用

memcache 本身沒有像 redis 所具備的資料持久化功能,比如 RDB 和 AOF 都沒有,但是可以通過做叢集同步的方式,讓各 memcache 伺服器的資料進行同步,從而實現資料的一致性,即保證各 memcache
的資料是一樣的,即使有任何一臺 memcache 發生故障,只要叢集中有一臺 memcache 可用就不會出現資料丟失,當其他 memcache 重新加入到叢集的時候可以自動從有資料的 memcache 當中自動獲取資料並提供服務。Memcache 藉助了作業系統的 libevent 工具做高效的讀寫。libevent 是個程式庫,它將 Linux 的 epoll、BSD 類作業系統的 kqueue 等事件處理功能封裝成統一的介面。即使對伺服器的連線數增加,也能發揮高效能。memcached 使用這個 libevent 庫,因此能在 Linux、BSD、Solaris 等作業系統上發揮其高效能。Memcache 支援最大的記憶體儲存物件為 1M,超過 1M 的資料可以使用客戶端壓縮或拆分包放到多個key 中,比較大的資料在進行讀取的時候需要消耗的時間比較長,memcache 最適合儲存使用者的 session實現 session 共享,Memcached 儲存資料時, Memcached 會去申請1MB 的記憶體, 把該塊記憶體稱為一個slab, 也稱為一個 page。memcached具有多種語言的客戶端開發包,包括:Perl/PHP/JAVA/C/Python/Ruby/C#/

2.1 單機部署

2.1.1 yum 安裝與啟動

通過yum安裝相對簡單

# yum install memcached -y
# vim /etc/sysconfig/memcached
PORT="11211" #監聽埠
USER="memcached" #啟動使用者
MAXCONN="1024" #最大連線數
CACHESIZE="1024" #最大使用記憶體
OPTIONS="" #其他選項

image-20200703185520210

2.1.2 python 操作memcache

#!/usr/bin/env python
#coding:utf-8
#Author:Zhang ShiJie
import memcache
m = memcache.Client(['172.18.200.106:11211'], debug=True)
for i in range(100):
 m.set("key%d" % i,"v%d" % i)
 ret = m.get('key%d' % i)
 print(ret)
 

2.1.3 編譯安裝

# yum install libevent libevent-devel –y
# pwd
/usr/local/src
# tar xvf memcached-1.5.12.tar.gz
# ./configure --prefix=/usr/local/memcache
# make && make install

#啟動 memcached
# /usr/local/memcache/bin/memcached -u memcached -p 11211 -m 2048 -c 65536 &

2.2 memcached 叢集部署

2.2.1 基於magent 的部署架構

該部署方式依賴於 magent 實現高可用,應用端通過負載伺服器連線到 magent,然後再由 magent代理使用者應用請求到 memcached 處理,底層的 memcached 為雙主結構會自動同步資料,本部署方式存在 magent 單點問題因此需要兩個 magent 做高可用。

image-20200703185925386

2.2.2 repcached 實現原理

在 master 上可以通過 -X 指定 replication port,在 slave 上通過 -x/-X 找到 master 並 connect 上去,事實上,如果同時指定了 -x/-X, repcached 一定會嘗試連線,但如果連線失敗,它就會用 -X 引數來自己 listen(成為 master);如果 master壞掉,slave偵測到連線斷了,它會自動 listen而成為 master;而如果 slave 壞掉, master 也會偵測到連線斷,它就會重新 listen 等待新的 slave 加入。從這方案的技術實現來看,其實它是一個單 master 單 slave 的方案,但它的 master/slave 都是可讀寫的,而且可以相互同步,所以從功能上看,也可以認為它是雙機 master-master 方案。

2.2.3 簡化後的部署架構

magent 已經有很長時間沒有更新,因此可以不再使用 magent,直接通過負載均衡連線至 memcached,

仍然有兩臺 memcached 做高可用,memcached 會自動同步資料保持資料一致性,即使一臺 memcached故障也不影響業務正常執行,故障的 memcached 修復上線後再自動從另外一臺同步資料即可保持資料一致性。

image-20200703190306290

2.2.4 部署 repcached

http://repcached.sourceforge.net/

[root@s6 src]# yum install libevent libevent-devel
[root@s6 src]# wget https://sourceforge.net/projects/repcached/files/repcached/2.2.1-1.2.8/memcached-1.2.8-repcached-2.2.1.tar.gz
[root@s6 src]# tar xvf memcached-1.2.8-repcached-2.2.1.tar.gz
[root@s6 src]# cd memcached-1.2.8-repcached-2.2.1
[root@s6 memcached-1.2.8-repcached-2.2.1]# ./configure --prefix=/usr/local/repcached --enable-replication
[root@s6 memcached-1.2.8-repcached-2.2.1]# make #報錯如下

image-20200703190435883

解決辦法:
[root@s6 memcached-1.2.8-repcached-2.2.1]# vim memcached.c
 56 #ifndef IOV_MAX
 57 #if defined(__FreeBSD__) || defined(__APPLE__)
 58 # define IOV_MAX 1024
 59 #endif
 60 #endif
改為如下內容:
 55 /* FreeBSD 4.x doesn't have IOV_MAX exposed. */
 56 #ifndef IOV_MAX 
 57 # define IOV_MAX 1024 
 58 #endif
再次編譯安裝:
[root@s6 memcached-1.2.8-repcached-2.2.1]# make && make install

image-20200703190526815

2.2.5 驗證是否可執行

[root@s5 memcached-1.2.8-repcached-2.2.1]# /usr/local/repcached/bin/memcached -h
memcached 1.2.8
repcached 2.2.1
-p <num> TCP port number to listen on (default: 11211)
-U <num> UDP port number to listen on (default: 11211, 0 is off)
-s <file> unix socket path to listen on (disables network support)
-a <mask> access mask for unix socket, in octal (default 0700)
-l <ip_addr> interface to listen on, default is INDRR_ANY
-d run as a daemon
-r maximize core file limit
-u <username> assume identity of <username> (only when run as root)
-m <num> max memory to use for items in megabytes, default is 64 MB
-M return error on memory exhausted (rather than removing items)
-c <num> max simultaneous connections, default is 1024
-k lock down all paged memory. Note that there is a
 limit on how much memory you may lock. Trying to
 allocate more than that would fail, so be sure you
 set the limit correctly for the user you started
 the daemon with (not for -u <username> user;
  under sh this is done with 'ulimit -S -l NUM_KB').
-v verbose (print errors/warnings while in event loop)
-vv very verbose (also print client commands/reponses)
-h print this help and exit
-i print memcached and libevent license
-P <file> save PID in <file>, only used with -d option
-f <factor> chunk size growth factor, default 1.25
-n <bytes> minimum space allocated for key+value+flags, default 48
-R Maximum number of requests per event
 limits the number of requests process for a given con nection
 to prevent starvation. default 20
-b Set the backlog queue limit (default 1024)
-x <ip_addr> hostname or IP address of peer repcached
-X <num:num> TCP port number for replication. <listen:connect> (default: 11212)

2.2.6 啟動memcache

通過 repcached 安裝的 memcached 命令啟動 memcache 服務並實現 memcache 主備結構,其中-x 為對
方即主 memcache 的 IP,-X 為本地啟動的用資料同步的埠:

2.2.6.1 server 1 相關操作

[root@s5 ~]# /usr/local/repcached/bin/memcached -d -m 2048 -p 11211 -u root -c 2048 -x 172.18.200.106 -X 16000

image-20200703190820215

2.2.6.2 server 2 相關操作

[root@s6 src]# /usr/local/repcached/bin/memcached -d -m 2048 -p 11211 -u root -c 2048 -x 172.18.200.105 -X 16000

image-20200703190912241

2.2.7 連線memcache 驗證資料

2.2.7.1 shell 命令

[root@s6 src]# telnet 172.18.200.106 11211
Trying 172.18.200.106...
Connected to 172.18.200.106.
Escape character is '^]'.
set name 0 0 4
jack
STORED
get name
VALUE name 0 4
jack
END
quit
Connection closed by foreign host.
[root@s6 src]# telnet 172.18.200.105 11211
Trying 172.18.200.105...
Connected to 172.18.200.105.
Escape character is '^]'.
get name
VALUE name 0 4
jack
END
quit
Connection closed by foreign host.
[root@s6 src]

2.2.7.2 python 指令碼

#!/usr/bin/env python
#coding:utf-8
#Author:Zhang ShiJie
import memcache
m = memcache.Client(['172.18.200.106:11211'], debug=True)
for i in range(100):
 m.set("key%d" % i,"v%d" % i)
 ret = m.get('key%d' % i)
 print(ret)

3. 安裝vmware esxi

3.1 下載安裝包

官網:vmware.com

下載頁面:ESXI_6.x

image-20200703192336159

3.2 製作u啟

下載後製作啟動U盤。推薦Rufus工具。
直接將系統映象包ISO寫入U盤:

3.3 安裝esxi

vm 開機按F6

image-20200703192447040

image-20200703192510852image-20200703192530026

#------------------如果需要配置靜態路由
摁F2進入個性設定,輸入賬號和密碼
選擇進入Configure Management Network --> IPv4 Configuration
摁空格鍵(Space)選擇Set static IPv4 address and network configuration:

IPv4 Address [10.10.10.11]
Subnet Mask [255.255.255.0]
Default Gateway [10.10.10.10]

image-20200703192702995

電腦端IP

image-20200703192740695

web 輸入IP https://10.10.10.11

image-20200703192825454

輸入使用者密碼

image-20200703192848303

3.4 後臺配置

3.4.1 啟用esxi

先啟用一下:
在主機->管理->許可(下面選一個輸入進去,檢查許可證,然後分配許可證)
0A65P-00HD0-3Z5M1-M097M-22P7H
HV4WC-01087-1ZJ48-031XP-9A843

image-20200703192927391

3.4.2 配置網路卡

首先是配置第一個網口開啟混合模式。

image-20200703193007346

image-20200703193044162

image-20200703193102086

3.4.3 新增內部硬碟

在軟路由安裝時,只會自動格式化你安裝ESXI的那個硬碟。
所以,即使我配備了兩個硬碟250G的ssd和1T的SEAGATE。但看不到SEAGATE這個硬碟,這時就需要我們將其格式化為VMFS格式。

image-20200703193249506

3.4.4 設定自動啟動

對於裝置斷電問題,就會導致ESXI重啟,但安裝的虛擬機器系統沒有自動啟動。
ESXI正好提供自動啟動的設定。

image-20200703193341781

3.5 其他

3.5.1 升級esxi

下載地址:(注意需要登入vmware)

image-20200703193500713

下載後上傳到esxi 的儲存裡面

image-20200703193546142

根據上傳的路徑,拼接得到絕對路徑:/vmfs/volumes/5d926a65-f446c226-6a8a-00e0670f2312/images/update-from-esxi6.7-6.7_update03.zip
然後進入ESXI的ssh命令終端。

image-20200703193622383

執行如下命令

# 檢視可更新專案
esxcli software sources profile list -d /vmfs/volumes/5d926a65-f446c226-6a8a-00e0670f2312/images/update-from-esxi6.7-6.7_update03.zip

# 可以看到多個,就選標準版吧:ESXi-6.7.0-20190802001-standard

# 安裝指定更新
esxcli software profile update -d /vmfs/volumes/5d926a65-f446c226-6a8a-00e0670f2312/images/update-from-esxi6.7-6.7_update03.zip -p ESXi-6.7.0-20190802001-standard

# 等待以下就安裝好了,然後執行重啟命令
reboot

3.5.2 修改預設的esxi web管理埠

#---------------------直接在終端修改配置
# 進入vi編輯模式
vi /etc/vmware/rhttpproxy/config.xml

3.5.3 修改esxi 主機名

ssh shell修改主機名:

esxcli system hostname set --domain kioye.cn
esxcli system hostname set --host esxi

3.5.4 更換ssl 證照

# 進入對應目錄
cd /etc/vmware/ssl
# 替換rui.crt和rui.key
# 重啟服務
service.sh restart

3.5.5 使用vm 軟體連線esxi

直接在VMware 新增esxi 主機節點即可

相關文章