Binary Deployment of a Kubernetes 1.23.4 Cluster - Part 5: Deploying the Master Node Services

Published by itteer on 2022-03-14

1 Install Docker

Install Docker on hosts 21, 22, and 200 with the following command:

~]# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

1.1 Configure Docker

/etc/docker/daemon.json

{
    "graph": "/data/docker",
    "storage-driver": "overlay2",
    "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
    "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
    "bip": "172.7.21.1/24",
    "exec-opts": ["native.cgroupdriver=systemd"],
    "live-restore": true
}

Notes:

graph: Docker's data root directory (an older option; recent Docker releases use data-root instead)

exec-opts: sets the cgroup driver (systemd here) that governs CPU/memory resource management

registry-mirrors: registry mirror for pulling images

insecure-registries: trusted plain-HTTP image registries

bip differs per host; adjust it on each machine:

21: 172.7.21.1/24

22: 172.7.22.1/24

200: 172.7.200.1/24
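Since only the bip value changes between machines, a small helper can patch it in place. This is a hypothetical sketch that assumes each host's first IP address ends in its machine number (21/22/200):

# Derive the last octet of this host's first IP and rewrite bip accordingly
~]# HOST_ID=$(hostname -I | awk '{print $1}' | cut -d. -f4)
~]# sed -i "s#\"bip\": \"[^\"]*\"#\"bip\": \"172.7.${HOST_ID}.1/24\"#" /etc/docker/daemon.json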

Create the data directory

[root@hdss7-21 ~]# mkdir -pv /data/docker
mkdir: created directory ‘/data’
mkdir: created directory ‘/data/docker’
[root@hdss7-21 ~]#

1.2 Start Docker

~]# systemctl enable docker
~]# systemctl start docker
~]# systemctl status docker -l
~]# docker info
~]# docker version
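To confirm daemon.json took effect, check the storage driver, cgroup driver, data root, and bridge address (a quick sanity check; exact output varies per host):

~]# docker info | grep -E 'Storage Driver|Cgroup Driver|Docker Root Dir'
~]# ip addr show docker0    # should carry 172.7.<host>.1/24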

2 Deploy the kube-apiserver Cluster

2.1 Cluster planning

Hostname Role IP
CFZX55-21.host.com kube-apiserver 10.211.55.21
CFZX55-22.host.com kube-apiserver 10.211.55.22
CFZX55-11.host.com L4 load balancer (Nginx + Keepalived) 10.211.55.11
CFZX55-12.host.com L4 load balancer (Nginx + Keepalived) 10.211.55.12

Note: 10.211.55.11 and 10.211.55.12 run nginx as a layer-4 load balancer, with keepalived providing a VIP of 10.211.55.10 that proxies the two kube-apiserver instances for high availability.

2.2 Download the software, unpack it, and create symlinks

Operate on host 21.

This example uses version 1.23.4.

[root@cfzx55-21 src]# tar xf kubernetes-server-linux-amd64.tar.gz -C /opt/
[root@cfzx55-21 src]# cd /opt/kubernetes/
[root@cfzx55-21 kubernetes]# rm kubernetes-src.tar.gz -f
[root@cfzx55-21 kubernetes]# rm -rf LICENSES/
[root@cfzx55-21 kubernetes]# rm -rf addons/
[root@cfzx55-21 kubernetes]# cd server/
[root@cfzx55-21 server]# mv bin/ ../
[root@cfzx55-21 server]# cd ..
[root@cfzx55-21 kubernetes]# rm -rf server/
[root@cfzx55-21 kubernetes]# cd bin/
[root@cfzx55-21 bin]# rm *.tar -f
[root@cfzx55-21 bin]# rm *_tag -f
[root@cfzx55-21 opt]# vim /etc/profile
export PATH=$PATH:/opt/etcd:/opt/kubernetes/bin
[root@cfzx55-21 opt]# source /etc/profile

2.3 Issue the kube-apiserver certificate

Operate on host 200.

/opt/certs/kube-apiserver-csr.json

{
    "CN": "kube-apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes",        
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.211.55.10",
        "10.211.55.21",
        "10.211.55.22",
        "10.211.55.23"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "system:masters",
            "OU": "system"            
        }
    ]
}

Notes:

  • CN: K8S extracts the CN field as the username; concretely, this maps to subjects of kind "User" in K8S RoleBinding/ClusterRoleBinding resources.
  • hosts: must include the IPs of all Master nodes, the LB nodes and LB VIP, and the first IP of the Service ClusterIP range. The ClusterIP range is set by "--service-cluster-ip-range" (192.168.0.0/16 here), so the first ClusterIP is 192.168.0.1.
  • names
    • C: CN (country)
    • ST / L: province and city; the values have no functional meaning here
    • O: "system:masters". The O field is defined because the apiserver reuses this certificate when calling kubelets (see the official docs). K8S extracts the O field as the group, i.e. subjects of kind "Group" in RoleBinding/ClusterRoleBinding resources.

Generate the certificate

[root@cfzx55-200 certs]# cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=ca-config.json \
> -profile=kubernetes \
> kube-apiserver-csr.json | cfssl-json -bare kube-apiserver
2022/03/12 21:28:43 [INFO] generate received request
2022/03/12 21:28:43 [INFO] received CSR
2022/03/12 21:28:43 [INFO] generating key: rsa-2048
2022/03/12 21:28:43 [INFO] encoded CSR
2022/03/12 21:28:43 [INFO] signed certificate with serial number 218531774642654852589087643914770351081106577228
[root@cfzx55-200 certs]# ll kube-apiserver*
-rw-r--r-- 1 root root  636 Mar 12 21:27 kube-apiserver-csr.json
-rw------- 1 root root 1679 Mar 12 21:28 kube-apiserver-key.pem
-rw-r--r-- 1 root root 1289 Mar 12 21:28 kube-apiserver.csr
-rw-r--r-- 1 root root 1655 Mar 12 21:28 kube-apiserver.pem
[root@cfzx55-200 certs]#
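Optionally verify that every planned address made it into the certificate's SAN list before distributing it (assumes openssl is installed on host 200):

[root@cfzx55-200 certs]# openssl x509 -in kube-apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'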

2.4 Copy the certificates to each node

Operate on host 21. Copy the six certificate and key files from host 200 into the certs directory.

[root@cfzx55-21 certs]# pwd
/opt/kubernetes/bin/certs
[root@cfzx55-21 certs]# ll
total 24
-rw------- 1 root root 1679 Mar 12 21:32 ca-key.pem
-rw-r--r-- 1 root root 1310 Mar 12 21:32 ca.pem
-rw------- 1 root root 1675 Mar 12 21:32 etcd-key.pem
-rw-r--r-- 1 root root 1448 Mar 12 21:32 etcd.pem
-rw------- 1 root root 1679 Mar 12 21:32 kube-apiserver-key.pem
-rw-r--r-- 1 root root 1655 Mar 12 21:32 kube-apiserver.pem
[root@cfzx55-21 certs]#

2.5 Generate the token.csv file

This file is used during kubelet bootstrap: when a worker node (kubelet) joins the cluster, it presents this token to kube-apiserver to request a signed certificate.

/opt/kubernetes/bin/certs/kube-apiserver.token.csv

[root@cfzx55-21 certs]# head -c 16 /dev/urandom | od -An -t x | tr -d " "
cceb7b589306a60ab6afe922f1f32d50
[root@cfzx55-21 certs]# echo cceb7b589306a60ab6afe922f1f32d50,kubelet-bootstrap,10001,"system:kubelet-bootstrap" > kube-apiserver.token.csv
[root@cfzx55-21 certs]# cat kube-apiserver.token.csv
cceb7b589306a60ab6afe922f1f32d50,kubelet-bootstrap,10001,system:kubelet-bootstrap
[root@cfzx55-21 certs]#
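The format expected by --token-auth-file is token,user,uid,"group1,group2,...". The two steps above can also be collapsed into a single hedged one-liner:

[root@cfzx55-21 certs]# echo "$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > kube-apiserver.token.csv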

2.6 Create the startup script

Operate on host 21 and create the startup script:

/opt/kubernetes/bin/kube-apiserver-startup.sh

#!/bin/bash
./kube-apiserver \
  --runtime-config=api/all=true \
  --anonymous-auth=false \
  --bind-address=0.0.0.0 \
  --advertise-address=10.211.55.21 \
  --secure-port=6443 \
  --tls-cert-file=./certs/kube-apiserver.pem \
  --tls-private-key-file=./certs/kube-apiserver-key.pem \
  --client-ca-file=./certs/ca.pem \
  --etcd-cafile=./certs/ca.pem \
  --etcd-certfile=./certs/etcd.pem \
  --etcd-keyfile=./certs/etcd-key.pem \
  --etcd-servers=https://10.211.55.12:2379,https://10.211.55.21:2379,https://10.211.55.22:2379 \
  --kubelet-client-certificate=./certs/kube-apiserver.pem \
  --kubelet-client-key=./certs/kube-apiserver-key.pem \
  --service-account-key-file=./certs/ca.pem \
  --service-account-signing-key-file=./certs/ca-key.pem \
  --service-account-issuer=https://kubernetes.default.svc.cluster.local \
  --enable-bootstrap-token-auth=true \
  --token-auth-file=./certs/kube-apiserver.token.csv \
  --allow-privileged=true \
  --service-cluster-ip-range=192.168.0.0/16 \
  --service-node-port-range=8000-20000 \
  --authorization-mode=RBAC,Node \
  --enable-aggregator-routing=true \
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
  --v=2 \
  --audit-log-path=/data/logs/kubernetes/kube-apiserver/audit-log \
  --log-dir=/data/logs/kubernetes/kube-apiserver

2.7 Adjust permissions and directories

[root@cfzx55-21 bin]# chmod +x kube-apiserver-startup.sh
[root@cfzx55-21 bin]# mkdir -pv /data/logs/kubernetes/kube-apiserver

2.8 Create the supervisor configuration

/etc/supervisord.d/kube-apiserver.ini

[program:kube-apiserver-55-21]
command=/opt/kubernetes/bin/kube-apiserver-startup.sh
numprocs=1
directory=/opt/kubernetes/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

2.9 Start the service and check

[root@cfzx55-21 bin]# supervisorctl update
[root@cfzx55-21 bin]# supervisorctl status
etcd-server-55-21                RUNNING   pid 12536, uptime 2:29:07
kube-apiserver-55-21             RUNNING   pid 13122, uptime 0:00:40
[root@cfzx55-21 bin]# netstat -luntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      13123/./kube-apiser
tcp        0      0 10.211.55.21:2379       0.0.0.0:*               LISTEN      12537/./etcd
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      12537/./etcd
tcp        0      0 10.211.55.21:2380       0.0.0.0:*               LISTEN      12537/./etcd
tcp        0      0 10.211.55.21:2381       0.0.0.0:*               LISTEN      12537/./etcd
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      912/sshd
udp        0      0 127.0.0.1:323           0.0.0.0:*                           700/chronyd
[root@cfzx55-21 bin]#
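As a quick local sanity check, an anonymous request should be rejected with HTTP 401 (anonymous-auth=false), confirming TLS and authentication are wired up; the cluster check below in this section shows the full response body:

[root@cfzx55-21 bin]# curl -sk https://127.0.0.1:6443/healthz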

2.10 Deploy the remaining nodes

Deploy node 22:

# On node 21, copy the kubernetes directory to node 22
[root@cfzx55-21 ~]# scp -r /opt/kubernetes/ root@cfzx55-22:/opt/
[root@cfzx55-21 ~]# scp /etc/supervisord.d/kube-apiserver.ini root@cfzx55-22:/etc/supervisord.d/
# On node 22, create the log directory
[root@cfzx55-22 certs]# mkdir -pv /data/logs/kubernetes/kube-apiserver
# Change the IP address in kube-apiserver-startup.sh
# Change the program name in /etc/supervisord.d/kube-apiserver.ini
# Start the service
[root@cfzx55-22 bin]# supervisorctl update
[root@cfzx55-22 bin]# supervisorctl status
etcd-server-55-22                RUNNING   pid 12495, uptime 2:37:27
kube-apiserver-55-22             RUNNING   pid 12675, uptime 0:00:38
[root@cfzx55-22 bin]# netstat -luntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      12676/./kube-apiser
tcp        0      0 10.211.55.22:2379       0.0.0.0:*               LISTEN      12496/./etcd
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      12496/./etcd
tcp        0      0 10.211.55.22:2380       0.0.0.0:*               LISTEN      12496/./etcd
tcp        0      0 10.211.55.22:2381       0.0.0.0:*               LISTEN      12496/./etcd
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      914/sshd
udp        0      0 127.0.0.1:323           0.0.0.0:*                           704/chronyd
[root@cfzx55-22 bin]#

Check the cluster status

[root@cfzx55-21 ~]# curl --insecure https://10.211.55.21:6443/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}[root@cfzx55-21 ~]# curl --insecure https://10.211.55.22:6443/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}[root@cfzx55-21 ~]#

At this point, kube-apiserver installation is complete.

2.11 Configure the layer-4 reverse proxy

The apiserver listening ports:

[root@cfzx55-21 ~]# netstat -luntp | grep kube
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      13123/./kube-apiser
[root@cfzx55-22 ~]# netstat -luntp | grep kube
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      12676/./kube-apiser

Run a VIP of 10.211.55.10 with keepalived.

Port 7443 on 10.211.55.10 reverse-proxies port 6443 on 10.211.55.21 and 10.211.55.22.

Perform the following steps on hosts 11 and 12.

Install nginx:

~]# yum install nginx -y
~]# yum install nginx-mod-stream -y

Configure nginx:

Append the following to the end of /etc/nginx/nginx.conf, i.e. after (as a sibling of) the http block.

stream {
    upstream kube-apiserver {
        server 10.211.55.21:6443    max_fails=3 fail_timeout=30s;
        server 10.211.55.22:6443    max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}
[root@cfzx55-11 ~]# nginx -t
nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:85
nginx: configuration file /etc/nginx/nginx.conf test failed

If nginx -t fails as above, the stream module is not installed (install nginx-mod-stream as shown earlier).
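A hedged way to confirm the module is in place (paths assumed from the EPEL package layout, where nginx.conf loads every module config under /usr/share/nginx/modules):

[root@cfzx55-11 ~]# ls /usr/lib64/nginx/modules/ngx_stream_module.so
[root@cfzx55-11 ~]# nginx -t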

Start nginx

~]# systemctl start nginx
~]# systemctl enable nginx
~]# systemctl status nginx

Check the status

[root@cfzx55-11 ~]# netstat -luntp | grep nginx
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      1499/nginx: master
tcp        0      0 0.0.0.0:7443            0.0.0.0:*               LISTEN      1499/nginx: master
[root@cfzx55-11 ~]#

Install keepalived

[root@cfzx55-11 ~]# yum install keepalived -y
[root@hdss7-12 ~]# yum install keepalived -y

Create the port-check script

/etc/keepalived/check_port.sh

#!/bin/bash
# Health-check script for keepalived: exit 0 if the given port is listening, 1 otherwise.
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
    PORT_PROCESS=`ss -lnt | grep $CHK_PORT | wc -l`
    if [ $PORT_PROCESS -eq 0 ];then
        echo "Port $CHK_PORT Is Not Used, End."
        exit 1
    fi
else
    echo "Check Port Cant Be Empty!"
    exit 1    # fail when called without a port, so keepalived never treats a misconfigured check as healthy
fi

Make it executable

~]# chmod +x /etc/keepalived/check_port.sh

Test the script

[root@cfzx55-11 ~]# /etc/keepalived/check_port.sh
Check Port Cant Be Empty!
[root@cfzx55-11 ~]# /etc/keepalived/check_port.sh 7443
[root@cfzx55-11 ~]# echo $?
0
[root@cfzx55-11 ~]# /etc/keepalived/check_port.sh 7445
Port 7445 Is Not Used, End.
[root@cfzx55-11 ~]# echo $?
1
[root@cfzx55-11 ~]#

keepalived master configuration

Operate on host 11.

/etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    router_id 10.211.55.11
}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.211.55.11
    nopreempt  # non-preemptive mode: when the master's service goes down the VIP floats to the backup, and it stays there even after the master's service comes back up

    authentication {
        auth_type PASS
        auth_pass 11111111
    }

    track_script {
        chk_nginx
    }

    virtual_ipaddress {
        10.211.55.10
    }
}

keepalived backup configuration

Operate on host 12.

/etc/keepalived/keepalived.conf

! Configuration File for keepalived
 
global_defs {
    router_id 10.211.55.12
}
 
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
 
vrrp_instance VI_1 {
    state BACKUP
    interface eth0                                 
    virtual_router_id 251
    priority 90
    advert_int 1
    mcast_src_ip 10.211.55.12
    !  Note: the backup must NOT have the nopreempt option
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
 
    track_script {
        chk_nginx
    }
 
    virtual_ipaddress {
        10.211.55.10
    }
}
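To watch the VRRP state machine while testing failover, tail the keepalived logs on both hosts (journalctl is standard on CentOS 7):

~]# journalctl -u keepalived -f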

2.12 Start the proxy and check

Operate on host 11.

[root@cfzx55-11 ~]# systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@cfzx55-11 ~]# systemctl start keepalived
[root@cfzx55-11 ~]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2022-03-13 08:21:53 CST; 6s ago
  Process: 1580 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1581 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─1581 /usr/sbin/keepalived -D
           ├─1582 /usr/sbin/keepalived -D
           └─1583 /usr/sbin/keepalived -D

Mar 13 08:21:53 cfzx55-11.host.com Keepalived_healthcheckers[1582]: Opening file '/etc/keepalive....
Mar 13 08:21:54 cfzx55-11.host.com Keepalived_vrrp[1583]: VRRP_Instance(VI_1) Transition to MAS...TE
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: VRRP_Instance(VI_1) Entering MASTER STATE
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: VRRP_Instance(VI_1) setting protocol VIPs.
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: VRRP_Instance(VI_1) Sending/queueing ...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Hint: Some lines were ellipsized, use -l to show in full.
[root@cfzx55-11 ~]#

Operate on host 12.

[root@cfzx55-12 ~]# systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@cfzx55-12 ~]# systemctl start keepalived
[root@cfzx55-12 ~]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2022-03-13 08:22:29 CST; 7s ago
  Process: 1538 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 1539 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─1539 /usr/sbin/keepalived -D
           ├─1540 /usr/sbin/keepalived -D
           └─1541 /usr/sbin/keepalived -D

Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Registering gratuitous ARP shared channel
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Opening file '/etc/keepalived/keepali...'.
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: WARNING - default user 'keepalived_sc...e.
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Unable to access script `/etc/keepali...h`
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Disabling track script chk_nginx sinc...nd
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: VRRP_Instance(VI_1) removing protocol...s.
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Using LinkWatch kernel netlink reflec.....
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: VRRP_Instance(VI_1) Entering BACKUP STATE
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: VRRP sockpool: [ifindex(2), proto(112...)]
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_healthcheckers[1540]: Opening file '/etc/keepalive....
Hint: Some lines were ellipsized, use -l to show in full.
[root@cfzx55-12 ~]#

On 11, ip addr shows the VIP:

[root@cfzx55-11 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc 
    link/ether 00:1c:42:76:65:e1 brd ff:ff:ff:ff:ff:ff
    inet 10.211.55.11/24 brd 10.211.55.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.211.55.10/32 scope global eth0
       valid_lft forever preferred_lft forever
[root@cfzx55-11 ~]#

Nginx + Keepalived failover test

On host 11, stop nginx; the VIP disappears.

[root@cfzx55-11 ~]# systemctl stop nginx
[root@cfzx55-11 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc 
    link/ether 00:1c:42:76:65:e1 brd ff:ff:ff:ff:ff:ff
    inet 10.211.55.11/24 brd 10.211.55.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
[root@cfzx55-11 ~]#

On host 12, the VIP is now active:

[root@cfzx55-12 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc 
    link/ether 00:1c:42:e2:45:7b brd ff:ff:ff:ff:ff:ff
    inet 10.211.55.12/24 brd 10.211.55.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.211.55.10/32 scope global eth0
       valid_lft forever preferred_lft forever
[root@cfzx55-12 ~]#

Restart nginx; the VIP stays on host 12.

This is because nopreempt is configured on 11: in non-preemptive mode, when the master's service goes down the VIP floats to the backup, and it remains on the backup even after the master's service recovers.

To move the VIP back to 11, restart keepalived on 12.
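Finally, the whole proxy chain can be verified from any host: a request to the VIP on port 7443 should return the same 401 JSON the apiservers returned directly in section 2.10, proving nginx is forwarding to a healthy backend.

~]# curl --insecure https://10.211.55.10:7443/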

3 Deploy the kubectl Component

3.1 Cluster planning

Hostname Role IP
CFZX55-21.host.com kubectl 10.211.55.21
CFZX55-22.host.com kubectl 10.211.55.22

3.2 Issue the kubectl certificate

Operate on operations host 200.

Generate the kubectl CSR file:

/opt/certs/kubectl-csr.json

{
    "CN": "clusteradmin",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "system:masters",
            "OU": "system"            
        }
    ]
}

Notes

  • CN: the CN value in the kubectl certificate has no special meaning here; any value works.
  • O: set to "system:masters" because we want kubectl to have full cluster privileges. K8S extracts the O field as the group, i.e. subjects of kind "Group" in RoleBinding/ClusterRoleBinding resources.
  • kube-apiserver authorizes client requests (kubelet, kube-proxy, Pods, ...) with RBAC, and ships with predefined RoleBindings: cluster-admin binds the group system:masters to the ClusterRole cluster-admin, which grants access to every kube-apiserver API. Since this certificate is signed by the cluster CA, any client presenting it is authenticated, and because its group is the pre-authorized system:masters, it is authorized for all APIs.
  • This certificate will later be used to generate the administrator's kubeconfig file. RBAC treats the certificate's CN field as the User and its O field as the Group. O must be "system:masters"; otherwise later kubectl create clusterrolebinding commands will fail.

Generate the certificate

[root@cfzx55-200 certs]# cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=ca-config.json \
> -profile=kubernetes \
> kubectl-csr.json | cfssl-json -bare kubectl
2022/03/13 08:48:36 [INFO] generate received request
2022/03/13 08:48:36 [INFO] received CSR
2022/03/13 08:48:36 [INFO] generating key: rsa-2048
2022/03/13 08:48:36 [INFO] encoded CSR
2022/03/13 08:48:36 [INFO] signed certificate with serial number 629903670193912591906490478447930251557864868755
2022/03/13 08:48:36 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@cfzx55-200 certs]# ll kubectl*
-rw-r--r-- 1 root root 1017 Mar 13 08:48 kubectl.csr
-rw-r--r-- 1 root root  306 Mar 13 08:44 kubectl-csr.json
-rw------- 1 root root 1679 Mar 13 08:48 kubectl-key.pem
-rw-r--r-- 1 root root 1411 Mar 13 08:48 kubectl.pem
[root@cfzx55-200 certs]#

3.3 Copy the certificates to hosts 21 and 22

[root@cfzx55-200 certs]# scp kubectl*.pem root@cfzx55-21:/opt/kubernetes/bin/certs/
[root@cfzx55-200 certs]# scp kubectl*.pem root@cfzx55-22:/opt/kubernetes/bin/certs/

3.4 Generate the kubeconfig file

Generate the kubectl.kubeconfig file for the kubectl component. It contains everything needed to reach kube-apiserver: the apiserver address, the CA certificate, and the client certificate kubectl presents.

With this file, anyone can operate the K8S cluster as a super administrator from any machine, so keep it secure.

By default, kubectl reads its configuration from ~/.kube/config.

Operate on host 21; when done, copy the generated file to the remaining Master nodes (host 22 in this example).

Script to create the config file:

#!/bin/bash
KUBE_CONFIG="/root/.kube/config"
KUBE_APISERVER="https://10.211.55.10:7443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/bin/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config set-credentials clusteradmin \
  --client-certificate=/opt/kubernetes/bin/certs/kubectl.pem \
  --client-key=/opt/kubernetes/bin/certs/kubectl-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config set-context default \
  --cluster=kubernetes \
  --user=clusteradmin \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

Notes:

  • Cluster name: a label describing the cluster; it has no functional meaning.
  • certificate-authority: the K8S cluster's root CA certificate.
  • server: the address of the kube-apiserver load balancer VIP.
  • kubeconfig: the kubeconfig file to generate.
  • User name: clusteradmin defines a user which the kubeconfig associates with a certificate pair. The name itself means nothing to K8S; what actually matters are the O and CN fields inside the certificate.
  • client-certificate: the client certificate.
  • client-key: the client private key.
  • Context: default ties the user "clusteradmin" to the cluster "kubernetes" inside the kubeconfig.
  • cluster: the cluster name configured by set-cluster.
  • user: the user name configured by set-credentials.

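Once generated, the file can be inspected at any time; kubectl redacts the embedded certificates in the output:

[root@cfzx55-21 ~]# kubectl config view --kubeconfig=/root/.kube/config
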
Run the script

[root@cfzx55-21 ~]# mkdir ~/.kube
[root@cfzx55-21 ~]# mkdir k8s-shell
[root@cfzx55-21 ~]# cd k8s-shell/
[root@cfzx55-21 k8s-shell]# vim kubectl-config.sh
[root@cfzx55-21 k8s-shell]# chmod +x kubectl-config.sh
[root@cfzx55-21 k8s-shell]# ./kubectl-config.sh
Cluster "kubernetes" set.
User "clusteradmin" set.
Context "default" created.
Switched to context "default".
[root@cfzx55-21 k8s-shell]#

Check the cluster status

[root@cfzx55-21 k8s-shell]# kubectl cluster-info
Kubernetes control plane is running at https://10.211.55.10:7443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@cfzx55-21 k8s-shell]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                        ERROR
controller-manager   Unhealthy   Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused
scheduler            Unhealthy   Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
etcd-1               Healthy     {"health":"true","reason":""}
etcd-2               Healthy     {"health":"true","reason":""}
etcd-0               Healthy     {"health":"true","reason":""}
[root@cfzx55-21 k8s-shell]# kubectl get all -A
NAMESPACE   NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   10h
[root@cfzx55-21 k8s-shell]#

Copy the kubeconfig generated on host 21 to node 22

[root@cfzx55-22 ~]# mkdir ~/.kube
[root@cfzx55-22 ~]# scp root@cfzx55-21:/root/.kube/config ~/.kube/
[root@cfzx55-22 ~]# ll .kube/
total 8
-rw------- 1 root root 6224 Mar 13 09:42 config
[root@cfzx55-22 ~]#

Check the cluster status on 22

[root@cfzx55-22 ~]# kubectl cluster-info
Kubernetes control plane is running at https://10.211.55.10:7443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@cfzx55-22 ~]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                        ERROR
controller-manager   Unhealthy   Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused
scheduler            Unhealthy   Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
etcd-2               Healthy     {"health":"true","reason":""}
etcd-1               Healthy     {"health":"true","reason":""}
etcd-0               Healthy     {"health":"true","reason":""}
[root@cfzx55-22 ~]# kubectl get all -A
NAMESPACE   NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   11h
[root@cfzx55-22 ~]#

At this point, kubectl deployment is complete.

4 Deploy controller-manager

4.1 Cluster planning

Hostname Role IP
CFZX55-21.host.com controller-manager 10.211.55.21
CFZX55-22.host.com controller-manager 10.211.55.22

4.2 Generate the kube-controller-manager certificate

Operate on operations host 200.

Generate the CSR file:

/opt/certs/kube-controller-manager-csr.json

{
    "CN": "system:kube-controller-manager",
    "hosts": [
        "127.0.0.1",
        "10.211.55.11",
        "10.211.55.12",
        "10.211.55.21",
        "10.211.55.22",
        "10.211.55.23"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "system:masters",
            "OU": "system"            
        }
    ]
}

Notes:

  • CN: this value matters; whether kube-controller-manager can talk to kube-apiserver normally depends on it. K8S extracts the CN field as the username, i.e. subjects of kind "User" in RoleBinding/ClusterRoleBinding resources. The built-in ClusterRoleBinding system:kube-controller-manager grants the user system:kube-controller-manager exactly the permissions the component needs.
  • hosts: the IPs of all nodes where kube-controller-manager runs.
  • O / OU: no functional meaning here. (Upstream examples typically set O to system:kube-controller-manager as well; this example uses system:masters, which is also authorized for all APIs via the cluster-admin binding.)

Generate the certificate

[root@cfzx55-200 certs]# cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=ca-config.json \
> -profile=kubernetes \
> kube-controller-manager-csr.json | cfssl-json -bare kube-controller-manager
2022/03/13 10:35:55 [INFO] generate received request
2022/03/13 10:35:55 [INFO] received CSR
2022/03/13 10:35:55 [INFO] generating key: rsa-2048
2022/03/13 10:35:55 [INFO] encoded CSR
2022/03/13 10:35:55 [INFO] signed certificate with serial number 386505557530275475753178134460007976778023939766
[root@cfzx55-200 certs]# ll kube-controller*.pem
-rw------- 1 root root 1679 Mar 13 10:35 kube-controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Mar 13 10:35 kube-controller-manager.pem
[root@cfzx55-200 certs]#
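A quick spot-check that the CN (the RBAC user name) landed as expected before copying the files out (assumes openssl on host 200):

[root@cfzx55-200 certs]# openssl x509 -in kube-controller-manager.pem -noout -subject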

Copy the certificates to hosts 21 and 22

[root@cfzx55-200 certs]# scp kube-controller-manager*.pem root@cfzx55-21:/opt/kubernetes/bin/certs/
[root@cfzx55-200 certs]# scp kube-controller-manager*.pem root@cfzx55-22:/opt/kubernetes/bin/certs/

4.3 Generate the kube-controller-manager kubeconfig file

Config file path: /opt/kubernetes/cfg/

Script to generate the kubeconfig file:

#!/bin/bash
KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
KUBE_APISERVER="https://10.211.55.10:7443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/bin/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config set-credentials kube-controller-manager \
  --client-certificate=/opt/kubernetes/bin/certs/kube-controller-manager.pem \
  --client-key=/opt/kubernetes/bin/certs/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-controller-manager \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

Generate the config file

[root@cfzx55-21 k8s-shell]# vim kube-controller-manager-config.sh
[root@cfzx55-21 k8s-shell]# chmod +x kube-controller-manager-config.sh
[root@cfzx55-21 k8s-shell]# ./kube-controller-manager-config.sh
Cluster "kubernetes" set.
User "kube-controller-manager" set.
Context "default" created.
Switched to context "default".
[root@cfzx55-21 k8s-shell]#

Copy the generated config file to host 22.

[root@cfzx55-21 ~]# scp -r /opt/kubernetes/cfg/ root@cfzx55-22:/opt/kubernetes/
root@cfzx55-22's password:
kube-controller-manager.kubeconfig                                100% 6366     2.6MB/s   00:00
[root@cfzx55-21 ~]#

Check on host 22

[root@cfzx55-22 ~]# ll /opt/kubernetes/cfg/
total 8
-rw------- 1 root root 6366 Mar 13 10:49 kube-controller-manager.kubeconfig
[root@cfzx55-22 ~]#

4.4 Create the startup script

Operate on host 21.

/opt/kubernetes/bin/kube-controller-manager-startup.sh

#!/bin/sh
./kube-controller-manager \
  --cluster-name=kubernetes \
  --bind-address=127.0.0.1 \
  --service-cluster-ip-range=192.168.0.0/16 \
  --leader-elect=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \
  --tls-cert-file=./certs/kube-controller-manager.pem \
  --tls-private-key-file=./certs/kube-controller-manager-key.pem \
  --cluster-signing-cert-file=./certs/ca.pem \
  --cluster-signing-key-file=./certs/ca-key.pem \
  --cluster-signing-duration=175200h0m0s \
  --use-service-account-credentials=true \
  --root-ca-file=./certs/ca.pem \
  --service-account-private-key-file=./certs/ca-key.pem \
  --log-dir=/data/logs/kubernetes/kube-controller-manager \
  --v=2

Notes:

--secure-port=10252: remove this flag, otherwise the component status never reports healthy (kubectl get cs probes https://127.0.0.1:10257, the default secure port, as the earlier output showed).
--cluster-cidr string
  CIDR range for Pods in the cluster. Requires --allocate-node-cidrs to be true.
In this example, allocate-node-cidrs and cluster-cidr are left unset; Pod addressing relies on Docker's bip instead.

Create the script and adjust permissions

[root@cfzx55-21 bin]# vim kube-controller-manager-startup.sh
[root@cfzx55-21 bin]# chmod +x kube-controller-manager-startup.sh
[root@cfzx55-21 bin]#

Create the log directory

[root@cfzx55-21 ~]# mkdir -p /data/logs/kubernetes/kube-controller-manager

4.5 Create the supervisor configuration file

/etc/supervisord.d/kube-controller-manager.ini

[program:kube-controller-manager-55-21]
command=/opt/kubernetes/bin/kube-controller-manager-startup.sh
numprocs=1
directory=/opt/kubernetes/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

4.6 Start via supervisor

[root@cfzx55-21 bin]# supervisorctl start kube-controller-manager-55-21
kube-controller-manager-55-21: started
[root@cfzx55-21 bin]# supervisorctl status
etcd-server-55-21                RUNNING   pid 1033, uptime 4:21:51
kube-apiserver-55-21             RUNNING   pid 1034, uptime 4:21:51
kube-controller-manager-55-21    RUNNING   pid 3330, uptime 0:00:37
[root@cfzx55-21 bin]#
[root@cfzx55-21 bin]# netstat -luntp | grep kube
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      1044/./kube-apiserv
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      3331/./kube-control
[root@cfzx55-21 bin]#

4.7 Copy the startup script and supervisor config file to host 22

[root@cfzx55-21 bin]# scp kube-controller-manager-startup.sh root@cfzx55-22:/opt/kubernetes/bin/
root@cfzx55-22's password:
kube-controller-manager-startup.sh                                    100%  778   489.1KB/s   00:00
[root@cfzx55-21 bin]# scp /etc/supervisord.d/kube-controller-manager.ini root@cfzx55-22:/etc/supervisord.d/
root@cfzx55-22's password:
kube-controller-manager.ini                                           100%  474   326.8KB/s   00:00
[root@cfzx55-21 bin]#

4.8 Start the service on host 22

# Change the program name
[root@cfzx55-22 ~]# vim /etc/supervisord.d/kube-controller-manager.ini
[root@cfzx55-22 ~]# mkdir -p /data/logs/kubernetes/kube-controller-manager
[root@cfzx55-22 ~]# supervisorctl update
kube-controller-manager-55-21: added process group
[root@cfzx55-22 ~]# supervisorctl status
etcd-server-55-22                RUNNING   pid 1013, uptime 4:27:39
kube-apiserver-55-22             RUNNING   pid 1012, uptime 4:27:39
kube-controller-manager-55-21    RUNNING   pid 3099, uptime 0:00:34
[root@cfzx55-22 ~]# netstat -luntp | grep kube
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      1014/./kube-apiserv
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      3100/./kube-control
[root@cfzx55-22 ~]#

5 Deploy kube-scheduler

5.1 Cluster planning

Hostname Role IP
CFZX55-21.host.com kube-scheduler 10.211.55.21
CFZX55-22.host.com kube-scheduler 10.211.55.22

5.2 Generate the kube-scheduler certificate

Create the CSR file:

/opt/certs/kube-scheduler-csr.json

{
    "CN": "system:kube-scheduler",
    "hosts": [
        "127.0.0.1",
        "10.211.55.11",
        "10.211.55.12",
        "10.211.55.21",
        "10.211.55.22",
        "10.211.55.23"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "system:masters",
            "OU": "system"            
        }
    ]
}

Generate the certificate

[root@cfzx55-200 certs]# cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=ca-config.json \
> -profile=kubernetes \
> kube-scheduler-csr.json | cfssl-json -bare kube-scheduler
2022/03/13 12:30:21 [INFO] generate received request
2022/03/13 12:30:21 [INFO] received CSR
2022/03/13 12:30:21 [INFO] generating key: rsa-2048
2022/03/13 12:30:21 [INFO] encoded CSR
2022/03/13 12:30:21 [INFO] signed certificate with serial number 78101929142938232987965103781662806513424359272
[root@cfzx55-200 certs]# ll kube-scheduler*.pem
-rw------- 1 root root 1679 Mar 13 12:30 kube-scheduler-key.pem
-rw-r--r-- 1 root root 1489 Mar 13 12:30 kube-scheduler.pem
[root@cfzx55-200 certs]#

5.3 Copy the certificates to nodes 21 and 22

[root@cfzx55-200 certs]# scp kube-scheduler*.pem root@cfzx55-21:/opt/kubernetes/bin/certs/
root@cfzx55-21's password:
kube-scheduler-key.pem                                                100% 1679   957.6KB/s   00:00
kube-scheduler.pem                                                    100% 1489   953.3KB/s   00:00
[root@cfzx55-200 certs]# scp kube-scheduler*.pem root@cfzx55-22:/opt/kubernetes/bin/certs/
root@cfzx55-22's password:
kube-scheduler-key.pem                                                100% 1679   640.6KB/s   00:00
kube-scheduler.pem                                                    100% 1489   794.6KB/s   00:00
[root@cfzx55-200 certs]#

5.4 Generate the kubeconfig file

#!/bin/bash
KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig"
KUBE_APISERVER="https://10.211.55.10:7443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/bin/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config set-credentials kube-scheduler \
  --client-certificate=/opt/kubernetes/bin/certs/kube-scheduler.pem \
  --client-key=/opt/kubernetes/bin/certs/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-scheduler \
  --kubeconfig=${KUBE_CONFIG}
  
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

Run the script

[root@cfzx55-21 k8s-shell]# vim kube-scheduler-config.sh
[root@cfzx55-21 k8s-shell]# chmod +x kube-scheduler-config.sh
[root@cfzx55-21 k8s-shell]# ./kube-scheduler-config.sh
Cluster "kubernetes" set.
User "kube-scheduler" set.
Context "default" created.
Switched to context "default".
[root@cfzx55-21 k8s-shell]#

Copy the kubeconfig file to host 22

[root@cfzx55-21 k8s-shell]# scp /opt/kubernetes/cfg/kube-scheduler.kubeconfig root@cfzx55-22:/opt/kubernetes/cfg/
root@cfzx55-22's password:
kube-scheduler.kubeconfig                                             100% 6332     2.6MB/s   00:00
[root@cfzx55-21 k8s-shell]#

5.5 Create the kube-scheduler startup script

/opt/kubernetes/bin/kube-scheduler-startup.sh

#!/bin/sh
./kube-scheduler \
  --address=127.0.0.1 \
  --leader-elect=true \
  --kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \
  --log-dir=/data/logs/kubernetes/kube-scheduler \
  --v=2

Create the script and adjust permissions

[root@cfzx55-21 bin]# vim kube-scheduler-startup.sh
[root@cfzx55-21 bin]# chmod +x kube-scheduler-startup.sh
[root@cfzx55-21 ~]# mkdir -p /data/logs/kubernetes/kube-scheduler

5.6 Create the supervisor configuration file

/etc/supervisord.d/kube-scheduler.ini

[program:kube-scheduler-55-21]
command=/opt/kubernetes/bin/kube-scheduler-startup.sh
numprocs=1
directory=/opt/kubernetes/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

5.7 Start the kube-scheduler service

[root@cfzx55-21 bin]# supervisorctl update
[root@cfzx55-21 bin]# supervisorctl status
etcd-server-55-21                RUNNING   pid 1033, uptime 5:16:26
kube-apiserver-55-21             RUNNING   pid 1034, uptime 5:16:26
kube-controller-manager-55-21    RUNNING   pid 3416, uptime 0:38:46
kube-scheduler-55-21             RUNNING   pid 3486, uptime 0:00:32
[root@cfzx55-21 bin]# netstat -luntp | grep kube
tcp        0      0 0.0.0.0:10259           0.0.0.0:*               LISTEN      3487/./kube-schedul
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      1044/./kube-apiserv
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      3417/./kube-control
[root@cfzx55-21 bin]#

5.8 Copy the kube-scheduler startup script and supervisor config file to host 22

[root@cfzx55-21 bin]# scp kube-scheduler-startup.sh root@cfzx55-22:/opt/kubernetes/bin/
root@cfzx55-22's password:

kube-scheduler-startup.sh                                             100%  199   100.3KB/s   00:00
[root@cfzx55-21 bin]#
[root@cfzx55-21 bin]# scp /etc/supervisord.d/kube-scheduler.ini root@cfzx55-22:/etc/supervisord.d/
root@cfzx55-22's password:
kube-scheduler.ini                                                    100%  446   329.4KB/s   00:00
[root@cfzx55-21 bin]#

5.9 Start kube-scheduler on host 22

# Change the program name
[root@cfzx55-22 ~]# vim /etc/supervisord.d/kube-scheduler.ini
[root@cfzx55-22 ~]# supervisorctl update
kube-controller-manager-55-21: stopped
kube-controller-manager-55-21: removed process group
kube-controller-manager-55-22: added process group
kube-scheduler-55-22: added process group
[root@cfzx55-22 ~]#
[root@cfzx55-22 ~]# supervisorctl status
etcd-server-55-22                RUNNING   pid 1013, uptime 5:25:59
kube-apiserver-55-22             RUNNING   pid 1012, uptime 5:25:59
kube-controller-manager-55-22    RUNNING   pid 3234, uptime 0:00:32
kube-scheduler-55-22             RUNNING   pid 3187, uptime 0:03:19
[root@cfzx55-22 ~]# netstat -luntp | grep kube
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      1014/./kube-apiserv
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      3235/./kube-control
tcp        0      0 0.0.0.0:10259           0.0.0.0:*               LISTEN      3189/./kube-schedul
[root@cfzx55-22 ~]#

When kube-controller-manager was deployed on 22 earlier, its program name had not been changed; the supervisorctl update above removed the stale kube-controller-manager-55-21 group and registered kube-controller-manager-55-22.

Check the cluster status

[root@cfzx55-22 bin]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-2               Healthy   {"health":"true","reason":""}
etcd-0               Healthy   {"health":"true","reason":""}
etcd-1               Healthy   {"health":"true","reason":""}
[root@cfzx55-22 bin]#
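Since both controller-manager and scheduler run with --leader-elect=true, you can also see which replica currently holds each leadership lock; in 1.23 these locks are Lease objects in kube-system:

[root@cfzx55-21 ~]# kubectl -n kube-system get leases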

Check cluster resources

[root@cfzx55-21 ~]# kubectl get sa -A
NAMESPACE         NAME                                 SECRETS   AGE
default           default                              1         114m
kube-node-lease   default                              1         114m
kube-public       default                              1         114m
kube-system       attachdetach-controller              1         116m
kube-system       bootstrap-signer                     1         114m
kube-system       certificate-controller               1         116m
kube-system       clusterrole-aggregation-controller   1         114m
kube-system       cronjob-controller                   1         116m
kube-system       daemon-set-controller                1         116m
kube-system       default                              1         114m
kube-system       deployment-controller                1         116m
kube-system       disruption-controller                1         116m
kube-system       endpoint-controller                  1         114m
kube-system       endpointslice-controller             1         116m
kube-system       endpointslicemirroring-controller    1         116m
kube-system       ephemeral-volume-controller          1         116m
kube-system       expand-controller                    1         114m
kube-system       generic-garbage-collector            1         114m
kube-system       horizontal-pod-autoscaler            1         116m
kube-system       job-controller                       1         116m
kube-system       namespace-controller                 1         116m
kube-system       node-controller                      1         116m
kube-system       persistent-volume-binder             1         114m
kube-system       pod-garbage-collector                1         114m
kube-system       pv-protection-controller             1         114m
kube-system       pvc-protection-controller            1         114m
kube-system       replicaset-controller                1         116m
kube-system       replication-controller               1         114m
kube-system       resourcequota-controller             1         114m
kube-system       root-ca-cert-publisher               1         116m
kube-system       service-account-controller           1         116m
kube-system       service-controller                   1         116m
kube-system       statefulset-controller               1         116m
kube-system       token-cleaner                        1         114m
kube-system       ttl-after-finished-controller        1         114m
kube-system       ttl-controller                       1         116m
[root@cfzx55-21 ~]# kubectl get ns -A
NAME              STATUS   AGE
default           Active   15h
kube-node-lease   Active   15h
kube-public       Active   15h
kube-system       Active   15h
[root@cfzx55-21 ~]# kubectl get role -A
NAMESPACE     NAME                                             CREATED AT
kube-public   system:controller:bootstrap-signer               2022-03-12T14:36:17Z
kube-system   extension-apiserver-authentication-reader        2022-03-12T14:36:16Z
kube-system   system::leader-locking-kube-controller-manager   2022-03-12T14:36:16Z
kube-system   system::leader-locking-kube-scheduler            2022-03-12T14:36:16Z
kube-system   system:controller:bootstrap-signer               2022-03-12T14:36:16Z
kube-system   system:controller:cloud-provider                 2022-03-12T14:36:16Z
kube-system   system:controller:token-cleaner                  2022-03-12T14:36:16Z
[root@cfzx55-21 ~]#

At this point, the Master node deployment is complete.