Installing a Kubernetes 1.30.1 high-availability cluster on CentOS 7 (stacked etcd, not an external etcd cluster)

Published by 今夕何兮 on 2024-06-04

1. Node layout

The load balancer is implemented with NGINX + keepalived; the cluster architecture is shown below:
[architecture diagram]

Node plan (VIP: 192.168.2.37):

Role                  Node IP         Hostname
control plane + LB    192.168.2.31    k8s-master01
control plane + LB    192.168.2.32    k8s-master02
control plane + LB    192.168.2.33    k8s-master03
worker                192.168.2.34    k8s-node01
worker                192.168.2.35    k8s-node02
worker                192.168.2.36    k8s-node03

2. System initialization (run on all nodes)

2.1 Disable the firewall

systemctl disable firewalld --now

2.2 Disable SELinux

setenforce 0   # disable temporarily
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config   # disable permanently (takes effect after reboot)

2.3 Configure time synchronization

yum install ntpdate -y
ntpdate time1.aliyun.com
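
ntpdate only syncs the clock once. If you want to stay with ntpdate rather than switch to chronyd, a root cron entry keeps the clocks aligned; this is a hedged addition, not one of the original steps:

(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate time1.aliyun.com >/dev/null 2>&1") | crontab -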

2.4 Disable swap

swapoff -a  # disable immediately; free -m shows the current swap usage
sed -i 's/^[^#].* swap .*/#&/' /etc/fstab

2.5 Set hostnames and /etc/hosts resolution

cat >> /etc/hosts << EOF
192.168.2.31  k8s-master01
192.168.2.32  k8s-master02
192.168.2.33  k8s-master03
192.168.2.34  k8s-node01
192.168.2.35  k8s-node02
192.168.2.36  k8s-node03
EOF

hostnamectl set-hostname k8s-master01 # set the matching hostname on each of the six nodes
# or derive it automatically from the node's IP and the /etc/hosts entries above:
ip a|egrep -o '192.168.2.[1-9]{2,3}'|head -n1 |xargs -i grep {} /etc/hosts|awk '{print $2}'| xargs hostnamectl set-hostname

2.6 Kernel modules and sysctl settings

# load the modules automatically at boot
tee /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF

# load the modules immediately
modprobe overlay
modprobe br_netfilter
 
# sysctl parameters that persist across reboots
tee /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
 
# apply the sysctl parameters immediately, without rebooting
sysctl --system
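
Optional verification, not part of the original steps: confirm the modules are loaded and the sysctl values took effect.

lsmod | grep -E 'overlay|br_netfilter'
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward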

2.7 Install the container runtime containerd

The containerd release current as of 2024-06-03 can be downloaded from https://github.com/containerd/containerd/releases/download/v1.7.17/containerd-1.7.17-linux-amd64.tar.gz

# install the low-level runtime runc
wget https://github.com/opencontainers/runc/releases/download/v1.1.12/runc.amd64
install -m 755 runc.amd64 /usr/local/sbin/runc

# extract the archive (its bin/ directory lands in /usr/local/bin)
tar zxf containerd-1.7.17-linux-amd64.tar.gz -C /usr/local/
# generate the default configuration
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
sed -ri '/sandbox_image/s#".*"#"registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml
# install the systemd unit file
wget -P /usr/lib/systemd/system https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
systemctl enable --now containerd
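
One hedged adjustment worth considering, not in the original steps: the generated config.toml sets SystemdCgroup = false for the runc runtime, while the kubelet in this guide ends up on the systemd cgroup driver (see the init log in section 5.1). Aligning the two and checking the daemon:

# switch containerd's runc runtime to the systemd cgroup driver (assumption: config.toml was generated as above)
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
systemctl restart containerd
# quick check that the daemon answers over its socket
ctr version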

3. Install NGINX + keepalived (run on the control plane nodes)

3.1 Install NGINX

yum -y install https://nginx.org/packages/centos/7/x86_64/RPMS/nginx-1.24.0-1.el7.ngx.x86_64.rpm

cat >/etc/nginx/nginx.conf <<\EOF
user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log notice;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}

stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/k8s-access.log  main;
    upstream k8s-apiserver {
       server 192.168.2.31:6443 weight=5 max_fails=1 fail_timeout=3s;    # k8s-master01 apiserver (IP:6443)
       server 192.168.2.32:6443 weight=5 max_fails=1 fail_timeout=3s;    # k8s-master02 apiserver (IP:6443)
       server 192.168.2.33:6443 weight=5 max_fails=1 fail_timeout=3s;    # k8s-master03 apiserver (IP:6443)
    }

    server {
       listen 9443; # listen on 9443, the port exposed behind the VIP
       proxy_pass k8s-apiserver; # reverse-proxy to the apiserver upstream
    }
}
EOF

systemctl enable nginx --now
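
A quick check, not in the original steps: each control plane node should now be listening on 9443 for the apiserver stream proxy.

ss -lntp | grep 9443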

3.2 Install keepalived

yum -y install keepalived

# run on master01; on master02 and master03 adjust router_id, state, and priority before applying
cat >/etc/keepalived/keepalived.conf <<\EOF
! Configuration File for keepalived

global_defs {
   router_id k8s_master01 # can simply be set to the hostname
}

vrrp_script check_nginx {
    script "/usr/bin/killall -0 nginx" # health check: killall -0 only tests that an nginx process exists
    interval 3  # run the check every 3 seconds
    weight -40  # subtract 40 from the priority while the check is failing
    fall 1 # a single failure is enough to apply the -40
    rise 3 # three consecutive successes restore the original priority; the node with the highest effective priority holds the MASTER role and the VIP
}

vrrp_instance VI_1 {
    state MASTER # BACKUP on the other nodes, whose priority is lower than the MASTER's
    interface ens33
    virtual_router_id 51 # must be identical on all nodes
    priority 100 # priority; higher on the MASTER than on the BACKUP nodes
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.2.37 # virtual IP (VIP)

    }
    track_script {
        check_nginx
    }
}
EOF

systemctl enable --now keepalived
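
Two optional checks, not in the original steps: killall used by the health-check script comes from the psmisc package, and the VIP should be attached to whichever node currently holds the MASTER role.

rpm -q psmisc || yum -y install psmisc
ip addr show ens33 | grep 192.168.2.37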

4. Install kubeadm, kubelet, and kubectl (run on all nodes)

tee /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.30/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.30/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

yum clean all && yum makecache
# install the pinned versions; kubernetes-cni and cri-tools (crictl) are pulled in automatically as dependencies
yum install -y kubelet-1.30.1 kubeadm-1.30.1 kubectl-1.30.1 --disableexcludes=kubernetes

systemctl enable --now kubelet

# point crictl at the containerd socket
tee /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF
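
Optional check, not in the original steps: crictl should now reach containerd through the configured socket.

crictl version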

5. Initialize the cluster (run on k8s-master01)

5.1 Initialization

kubeadm init --kubernetes-version=v1.30.1 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.2.31 --control-plane-endpoint=192.168.2.37:9443 --image-repository registry.aliyuncs.com/google_containers --upload-certs -v=5 | tee /tmp/kubeadm_init.log
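
Pulling the control plane images is the slowest part of init; as the preflight output below notes, they can be pre-pulled on each node beforehand, for example (a hedged convenience, not an original step):

kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.30.1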

Detailed log:

I0604 01:13:44.550174    4990 initconfiguration.go:122] detected and using CRI socket: unix:///var/run/containerd/containerd.sock
I0604 01:13:44.550432    4990 kubelet.go:196] the value of KubeletConfiguration.cgroupDriver is empty; setting it to "systemd"
[init] Using Kubernetes version: v1.30.1
[preflight] Running pre-flight checks
I0604 01:13:44.561428    4990 checks.go:561] validating Kubernetes and kubeadm version
I0604 01:13:44.561528    4990 checks.go:166] validating if the firewall is enabled and active
I0604 01:13:44.581798    4990 checks.go:201] validating availability of port 6443
I0604 01:13:44.582624    4990 checks.go:201] validating availability of port 10259
I0604 01:13:44.582876    4990 checks.go:201] validating availability of port 10257
I0604 01:13:44.583089    4990 checks.go:278] validating the existence of file /etc/kubernetes/manifests/kube-apiserver.yaml
I0604 01:13:44.583179    4990 checks.go:278] validating the existence of file /etc/kubernetes/manifests/kube-controller-manager.yaml
I0604 01:13:44.583213    4990 checks.go:278] validating the existence of file /etc/kubernetes/manifests/kube-scheduler.yaml
I0604 01:13:44.583251    4990 checks.go:278] validating the existence of file /etc/kubernetes/manifests/etcd.yaml
I0604 01:13:44.583301    4990 checks.go:428] validating if the connectivity type is via proxy or direct
I0604 01:13:44.583395    4990 checks.go:467] validating http connectivity to first IP address in the CIDR
I0604 01:13:44.583451    4990 checks.go:467] validating http connectivity to first IP address in the CIDR
I0604 01:13:44.583500    4990 checks.go:102] validating the container runtime
I0604 01:13:44.673657    4990 checks.go:637] validating whether swap is enabled or not
	[WARNING Swap]: swap is supported for cgroup v2 only; the NodeSwap feature gate of the kubelet is beta but disabled by default
I0604 01:13:44.673943    4990 checks.go:368] validating the presence of executable crictl
I0604 01:13:44.674026    4990 checks.go:368] validating the presence of executable conntrack
I0604 01:13:44.674111    4990 checks.go:368] validating the presence of executable ip
I0604 01:13:44.674189    4990 checks.go:368] validating the presence of executable iptables
I0604 01:13:44.674298    4990 checks.go:368] validating the presence of executable mount
I0604 01:13:44.674433    4990 checks.go:368] validating the presence of executable nsenter
I0604 01:13:44.674513    4990 checks.go:368] validating the presence of executable ebtables
I0604 01:13:44.674634    4990 checks.go:368] validating the presence of executable ethtool
I0604 01:13:44.674743    4990 checks.go:368] validating the presence of executable socat
I0604 01:13:44.674871    4990 checks.go:368] validating the presence of executable tc
I0604 01:13:44.674965    4990 checks.go:368] validating the presence of executable touch
I0604 01:13:44.675082    4990 checks.go:514] running all checks
I0604 01:13:44.715828    4990 checks.go:399] checking whether the given node name is valid and reachable using net.LookupHost
I0604 01:13:44.716410    4990 checks.go:603] validating kubelet version
I0604 01:13:44.851276    4990 checks.go:128] validating if the "kubelet" service is enabled and active
	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0604 01:13:44.871177    4990 checks.go:201] validating availability of port 10250
I0604 01:13:44.871545    4990 checks.go:327] validating the contents of file /proc/sys/net/ipv4/ip_forward
I0604 01:13:44.871698    4990 checks.go:201] validating availability of port 2379
I0604 01:13:44.871995    4990 checks.go:201] validating availability of port 2380
I0604 01:13:44.872221    4990 checks.go:241] validating the existence and emptiness of directory /var/lib/etcd
I0604 01:13:44.872517    4990 checks.go:830] using image pull policy: IfNotPresent
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I0604 01:13:44.994280    4990 checks.go:870] pulling: registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.1
I0604 01:13:57.059056    4990 checks.go:870] pulling: registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.1
I0604 01:14:10.844980    4990 checks.go:870] pulling: registry.aliyuncs.com/google_containers/kube-scheduler:v1.30.1
I0604 01:14:17.380927    4990 checks.go:870] pulling: registry.aliyuncs.com/google_containers/kube-proxy:v1.30.1
I0604 01:14:34.126616    4990 checks.go:870] pulling: registry.aliyuncs.com/google_containers/coredns:v1.11.1
I0604 01:14:41.666736    4990 checks.go:870] pulling: registry.aliyuncs.com/google_containers/pause:3.9
I0604 01:14:43.495332    4990 checks.go:870] pulling: registry.aliyuncs.com/google_containers/etcd:3.5.12-0
I0604 01:15:07.267337    4990 certs.go:112] creating a new certificate authority for ca
[certs] Using certificateDir folder "/etc/kubernetes/pki"
I0604 01:15:08.703074    4990 certs.go:483] validating certificate period for ca certificate
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.2.31 192.168.2.37]
I0604 01:15:10.651695    4990 certs.go:112] creating a new certificate authority for front-proxy-ca
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
I0604 01:15:11.286065    4990 certs.go:483] validating certificate period for front-proxy-ca certificate
I0604 01:15:11.517354    4990 certs.go:112] creating a new certificate authority for etcd-ca
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
I0604 01:15:12.150931    4990 certs.go:483] validating certificate period for etcd/ca certificate
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.2.31 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.2.31 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
I0604 01:15:16.732658    4990 certs.go:78] creating new public/private key files for signing service account users
[certs] Generating "apiserver-etcd-client" certificate and key
I0604 01:15:16.998175    4990 kubeconfig.go:112] creating kubeconfig file for admin.conf
W0604 01:15:16.998741    4990 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
I0604 01:15:18.021513    4990 kubeconfig.go:112] creating kubeconfig file for super-admin.conf
W0604 01:15:18.022037    4990 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "super-admin.conf" kubeconfig file
I0604 01:15:18.722412    4990 kubeconfig.go:112] creating kubeconfig file for kubelet.conf
W0604 01:15:18.722920    4990 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
I0604 01:15:19.323946    4990 kubeconfig.go:112] creating kubeconfig file for controller-manager.conf
W0604 01:15:19.324406    4990 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0604 01:15:19.748584    4990 kubeconfig.go:112] creating kubeconfig file for scheduler.conf
W0604 01:15:19.749101    4990 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0604 01:15:20.276589    4990 local.go:65] [etcd] wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
I0604 01:15:20.276707    4990 manifests.go:103] [control-plane] getting StaticPodSpecs
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
I0604 01:15:20.277317    4990 certs.go:483] validating certificate period for CA certificate
I0604 01:15:20.277470    4990 manifests.go:129] [control-plane] adding volume "ca-certs" for component "kube-apiserver"
I0604 01:15:20.277494    4990 manifests.go:129] [control-plane] adding volume "etc-pki" for component "kube-apiserver"
I0604 01:15:20.277508    4990 manifests.go:129] [control-plane] adding volume "k8s-certs" for component "kube-apiserver"
I0604 01:15:20.279601    4990 manifests.go:158] [control-plane] wrote static Pod manifest for component "kube-apiserver" to "/etc/kubernetes/manifests/kube-apiserver.yaml"
I0604 01:15:20.279666    4990 manifests.go:103] [control-plane] getting StaticPodSpecs
[control-plane] Creating static Pod manifest for "kube-controller-manager"
I0604 01:15:20.280096    4990 manifests.go:129] [control-plane] adding volume "ca-certs" for component "kube-controller-manager"
I0604 01:15:20.280121    4990 manifests.go:129] [control-plane] adding volume "etc-pki" for component "kube-controller-manager"
I0604 01:15:20.280134    4990 manifests.go:129] [control-plane] adding volume "flexvolume-dir" for component "kube-controller-manager"
I0604 01:15:20.280147    4990 manifests.go:129] [control-plane] adding volume "k8s-certs" for component "kube-controller-manager"
I0604 01:15:20.280159    4990 manifests.go:129] [control-plane] adding volume "kubeconfig" for component "kube-controller-manager"
I0604 01:15:20.282497    4990 manifests.go:158] [control-plane] wrote static Pod manifest for component "kube-controller-manager" to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
I0604 01:15:20.282568    4990 manifests.go:103] [control-plane] getting StaticPodSpecs
[control-plane] Creating static Pod manifest for "kube-scheduler"
I0604 01:15:20.283009    4990 manifests.go:129] [control-plane] adding volume "kubeconfig" for component "kube-scheduler"
I0604 01:15:20.284140    4990 manifests.go:158] [control-plane] wrote static Pod manifest for component "kube-scheduler" to "/etc/kubernetes/manifests/kube-scheduler.yaml"
I0604 01:15:20.284391    4990 kubelet.go:68] Stopping the kubelet
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 1m13.504109911s
[api-check] Waiting for a healthy API server. This can take up to 4m0s
I0604 01:16:35.247416    4990 with_retry.go:234] Got a Retry-After 1s response for attempt 1 to https://192.168.2.37:9443/healthz?timeout=10s
I0604 01:16:36.249909    4990 with_retry.go:234] Got a Retry-After 1s response for attempt 2 to https://192.168.2.37:9443/healthz?timeout=10s
I0604 01:16:37.252885    4990 with_retry.go:234] Got a Retry-After 1s response for attempt 3 to https://192.168.2.37:9443/healthz?timeout=10s
I0604 01:16:38.257800    4990 with_retry.go:234] Got a Retry-After 1s response for attempt 4 to https://192.168.2.37:9443/healthz?timeout=10s
I0604 01:16:39.261879    4990 with_retry.go:234] Got a Retry-After 1s response for attempt 5 to https://192.168.2.37:9443/healthz?timeout=10s
I0604 01:16:40.263848    4990 with_retry.go:234] Got a Retry-After 1s response for attempt 6 to https://192.168.2.37:9443/healthz?timeout=10s
I0604 01:16:41.265842    4990 with_retry.go:234] Got a Retry-After 1s response for attempt 7 to https://192.168.2.37:9443/healthz?timeout=10s
I0604 01:16:42.267842    4990 with_retry.go:234] Got a Retry-After 1s response for attempt 8 to https://192.168.2.37:9443/healthz?timeout=10s
[api-check] The API server is healthy after 11.004046154s
I0604 01:16:45.249703    4990 kubeconfig.go:608] ensuring that the ClusterRoleBinding for the kubeadm:cluster-admins Group exists
I0604 01:16:45.320902    4990 kubeconfig.go:681] creating the ClusterRoleBinding for the kubeadm:cluster-admins Group by using super-admin.conf
I0604 01:16:45.343098    4990 uploadconfig.go:112] [upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0604 01:16:45.416027    4990 uploadconfig.go:126] [upload-config] Uploading the kubelet component config to a ConfigMap
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0604 01:16:45.439722    4990 uploadconfig.go:131] [upload-config] Preserving the CRISocket information for the control-plane node
I0604 01:16:45.439791    4990 patchnode.go:31] [patchnode] Uploading the CRI Socket information "unix:///var/run/containerd/containerd.sock" to the Node API object "k8s-master01" as an annotation
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
7da92cec26ab0e8d5f1a13f203751d8e3c06a1a9054711285fbf7388574edfca
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 7sjz7s.9dcl3iv9qgb4l425
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0604 01:16:47.198190    4990 clusterinfo.go:47] [bootstrap-token] loading admin kubeconfig
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0604 01:16:47.199440    4990 clusterinfo.go:58] [bootstrap-token] copying the cluster from admin.conf to the bootstrap kubeconfig
I0604 01:16:47.200069    4990 clusterinfo.go:70] [bootstrap-token] creating/updating ConfigMap in kube-public namespace
I0604 01:16:47.212129    4990 clusterinfo.go:84] creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace
I0604 01:16:47.229315    4990 kubeletfinalize.go:91] [kubelet-finalize] Assuming that kubelet client certificate rotation is enabled: found "/var/lib/kubelet/pki/kubelet-client-current.pem"
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0604 01:16:47.232119    4990 kubeletfinalize.go:145] [kubelet-finalize] Restarting the kubelet to enable client certificate rotation
I0604 01:16:47.741213    4990 request.go:629] Waited for 153.174103ms due to client-side throttling, not priority and fairness, request: POST:https://192.168.2.37:9443/api/v1/namespaces/kube-system/serviceaccounts?timeout=10s
I0604 01:16:47.941906    4990 request.go:629] Waited for 71.419891ms due to client-side throttling, not priority and fairness, request: POST:https://192.168.2.37:9443/api/v1/namespaces/kube-system/services?timeout=10s
W0604 01:16:48.124505    4990 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: CoreDNS
I0604 01:16:48.341455    4990 request.go:629] Waited for 102.826885ms due to client-side throttling, not priority and fairness, request: POST:https://192.168.2.37:9443/api/v1/namespaces/kube-system/serviceaccounts?timeout=10s
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.2.37:9443 --token 7sjz7s.9dcl3iv9qgb4l425 \
	--discovery-token-ca-cert-hash sha256:6089433b9d69ab3eea083cc869e9c57cc7877d7178ac42472de1f1e0820f6ac8 \
	--control-plane --certificate-key 7da92cec26ab0e8d5f1a13f203751d8e3c06a1a9054711285fbf7388574edfca

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.2.37:9443 --token 7sjz7s.9dcl3iv9qgb4l425 \
	--discovery-token-ca-cert-hash sha256:6089433b9d69ab3eea083cc869e9c57cc7877d7178ac42472de1f1e0820f6ac8

5.2 Configure admin access

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
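
kubectl can now reach the cluster through the VIP; the node stays NotReady until the network plugin from section 5.3 is installed.

kubectl get nodes
kubectl get pods -n kube-system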

5.3 Install the flannel network plugin

# flannel's default pod subnet matches the --pod-network-cidr passed to kubeadm init; the default backend is vxlan and can be switched to host-gw via net-conf.json -> Backend -> "Type"
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
kubectl apply -f kube-flannel.yml
# after the plugin is installed, the master nodes turn Ready and the coredns pods go from Pending to Running
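
If you want the host-gw backend mentioned in the comment above, a minimal sketch (assuming the stock kube-flannel.yml, whose net-conf.json contains "Type": "vxlan") is to patch the manifest before applying it:

sed -i 's/"Type": "vxlan"/"Type": "host-gw"/' kube-flannel.yml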


6. Join the remaining nodes to the cluster

After the first control plane node is initialized, kubeadm prints the commands for joining the cluster:

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.2.37:9443 --token 7sjz7s.9dcl3iv9qgb4l425 \
	--discovery-token-ca-cert-hash sha256:6089433b9d69ab3eea083cc869e9c57cc7877d7178ac42472de1f1e0820f6ac8 \
	--control-plane --certificate-key 7da92cec26ab0e8d5f1a13f203751d8e3c06a1a9054711285fbf7388574edfca

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.2.37:9443 --token 7sjz7s.9dcl3iv9qgb4l425 \
	--discovery-token-ca-cert-hash sha256:6089433b9d69ab3eea083cc869e9c57cc7877d7178ac42472de1f1e0820f6ac8

6.1 Join the other control plane nodes (run on master02 and master03)

Run the control-plane join command as printed; a successful run logs the following:

kubeadm join 192.168.2.37:9443 --token 7sjz7s.9dcl3iv9qgb4l425 --discovery-token-ca-cert-hash sha256:6089433b9d69ab3eea083cc869e9c57cc7877d7178ac42472de1f1e0820f6ac8 --control-plane --certificate-key 7da92cec26ab0e8d5f1a13f203751d8e3c06a1a9054711285fbf7388574edfca -v=5
I0604 01:23:18.723943   13639 join.go:417] [preflight] found NodeName empty; using OS hostname as NodeName
I0604 01:23:18.724302   13639 join.go:421] [preflight] found advertiseAddress empty; using default interface's IP address as advertiseAddress
I0604 01:23:18.726829   13639 initconfiguration.go:122] detected and using CRI socket: unix:///var/run/containerd/containerd.sock
I0604 01:23:18.727388   13639 interface.go:432] Looking for default routes with IPv4 addresses
I0604 01:23:18.727417   13639 interface.go:437] Default route transits interface "ens33"
I0604 01:23:18.728112   13639 interface.go:209] Interface ens33 is up
I0604 01:23:18.728252   13639 interface.go:257] Interface "ens33" has 2 addresses :[192.168.2.33/24 fe80::250:56ff:fe95:c314/64].
I0604 01:23:18.728307   13639 interface.go:224] Checking addr  192.168.2.33/24.
I0604 01:23:18.728332   13639 interface.go:231] IP found 192.168.2.33
I0604 01:23:18.732447   13639 interface.go:263] Found valid IPv4 address 192.168.2.33 for interface "ens33".
I0604 01:23:18.732497   13639 interface.go:443] Found active IP 192.168.2.33 
[preflight] Running pre-flight checks
I0604 01:23:18.736045   13639 preflight.go:93] [preflight] Running general checks
I0604 01:23:18.736293   13639 checks.go:278] validating the existence of file /etc/kubernetes/kubelet.conf
I0604 01:23:18.736352   13639 checks.go:278] validating the existence of file /etc/kubernetes/bootstrap-kubelet.conf
I0604 01:23:18.736388   13639 checks.go:102] validating the container runtime
I0604 01:23:18.817027   13639 checks.go:637] validating whether swap is enabled or not
I0604 01:23:18.817231   13639 checks.go:368] validating the presence of executable crictl
I0604 01:23:18.817323   13639 checks.go:368] validating the presence of executable conntrack
I0604 01:23:18.817410   13639 checks.go:368] validating the presence of executable ip
I0604 01:23:18.817458   13639 checks.go:368] validating the presence of executable iptables
I0604 01:23:18.817579   13639 checks.go:368] validating the presence of executable mount
I0604 01:23:18.817708   13639 checks.go:368] validating the presence of executable nsenter
I0604 01:23:18.817828   13639 checks.go:368] validating the presence of executable ebtables
I0604 01:23:18.817911   13639 checks.go:368] validating the presence of executable ethtool
I0604 01:23:18.817982   13639 checks.go:368] validating the presence of executable socat
I0604 01:23:18.818080   13639 checks.go:368] validating the presence of executable tc
I0604 01:23:18.818629   13639 checks.go:368] validating the presence of executable touch
I0604 01:23:18.818868   13639 checks.go:514] running all checks
I0604 01:23:18.870965   13639 checks.go:399] checking whether the given node name is valid and reachable using net.LookupHost
I0604 01:23:18.872772   13639 checks.go:603] validating kubelet version
I0604 01:23:19.823078   13639 checks.go:128] validating if the "kubelet" service is enabled and active
	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0604 01:23:19.847918   13639 checks.go:201] validating availability of port 10250
I0604 01:23:19.848831   13639 checks.go:428] validating if the connectivity type is via proxy or direct
I0604 01:23:19.849013   13639 checks.go:327] validating the contents of file /proc/sys/net/ipv4/ip_forward
I0604 01:23:19.849118   13639 join.go:536] [preflight] Discovering cluster-info
I0604 01:23:19.849250   13639 token.go:79] [discovery] Created cluster-info discovery client, requesting info from "192.168.2.37:9443"
I0604 01:23:19.850875   13639 token.go:210] [discovery] Waiting for the cluster-info ConfigMap to receive a JWS signaturefor token ID "7sjz7s"
I0604 01:23:19.885263   13639 token.go:117] [discovery] Requesting info from "192.168.2.37:9443" again to validate TLS against the pinned public key
I0604 01:23:19.886123   13639 token.go:210] [discovery] Waiting for the cluster-info ConfigMap to receive a JWS signaturefor token ID "7sjz7s"
I0604 01:23:19.909873   13639 token.go:134] [discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.2.37:9443"
I0604 01:23:19.909995   13639 discovery.go:52] [discovery] Using provided TLSBootstrapToken as authentication credentials for the join process
I0604 01:23:19.910030   13639 join.go:550] [preflight] Fetching init configuration
I0604 01:23:19.910046   13639 join.go:596] [preflight] Retrieving KubeConfig objects
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
I0604 01:23:19.932994   13639 kubeproxy.go:55] attempting to download the KubeProxyConfiguration from ConfigMap "kube-proxy"
I0604 01:23:19.943650   13639 kubelet.go:74] attempting to download the KubeletConfiguration from ConfigMap "kubelet-config"
I0604 01:23:19.964809   13639 initconfiguration.go:114] skip CRI socket detection, fill with the default CRI socket unix:///var/run/containerd/containerd.sock
I0604 01:23:19.965270   13639 interface.go:432] Looking for default routes with IPv4 addresses
I0604 01:23:19.965296   13639 interface.go:437] Default route transits interface "ens33"
I0604 01:23:19.966375   13639 interface.go:209] Interface ens33 is up
I0604 01:23:19.966477   13639 interface.go:257] Interface "ens33" has 2 addresses :[192.168.2.33/24 fe80::250:56ff:fe95:c314/64].
I0604 01:23:19.966517   13639 interface.go:224] Checking addr  192.168.2.33/24.
I0604 01:23:19.966541   13639 interface.go:231] IP found 192.168.2.33
I0604 01:23:19.966560   13639 interface.go:263] Found valid IPv4 address 192.168.2.33 for interface "ens33".
I0604 01:23:19.966581   13639 interface.go:443] Found active IP 192.168.2.33 
I0604 01:23:19.974404   13639 preflight.go:104] [preflight] Running configuration dependant checks
[preflight] Running pre-flight checks before initializing the new control plane instance
I0604 01:23:19.974499   13639 checks.go:561] validating Kubernetes and kubeadm version
I0604 01:23:19.974538   13639 checks.go:166] validating if the firewall is enabled and active
I0604 01:23:20.002724   13639 checks.go:201] validating availability of port 6443
I0604 01:23:20.003085   13639 checks.go:201] validating availability of port 10259
I0604 01:23:20.003292   13639 checks.go:201] validating availability of port 10257
I0604 01:23:20.003501   13639 checks.go:278] validating the existence of file /etc/kubernetes/manifests/kube-apiserver.yaml
I0604 01:23:20.003547   13639 checks.go:278] validating the existence of file /etc/kubernetes/manifests/kube-controller-manager.yaml
I0604 01:23:20.003571   13639 checks.go:278] validating the existence of file /etc/kubernetes/manifests/kube-scheduler.yaml
I0604 01:23:20.003653   13639 checks.go:278] validating the existence of file /etc/kubernetes/manifests/etcd.yaml
I0604 01:23:20.003679   13639 checks.go:428] validating if the connectivity type is via proxy or direct
I0604 01:23:20.004064   13639 checks.go:467] validating http connectivity to first IP address in the CIDR
I0604 01:23:20.004116   13639 checks.go:467] validating http connectivity to first IP address in the CIDR
I0604 01:23:20.004734   13639 checks.go:201] validating availability of port 2379
I0604 01:23:20.004961   13639 checks.go:201] validating availability of port 2380
I0604 01:23:20.005180   13639 checks.go:241] validating the existence and emptiness of directory /var/lib/etcd
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I0604 01:23:20.005443   13639 checks.go:830] using image pull policy: IfNotPresent
I0604 01:23:20.164965   13639 checks.go:870] pulling: registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.1
I0604 01:23:35.350776   13639 checks.go:870] pulling: registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.1
I0604 01:23:50.983798   13639 checks.go:870] pulling: registry.aliyuncs.com/google_containers/kube-scheduler:v1.30.1
I0604 01:24:02.347691   13639 checks.go:862] image exists: registry.aliyuncs.com/google_containers/kube-proxy:v1.30.1
I0604 01:24:02.595458   13639 checks.go:870] pulling: registry.aliyuncs.com/google_containers/coredns:v1.11.1
I0604 01:24:12.541394   13639 checks.go:862] image exists: registry.aliyuncs.com/google_containers/pause:3.9
I0604 01:24:12.612063   13639 checks.go:870] pulling: registry.aliyuncs.com/google_containers/etcd:3.5.12-0
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[download-certs] Saving the certificates to the folder: "/etc/kubernetes/pki"
[certs] Using certificateDir folder "/etc/kubernetes/pki"
I0604 01:24:36.637667   13639 certs.go:47] creating PKI assets
I0604 01:24:36.637898   13639 certs.go:483] validating certificate period for etcd/ca certificate
I0604 01:24:36.638364   13639 certlist.go:156] [certs] Using the existing CA certificate "/etc/kubernetes/pki/etcd/ca.crt" and key "/etc/kubernetes/pki/etcd/ca.key"
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master03 localhost] and IPs [192.168.2.33 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master03 localhost] and IPs [192.168.2.33 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
I0604 01:24:41.366300   13639 certs.go:483] validating certificate period for front-proxy-ca certificate
I0604 01:24:41.366757   13639 certlist.go:156] [certs] Using the existing CA certificate "/etc/kubernetes/pki/front-proxy-ca.crt" and key "/etc/kubernetes/pki/front-proxy-ca.key"
[certs] Generating "front-proxy-client" certificate and key
I0604 01:24:43.650375   13639 certs.go:483] validating certificate period for ca certificate
I0604 01:24:43.650789   13639 certlist.go:156] [certs] Using the existing CA certificate "/etc/kubernetes/pki/ca.crt" and key "/etc/kubernetes/pki/ca.key"
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master03 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.2.33 192.168.2.37]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
I0604 01:24:44.459501   13639 certs.go:78] creating new public/private key files for signing service account users
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
W0604 01:24:44.461035   13639 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
W0604 01:24:45.929188   13639 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
W0604 01:24:46.889032   13639 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
I0604 01:24:47.441577   13639 manifests.go:103] [control-plane] getting StaticPodSpecs
I0604 01:24:47.443732   13639 certs.go:483] validating certificate period for CA certificate
I0604 01:24:47.443982   13639 manifests.go:129] [control-plane] adding volume "ca-certs" for component "kube-apiserver"
I0604 01:24:47.444007   13639 manifests.go:129] [control-plane] adding volume "etc-pki" for component "kube-apiserver"
I0604 01:24:47.444020   13639 manifests.go:129] [control-plane] adding volume "k8s-certs" for component "kube-apiserver"
I0604 01:24:47.453020   13639 manifests.go:158] [control-plane] wrote static Pod manifest for component "kube-apiserver" to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
I0604 01:24:47.453066   13639 manifests.go:103] [control-plane] getting StaticPodSpecs
I0604 01:24:47.453483   13639 manifests.go:129] [control-plane] adding volume "ca-certs" for component "kube-controller-manager"
I0604 01:24:47.453508   13639 manifests.go:129] [control-plane] adding volume "etc-pki" for component "kube-controller-manager"
I0604 01:24:47.453522   13639 manifests.go:129] [control-plane] adding volume "flexvolume-dir" for component "kube-controller-manager"
I0604 01:24:47.453535   13639 manifests.go:129] [control-plane] adding volume "k8s-certs" for component "kube-controller-manager"
I0604 01:24:47.453548   13639 manifests.go:129] [control-plane] adding volume "kubeconfig" for component "kube-controller-manager"
I0604 01:24:47.455340   13639 manifests.go:158] [control-plane] wrote static Pod manifest for component "kube-controller-manager" to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[control-plane] Creating static Pod manifest for "kube-scheduler"
I0604 01:24:47.455381   13639 manifests.go:103] [control-plane] getting StaticPodSpecs
I0604 01:24:47.455779   13639 manifests.go:129] [control-plane] adding volume "kubeconfig" for component "kube-scheduler"
I0604 01:24:47.456861   13639 manifests.go:158] [control-plane] wrote static Pod manifest for component "kube-scheduler" to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[check-etcd] Checking that the etcd cluster is healthy
I0604 01:24:47.459941   13639 local.go:71] [etcd] Checking etcd cluster health
I0604 01:24:47.459984   13639 local.go:74] creating etcd client that connects to etcd pods
I0604 01:24:47.460461   13639 etcd.go:215] retrieving etcd endpoints from "kubeadm.kubernetes.io/etcd.advertise-client-urls" annotation in etcd Pods
I0604 01:24:47.500282   13639 etcd.go:149] etcd endpoints read from pods: https://192.168.2.31:2379,https://192.168.2.32:2379
I0604 01:24:47.546537   13639 etcd.go:274] etcd endpoints read from etcd: https://192.168.2.31:2379,https://192.168.2.32:2379
I0604 01:24:47.546954   13639 etcd.go:167] update etcd endpoints: https://192.168.2.31:2379,https://192.168.2.32:2379
I0604 01:24:47.665470   13639 kubelet.go:122] [kubelet-start] writing bootstrap kubelet config file at /etc/kubernetes/bootstrap-kubelet.conf
I0604 01:24:47.667819   13639 kubelet.go:158] [kubelet-start] Checking for an existing Node in the cluster with name "k8s-master03" and status "Ready"
I0604 01:24:47.682386   13639 kubelet.go:173] [kubelet-start] Stopping the kubelet
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-check] Waiting for a healthy kubelet. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 2.003406683s
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap
I0604 01:24:50.073308   13639 kubelet.go:227] [kubelet-start] preserving the crisocket information for the node
I0604 01:24:50.073405   13639 patchnode.go:31] [patchnode] Uploading the CRI Socket information "unix:///var/run/containerd/containerd.sock" to the Node API object "k8s-master03" as an annotation
I0604 01:24:50.074079   13639 cert_rotation.go:137] Starting client certificate rotation controller
I0604 01:24:50.566011   13639 local.go:143] creating etcd client that connects to etcd pods
I0604 01:24:50.566085   13639 etcd.go:215] retrieving etcd endpoints from "kubeadm.kubernetes.io/etcd.advertise-client-urls" annotation in etcd Pods
I0604 01:24:50.585103   13639 etcd.go:149] etcd endpoints read from pods: https://192.168.2.31:2379,https://192.168.2.32:2379
I0604 01:24:50.683504   13639 etcd.go:274] etcd endpoints read from etcd: https://192.168.2.31:2379,https://192.168.2.32:2379
I0604 01:24:50.683548   13639 etcd.go:167] update etcd endpoints: https://192.168.2.31:2379,https://192.168.2.32:2379
I0604 01:24:50.683570   13639 local.go:155] [etcd] Adding etcd member: https://192.168.2.33:2380
I0604 01:24:50.747024   13639 etcd.go:434] [etcd] Adding etcd member as learner
[etcd] Announced new etcd member joining to the existing etcd cluster
I0604 01:24:50.830936   13639 local.go:165] Updated etcd member list: [{k8s-master01 https://192.168.2.31:2380} {k8s-master03 https://192.168.2.33:2380} {k8s-master02 https://192.168.2.32:2380}]
[etcd] Creating static Pod manifest for "etcd"
I0604 01:24:50.967829   13639 etcd.go:524] [etcd] Promoting a learner as a voting member: 790d1b59a390f4fd
{"level":"warn","ts":"2024-06-04T01:24:51.046447+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:51.046878   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:51.560851+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:51.561017   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:52.043372+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:52.043555   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:52.54165+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:52.541763   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:53.036746+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:53.036849   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:53.57984+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:53.580124   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:54.0379+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:54.038001   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:54.543696+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:54.543786   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:55.03974+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:55.039828   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:55.543713+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = FailedPrecondition desc = etcdserver: can only promote a learner member which is in sync with leader"}
I0604 01:24:55.543796   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: can only promote a learner member which is in sync with leader
{"level":"warn","ts":"2024-06-04T01:24:56.037771+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00045fdc0/192.168.2.31:2379","attempt":0,"error":"rpc error: code = Unavailable desc = etcdserver: rpc not supported for learner"}
I0604 01:24:56.037867   13639 etcd.go:550] [etcd] Promoting the learner 790d1b59a390f4fd failed: etcdserver: rpc not supported for learner
I0604 01:24:58.391126   13639 etcd.go:547] [etcd] The learner was promoted as a voting member: 790d1b59a390f4fd
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
I0604 01:24:58.483991   13639 etcd.go:608] [etcd] attempting to see if all cluster endpoints ([https://192.168.2.31:2379 https://192.168.2.32:2379 https://192.168.2.33:2379]) are available 1/8
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node k8s-master03 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master03 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
I0604 01:24:59.677511   13639 waitcontrolplane.go:64] [wait-control-plane] Skipping phase as the feature gate WaitForAllControlPlaneComponents is disabled

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.

If the join command has been lost, a new one can be generated manually:

 kubeadm token create --print-join-command
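
That regenerates the worker join command only. For an additional control plane node the certificate key must also be refreshed, since the uploaded certificates are deleted after two hours, as the init output above notes; a hedged sketch:

# re-upload the control plane certificates; the last line printed is the new certificate key
kubeadm init phase upload-certs --upload-certs
# then append to the join command produced above:
#   --control-plane --certificate-key <key printed by the previous command>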

6.2 Join the worker nodes (run on node01, node02, and node03)

kubeadm join 192.168.2.37:9443 --token 7sjz7s.9dcl3iv9qgb4l425 --discovery-token-ca-cert-hash sha256:6089433b9d69ab3eea083cc869e9c57cc7877d7178ac42472de1f1e0820f6ac8 -v=5

Detailed log:

I0604 01:52:13.709590   11485 join.go:417] [preflight] found NodeName empty; using OS hostname as NodeName
I0604 01:52:13.710474   11485 initconfiguration.go:122] detected and using CRI socket: unix:///var/run/containerd/containerd.sock
[preflight] Running pre-flight checks
I0604 01:52:13.710708   11485 preflight.go:93] [preflight] Running general checks
I0604 01:52:13.710841   11485 checks.go:278] validating the existence of file /etc/kubernetes/kubelet.conf
I0604 01:52:13.710865   11485 checks.go:278] validating the existence of file /etc/kubernetes/bootstrap-kubelet.conf
I0604 01:52:13.710896   11485 checks.go:102] validating the container runtime
I0604 01:52:13.766999   11485 checks.go:637] validating whether swap is enabled or not
I0604 01:52:13.767162   11485 checks.go:368] validating the presence of executable crictl
I0604 01:52:13.767238   11485 checks.go:368] validating the presence of executable conntrack
I0604 01:52:13.767281   11485 checks.go:368] validating the presence of executable ip
I0604 01:52:13.767322   11485 checks.go:368] validating the presence of executable iptables
I0604 01:52:13.767367   11485 checks.go:368] validating the presence of executable mount
I0604 01:52:13.767422   11485 checks.go:368] validating the presence of executable nsenter
I0604 01:52:13.767467   11485 checks.go:368] validating the presence of executable ebtables
I0604 01:52:13.767509   11485 checks.go:368] validating the presence of executable ethtool
I0604 01:52:13.767546   11485 checks.go:368] validating the presence of executable socat
I0604 01:52:13.767590   11485 checks.go:368] validating the presence of executable tc
I0604 01:52:13.767631   11485 checks.go:368] validating the presence of executable touch
I0604 01:52:13.767683   11485 checks.go:514] running all checks
I0604 01:52:13.788684   11485 checks.go:399] checking whether the given node name is valid and reachable using net.LookupHost
I0604 01:52:13.789169   11485 checks.go:603] validating kubelet version
I0604 01:52:13.904321   11485 checks.go:128] validating if the "kubelet" service is enabled and active
	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0604 01:52:13.920224   11485 checks.go:201] validating availability of port 10250
I0604 01:52:13.920718   11485 checks.go:278] validating the existence of file /etc/kubernetes/pki/ca.crt
I0604 01:52:13.920747   11485 checks.go:428] validating if the connectivity type is via proxy or direct
I0604 01:52:13.920825   11485 checks.go:327] validating the contents of file /proc/sys/net/ipv4/ip_forward
I0604 01:52:13.920887   11485 join.go:536] [preflight] Discovering cluster-info
I0604 01:52:13.920952   11485 token.go:79] [discovery] Created cluster-info discovery client, requesting info from "192.168.2.37:9443"
I0604 01:52:13.922384   11485 token.go:210] [discovery] Waiting for the cluster-info ConfigMap to receive a JWS signaturefor token ID "7sjz7s"
I0604 01:52:13.922726   11485 round_trippers.go:466] curl -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.30.1 (linux/amd64) kubernetes/6911225" 'https://192.168.2.37:9443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s'
I0604 01:52:13.924011   11485 round_trippers.go:510] HTTP Trace: Dial to tcp:192.168.2.37:9443 succeed
I0604 01:52:13.952540   11485 round_trippers.go:553] GET https://192.168.2.37:9443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 29 milliseconds
I0604 01:52:13.952618   11485 round_trippers.go:570] HTTP Statistics: DNSLookup 0 ms Dial 0 ms TLSHandshake 11 ms ServerProcessing 15 ms Duration 29 ms
I0604 01:52:13.952638   11485 round_trippers.go:577] Response Headers:
I0604 01:52:13.952659   11485 round_trippers.go:580]     Content-Type: application/json
I0604 01:52:13.952674   11485 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: 2969d879-0b88-4e03-82ae-8ceb2336a7bb
I0604 01:52:13.952687   11485 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: fe2c46ee-0391-4a7e-a357-0d91588f97f6
I0604 01:52:13.952697   11485 round_trippers.go:580]     Content-Length: 2683
I0604 01:52:13.952710   11485 round_trippers.go:580]     Date: Mon, 03 Jun 2024 17:52:13 GMT
I0604 01:52:13.952721   11485 round_trippers.go:580]     Audit-Id: 65482de1-23c8-486b-bb20-1ff25deac743
I0604 01:52:13.952747   11485 round_trippers.go:580]     Cache-Control: no-cache, private
I0604 01:52:13.953114   11485 request.go:1212] Response Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"cluster-info","namespace":"kube-public","uid":"27254cc7-6568-436b-be88-605f95fb27b5","resourceVersion":"4414","creationTimestamp":"2024-06-03T17:16:47Z","managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:16:47Z","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:kubeconfig":{}}}},{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:50:29Z","fieldsType":"FieldsV1","fieldsV1":{"f:data":{"f:jws-kubeconfig-7kuc59":{},"f:jws-kubeconfig-7sjz7s":{},"f:jws-kubeconfig-gpt9zf":{}}}}]},"data":{"jws-kubeconfig-7kuc59":"eyJhbGciOiJIUzI1NiIsImtpZCI6IjdrdWM1OSJ9..yR1wbuc-309Bs34plPE0nJBjqvyItCT39uZ8eMeyZJo","jws-kubeconfig-7sjz7s":"eyJhbGciOiJIUzI1NiIsImtpZCI6Ijdzano3cyJ9..KIQdxenLZvth80Z3omXS5FF57icRG1EXC6cPZvq-3Uw","jws-kubeconfig-gpt9zf":"eyJhbGciOiJIUzI1NiIsImtpZCI6ImdwdDl6ZiJ9..oVKEyqcNYFd_qd-QOEph4ydZRRDProsF5wWgvWTq700","kubeconfig":"apiVersion: v1\nclusters:\n- cluster:\n    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJQzdoY2hsb3pwcGN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBMk1ETXhOekV3TURoYUZ3MHpOREEyTURFeE56RTFNRGhhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUUNhMmxZVUR0Q1ZmNzVzUlkyZUhiSHJ6alBLMEhMRnJoK29wU1BrSGhoQm5MektZUTJJMUorK0Mrc0kKQjRPZTE2cU9YcHlvQjZTWHZFZlRMZmg5TXhLV3VhV09pNkhGWmNraE9sTU5sTDZUT2pMeUg2UEl0YjhkZE5jOAorc2VpeSthSmpVbDFCL2ZESjc1TU1iRkpubG1SbWVaMTZvS2xNbmp6YTNBeWpuVzFnUDkvdnpLYktDNW14RHJxCmhmbFljU3hUMFFnU2ltNEttWFZkNkJuNHVva29uU0EyWFBHVm9wdklCWmF6OHV2M2h5aTVYMUVsM1hIQ3VwNUwKQURUY3FNY1ZVNWZPV0taL0hsVEJ4c3FSMWt5U3EzUEdzaG04Z2RabGtVWUdSSEJVdlBvWkJsdU5EdmVoQ1J0TwozU0xBYTVhR3hWNWFURGxBMU9GN3R4aTZrRytWQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJSSXE2NEVKck15UlZjWFZhZWI4eXVYb2ZxMzhUQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ2FQVjdNbUxnTQpVemJyOEpYbkEvVWpKL0tvNEhFVkVBVFdYbmRUTFlDclY5TmwxT2xwRTRiZzY2VWRRWXd1WEhRNEN6eDJ0ZUZmCkoxWkVnL01rdUtJemsvQWZIQkQ2OW1rZGNid1c5R1JNakR3NGZlWWtrY1JMeGc5NlRkbUlWelV4UmVhVVhNOG8KN0tieXZIako4L0hhVDBkM0JkV0ZCUTdCOExFZ0Ixcno0YS9Sb3M3YmZ3cUhqQ1Evb1BuUDlrL1F6K2cvc0p3awpBTFo2R2FOUkJhUjJ6OGRWMDQrRzhLK0JCQWdLWjkydHlLazRIQU5FZStqT3d0bVZReUc0T3VZWlhnZ2FsRWllClpkWUtxcmJ2NWpCY0pEOFRrYkw5VngweU1XeExHOGhETnF1T01INWVVTjRrWVRYSzlhVEZrcy8rbjJoUlN0QW8KckRBVFlNekM4SXRCCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K\n    server: https://192.168.2.37:9443\n  name: \"\"\ncontexts: null\ncurrent-context: \"\"\nkind: Config\npreferences: {}\nusers: null\n"}}
I0604 01:52:13.955700   11485 token.go:117] [discovery] Requesting info from "192.168.2.37:9443" again to validate TLS against the pinned public key
I0604 01:52:13.956585   11485 token.go:210] [discovery] Waiting for the cluster-info ConfigMap to receive a JWS signaturefor token ID "7sjz7s"
I0604 01:52:13.956771   11485 round_trippers.go:466] curl -v -XGET  -H "User-Agent: kubeadm/v1.30.1 (linux/amd64) kubernetes/6911225" -H "Accept: application/json, */*" 'https://192.168.2.37:9443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s'
I0604 01:52:13.958648   11485 round_trippers.go:510] HTTP Trace: Dial to tcp:192.168.2.37:9443 succeed
I0604 01:52:14.001401   11485 round_trippers.go:553] GET https://192.168.2.37:9443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s 200 OK in 44 milliseconds
I0604 01:52:14.001458   11485 round_trippers.go:570] HTTP Statistics: DNSLookup 0 ms Dial 1 ms TLSHandshake 13 ms ServerProcessing 28 ms Duration 44 ms
I0604 01:52:14.001477   11485 round_trippers.go:577] Response Headers:
I0604 01:52:14.001496   11485 round_trippers.go:580]     Audit-Id: 1565efc8-adf8-4655-9f09-debc7da18d2c
I0604 01:52:14.001510   11485 round_trippers.go:580]     Cache-Control: no-cache, private
I0604 01:52:14.001523   11485 round_trippers.go:580]     Content-Type: application/json
I0604 01:52:14.001537   11485 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: 2969d879-0b88-4e03-82ae-8ceb2336a7bb
I0604 01:52:14.001549   11485 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: fe2c46ee-0391-4a7e-a357-0d91588f97f6
I0604 01:52:14.001559   11485 round_trippers.go:580]     Content-Length: 2683
I0604 01:52:14.001568   11485 round_trippers.go:580]     Date: Mon, 03 Jun 2024 17:52:13 GMT
I0604 01:52:14.002242   11485 request.go:1212] Response Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"cluster-info","namespace":"kube-public","uid":"27254cc7-6568-436b-be88-605f95fb27b5","resourceVersion":"4414","creationTimestamp":"2024-06-03T17:16:47Z","managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:16:47Z","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:kubeconfig":{}}}},{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:50:29Z","fieldsType":"FieldsV1","fieldsV1":{"f:data":{"f:jws-kubeconfig-7kuc59":{},"f:jws-kubeconfig-7sjz7s":{},"f:jws-kubeconfig-gpt9zf":{}}}}]},"data":{"jws-kubeconfig-7kuc59":"eyJhbGciOiJIUzI1NiIsImtpZCI6IjdrdWM1OSJ9..yR1wbuc-309Bs34plPE0nJBjqvyItCT39uZ8eMeyZJo","jws-kubeconfig-7sjz7s":"eyJhbGciOiJIUzI1NiIsImtpZCI6Ijdzano3cyJ9..KIQdxenLZvth80Z3omXS5FF57icRG1EXC6cPZvq-3Uw","jws-kubeconfig-gpt9zf":"eyJhbGciOiJIUzI1NiIsImtpZCI6ImdwdDl6ZiJ9..oVKEyqcNYFd_qd-QOEph4ydZRRDProsF5wWgvWTq700","kubeconfig":"apiVersion: v1\nclusters:\n- cluster:\n    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJQzdoY2hsb3pwcGN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBMk1ETXhOekV3TURoYUZ3MHpOREEyTURFeE56RTFNRGhhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUUNhMmxZVUR0Q1ZmNzVzUlkyZUhiSHJ6alBLMEhMRnJoK29wU1BrSGhoQm5MektZUTJJMUorK0Mrc0kKQjRPZTE2cU9YcHlvQjZTWHZFZlRMZmg5TXhLV3VhV09pNkhGWmNraE9sTU5sTDZUT2pMeUg2UEl0YjhkZE5jOAorc2VpeSthSmpVbDFCL2ZESjc1TU1iRkpubG1SbWVaMTZvS2xNbmp6YTNBeWpuVzFnUDkvdnpLYktDNW14RHJxCmhmbFljU3hUMFFnU2ltNEttWFZkNkJuNHVva29uU0EyWFBHVm9wdklCWmF6OHV2M2h5aTVYMUVsM1hIQ3VwNUwKQURUY3FNY1ZVNWZPV0taL0hsVEJ4c3FSMWt5U3EzUEdzaG04Z2RabGtVWUdSSEJVdlBvWkJsdU5EdmVoQ1J0TwozU0xBYTVhR3hWNWFURGxBMU9GN3R4aTZrRytWQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJSSXE2NEVKck15UlZjWFZhZWI4eXVYb2ZxMzhUQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ2FQVjdNbUxnTQpVemJyOEpYbkEvVWpKL0tvNEhFVkVBVFdYbmRUTFlDclY5TmwxT2xwRTRiZzY2VWRRWXd1WEhRNEN6eDJ0ZUZmCkoxWkVnL01rdUtJemsvQWZIQkQ2OW1rZGNid1c5R1JNakR3NGZlWWtrY1JMeGc5NlRkbUlWelV4UmVhVVhNOG8KN0tieXZIako4L0hhVDBkM0JkV0ZCUTdCOExFZ0Ixcno0YS9Sb3M3YmZ3cUhqQ1Evb1BuUDlrL1F6K2cvc0p3awpBTFo2R2FOUkJhUjJ6OGRWMDQrRzhLK0JCQWdLWjkydHlLazRIQU5FZStqT3d0bVZReUc0T3VZWlhnZ2FsRWllClpkWUtxcmJ2NWpCY0pEOFRrYkw5VngweU1XeExHOGhETnF1T01INWVVTjRrWVRYSzlhVEZrcy8rbjJoUlN0QW8KckRBVFlNekM4SXRCCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K\n    server: https://192.168.2.37:9443\n  name: \"\"\ncontexts: null\ncurrent-context: \"\"\nkind: Config\npreferences: {}\nusers: null\n"}}
I0604 01:52:14.003298   11485 token.go:134] [discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.2.37:9443"
I0604 01:52:14.003355   11485 discovery.go:52] [discovery] Using provided TLSBootstrapToken as authentication credentials for the join process
I0604 01:52:14.003379   11485 join.go:550] [preflight] Fetching init configuration
I0604 01:52:14.003392   11485 join.go:596] [preflight] Retrieving KubeConfig objects
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
I0604 01:52:14.004252   11485 round_trippers.go:466] curl -v -XGET  -H "Authorization: Bearer <masked>" -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.30.1 (linux/amd64) kubernetes/6911225" 'https://192.168.2.37:9443/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s'
I0604 01:52:14.006060   11485 round_trippers.go:510] HTTP Trace: Dial to tcp:192.168.2.37:9443 succeed
I0604 01:52:14.028937   11485 round_trippers.go:553] GET https://192.168.2.37:9443/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s 200 OK in 24 milliseconds
I0604 01:52:14.028992   11485 round_trippers.go:570] HTTP Statistics: DNSLookup 0 ms Dial 1 ms TLSHandshake 11 ms ServerProcessing 10 ms Duration 24 ms
I0604 01:52:14.029011   11485 round_trippers.go:577] Response Headers:
I0604 01:52:14.029032   11485 round_trippers.go:580]     Content-Length: 931
I0604 01:52:14.029059   11485 round_trippers.go:580]     Date: Mon, 03 Jun 2024 17:52:14 GMT
I0604 01:52:14.029124   11485 round_trippers.go:580]     Audit-Id: 06709fac-cc88-42c8-b4b1-14896f678b4f
I0604 01:52:14.029141   11485 round_trippers.go:580]     Cache-Control: no-cache, private
I0604 01:52:14.029151   11485 round_trippers.go:580]     Content-Type: application/json
I0604 01:52:14.029163   11485 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: 2969d879-0b88-4e03-82ae-8ceb2336a7bb
I0604 01:52:14.029177   11485 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: fe2c46ee-0391-4a7e-a357-0d91588f97f6
I0604 01:52:14.029249   11485 request.go:1212] Response Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubeadm-config","namespace":"kube-system","uid":"bc2a1b36-ec94-45d5-826d-ed6a573da6e3","resourceVersion":"235","creationTimestamp":"2024-06-03T17:16:45Z","managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:16:45Z","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:ClusterConfiguration":{}}}}]},"data":{"ClusterConfiguration":"apiServer:\n  timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta3\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: 192.168.2.37:9443\ncontrollerManager: {}\ndns: {}\netcd:\n  local:\n    dataDir: /var/lib/etcd\nimageRepository: registry.aliyuncs.com/google_containers\nkind: ClusterConfiguration\nkubernetesVersion: v1.30.1\nnetworking:\n  dnsDomain: cluster.local\n  podSubnet: 10.244.0.0/16\n  serviceSubnet: 10.96.0.0/12\nscheduler: {}\n"}}
I0604 01:52:14.030906   11485 kubeproxy.go:55] attempting to download the KubeProxyConfiguration from ConfigMap "kube-proxy"
I0604 01:52:14.031137   11485 round_trippers.go:466] curl -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.30.1 (linux/amd64) kubernetes/6911225" -H "Authorization: Bearer <masked>" 'https://192.168.2.37:9443/api/v1/namespaces/kube-system/configmaps/kube-proxy?timeout=10s'
I0604 01:52:14.037360   11485 round_trippers.go:553] GET https://192.168.2.37:9443/api/v1/namespaces/kube-system/configmaps/kube-proxy?timeout=10s 200 OK in 5 milliseconds
I0604 01:52:14.037405   11485 round_trippers.go:570] HTTP Statistics: GetConnection 0 ms ServerProcessing 5 ms Duration 5 ms
I0604 01:52:14.037423   11485 round_trippers.go:577] Response Headers:
I0604 01:52:14.037449   11485 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: fe2c46ee-0391-4a7e-a357-0d91588f97f6
I0604 01:52:14.037461   11485 round_trippers.go:580]     Content-Length: 2614
I0604 01:52:14.037474   11485 round_trippers.go:580]     Date: Mon, 03 Jun 2024 17:52:14 GMT
I0604 01:52:14.037487   11485 round_trippers.go:580]     Audit-Id: 0808cf79-46e8-4c31-9b0c-79551e03c408
I0604 01:52:14.037497   11485 round_trippers.go:580]     Cache-Control: no-cache, private
I0604 01:52:14.037506   11485 round_trippers.go:580]     Content-Type: application/json
I0604 01:52:14.037519   11485 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: 2969d879-0b88-4e03-82ae-8ceb2336a7bb
I0604 01:52:14.037677   11485 request.go:1212] Response Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kube-proxy","namespace":"kube-system","uid":"87343cb1-1192-4c80-a8aa-d850f8d7cbfc","resourceVersion":"281","creationTimestamp":"2024-06-03T17:16:48Z","labels":{"app":"kube-proxy"},"annotations":{"kubeadm.kubernetes.io/component-config.hash":"sha256:86c8c83fc5100d659cfcb6195a2c72b5b138d5a21da0226be93f027b4c932631"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:16:48Z","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:config.conf":{},"f:kubeconfig.conf":{}},"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/component-config.hash":{}},"f:labels":{".":{},"f:app":{}}}}}]},"data":{"config.conf":"apiVersion: kubeproxy.config.k8s.io/v1alpha1\nbindAddress: 0.0.0.0\nbindAddressHardFail: false\nclientConnection:\n  acceptContentTypes: \"\"\n  burst: 0\n  contentType: \"\"\n  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf\n  qps: 0\nclusterCIDR: 10.244.0.0/16\nconfigSyncPeriod: 0s\nconntrack:\n  maxPerCore: null\n  min: null\n  tcpBeLiberal: false\n  tcpCloseWaitTimeout: null\n  tcpEstablishedTimeout: null\n  udpStreamTimeout: 0s\n  udpTimeout: 0s\ndetectLocal:\n  bridgeInterface: \"\"\n  interfaceNamePrefix: \"\"\ndetectLocalMode: \"\"\nenableProfiling: false\nhealthzBindAddress: \"\"\nhostnameOverride: \"\"\niptables:\n  localhostNodePorts: null\n  masqueradeAll: false\n  masqueradeBit: null\n  minSyncPeriod: 0s\n  syncPeriod: 0s\nipvs:\n  excludeCIDRs: null\n  minSyncPeriod: 0s\n  scheduler: \"\"\n  strictARP: false\n  syncPeriod: 0s\n  tcpFinTimeout: 0s\n  tcpTimeout: 0s\n  udpTimeout: 0s\nkind: KubeProxyConfiguration\nlogging:\n  flushFrequency: 0\n  options:\n    json:\n      infoBufferSize: \"0\"\n    text:\n      infoBufferSize: \"0\"\n  verbosity: 0\nmetricsBindAddress: \"\"\nmode: \"\"\nnftables:\n  masqueradeAll: false\n  masqueradeBit: null\n  minSyncPeriod: 0s\n  syncPeriod: 0s\nnodePortAddresses: null\noomScoreAdj: null\nportRange: \"\"\nshowHiddenMetricsForVersion: \"\"\nwinkernel:\n  enableDSR: false\n  forwardHealthCheckVip: false\n  networkName: \"\"\n  rootHnsEndpointName: \"\"\n  sourceVip: \"\"","kubeconfig.conf":"apiVersion: v1\nkind: Config\nclusters:\n- cluster:\n    certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n    server: https://192.168.2.37:9443\n  name: default\ncontexts:\n- context:\n    cluster: default\n    namespace: default\n    user: default\n  name: default\ncurrent-context: default\nusers:\n- name: default\n  user:\n    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token"}}
I0604 01:52:14.041689   11485 kubelet.go:74] attempting to download the KubeletConfiguration from ConfigMap "kubelet-config"
I0604 01:52:14.041852   11485 round_trippers.go:466] curl -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.30.1 (linux/amd64) kubernetes/6911225" -H "Authorization: Bearer <masked>" 'https://192.168.2.37:9443/api/v1/namespaces/kube-system/configmaps/kubelet-config?timeout=10s'
I0604 01:52:14.048246   11485 round_trippers.go:553] GET https://192.168.2.37:9443/api/v1/namespaces/kube-system/configmaps/kubelet-config?timeout=10s 200 OK in 6 milliseconds
I0604 01:52:14.048289   11485 round_trippers.go:570] HTTP Statistics: GetConnection 0 ms ServerProcessing 5 ms Duration 6 ms
I0604 01:52:14.048307   11485 round_trippers.go:577] Response Headers:
I0604 01:52:14.048326   11485 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: fe2c46ee-0391-4a7e-a357-0d91588f97f6
I0604 01:52:14.048337   11485 round_trippers.go:580]     Content-Length: 1752
I0604 01:52:14.048349   11485 round_trippers.go:580]     Date: Mon, 03 Jun 2024 17:52:14 GMT
I0604 01:52:14.048361   11485 round_trippers.go:580]     Audit-Id: 44c38e1c-405e-42b9-9dec-de1ab0747b7c
I0604 01:52:14.048372   11485 round_trippers.go:580]     Cache-Control: no-cache, private
I0604 01:52:14.048383   11485 round_trippers.go:580]     Content-Type: application/json
I0604 01:52:14.048393   11485 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: 2969d879-0b88-4e03-82ae-8ceb2336a7bb
I0604 01:52:14.048632   11485 request.go:1212] Response Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubelet-config","namespace":"kube-system","uid":"fe65526f-7fd7-4e12-9645-1c3f870dacaf","resourceVersion":"238","creationTimestamp":"2024-06-03T17:16:45Z","annotations":{"kubeadm.kubernetes.io/component-config.hash":"sha256:899c230cbcdf566dbb9a10f53cb164093ffdf06654cb687a43698e4215a04c4b"},"managedFields":[{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:16:45Z","fieldsType":"FieldsV1","fieldsV1":{"f:data":{".":{},"f:kubelet":{}},"f:metadata":{"f:annotations":{".":{},"f:kubeadm.kubernetes.io/component-config.hash":{}}}}}]},"data":{"kubelet":"apiVersion: kubelet.config.k8s.io/v1beta1\nauthentication:\n  anonymous:\n    enabled: false\n  webhook:\n    cacheTTL: 0s\n    enabled: true\n  x509:\n    clientCAFile: /etc/kubernetes/pki/ca.crt\nauthorization:\n  mode: Webhook\n  webhook:\n    cacheAuthorizedTTL: 0s\n    cacheUnauthorizedTTL: 0s\ncgroupDriver: systemd\nclusterDNS:\n- 10.96.0.10\nclusterDomain: cluster.local\ncontainerRuntimeEndpoint: \"\"\ncpuManagerReconcilePeriod: 0s\nevictionPressureTransitionPeriod: 0s\nfileCheckFrequency: 0s\nhealthzBindAddress: 127.0.0.1\nhealthzPort: 10248\nhttpCheckFrequency: 0s\nimageMaximumGCAge: 0s\nimageMinimumGCAge: 0s\nkind: KubeletConfiguration\nlogging:\n  flushFrequency: 0\n  options:\n    json:\n      infoBufferSize: \"0\"\n    text:\n      infoBufferSize: \"0\"\n  verbosity: 0\nmemorySwap: {}\nnodeStatusReportFrequency: 0s\nnodeStatusUpdateFrequency: 0s\nrotateCertificates: true\nruntimeRequestTimeout: 0s\nshutdownGracePeriod: 0s\nshutdownGracePeriodCriticalPods: 0s\nstaticPodPath: /etc/kubernetes/manifests\nstreamingConnectionIdleTimeout: 0s\nsyncFrequency: 0s\nvolumeStatsAggPeriod: 0s\n"}}
I0604 01:52:14.052127   11485 initconfiguration.go:114] skip CRI socket detection, fill with the default CRI socket unix:///var/run/containerd/containerd.sock
I0604 01:52:14.052546   11485 interface.go:432] Looking for default routes with IPv4 addresses
I0604 01:52:14.052567   11485 interface.go:437] Default route transits interface "ens33"
I0604 01:52:14.053316   11485 interface.go:209] Interface ens33 is up
I0604 01:52:14.053413   11485 interface.go:257] Interface "ens33" has 2 addresses :[192.168.2.36/24 fe80::250:56ff:fe95:283/64].
I0604 01:52:14.053448   11485 interface.go:224] Checking addr  192.168.2.36/24.
I0604 01:52:14.053469   11485 interface.go:231] IP found 192.168.2.36
I0604 01:52:14.053494   11485 interface.go:263] Found valid IPv4 address 192.168.2.36 for interface "ens33".
I0604 01:52:14.053512   11485 interface.go:443] Found active IP 192.168.2.36 
I0604 01:52:14.060368   11485 preflight.go:104] [preflight] Running configuration dependant checks
I0604 01:52:14.060413   11485 controlplaneprepare.go:225] [download-certs] Skipping certs download
I0604 01:52:14.060445   11485 kubelet.go:122] [kubelet-start] writing bootstrap kubelet config file at /etc/kubernetes/bootstrap-kubelet.conf
I0604 01:52:14.061819   11485 kubelet.go:137] [kubelet-start] writing CA certificate at /etc/kubernetes/pki/ca.crt
I0604 01:52:14.062536   11485 loader.go:395] Config loaded from file:  /etc/kubernetes/bootstrap-kubelet.conf
I0604 01:52:14.063671   11485 kubelet.go:158] [kubelet-start] Checking for an existing Node in the cluster with name "k8s-node03" and status "Ready"
I0604 01:52:14.063884   11485 round_trippers.go:466] curl -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.30.1 (linux/amd64) kubernetes/6911225" -H "Authorization: Bearer <masked>" 'https://192.168.2.37:9443/api/v1/nodes/k8s-node03?timeout=10s'
I0604 01:52:14.074098   11485 round_trippers.go:553] GET https://192.168.2.37:9443/api/v1/nodes/k8s-node03?timeout=10s 404 Not Found in 9 milliseconds
I0604 01:52:14.074159   11485 round_trippers.go:570] HTTP Statistics: GetConnection 0 ms ServerProcessing 8 ms Duration 9 ms
I0604 01:52:14.074178   11485 round_trippers.go:577] Response Headers:
I0604 01:52:14.074199   11485 round_trippers.go:580]     Audit-Id: 8a3b9ca0-2b84-4795-9078-829e404302dd
I0604 01:52:14.074214   11485 round_trippers.go:580]     Cache-Control: no-cache, private
I0604 01:52:14.074226   11485 round_trippers.go:580]     Content-Type: application/json
I0604 01:52:14.074238   11485 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: 2969d879-0b88-4e03-82ae-8ceb2336a7bb
I0604 01:52:14.074248   11485 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: fe2c46ee-0391-4a7e-a357-0d91588f97f6
I0604 01:52:14.074263   11485 round_trippers.go:580]     Content-Length: 190
I0604 01:52:14.074273   11485 round_trippers.go:580]     Date: Mon, 03 Jun 2024 17:52:14 GMT
I0604 01:52:14.074323   11485 request.go:1212] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"nodes \"k8s-node03\" not found","reason":"NotFound","details":{"name":"k8s-node03","kind":"nodes"},"code":404}
I0604 01:52:14.074866   11485 kubelet.go:173] [kubelet-start] Stopping the kubelet
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-check] Waiting for a healthy kubelet. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 2.009081299s
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap
I0604 01:52:16.308360   11485 loader.go:395] Config loaded from file:  /etc/kubernetes/kubelet.conf
I0604 01:52:17.308331   11485 loader.go:395] Config loaded from file:  /etc/kubernetes/kubelet.conf
I0604 01:52:17.310803   11485 loader.go:395] Config loaded from file:  /etc/kubernetes/kubelet.conf
I0604 01:52:17.311609   11485 kubelet.go:227] [kubelet-start] preserving the crisocket information for the node
I0604 01:52:17.311675   11485 patchnode.go:31] [patchnode] Uploading the CRI Socket information "unix:///var/run/containerd/containerd.sock" to the Node API object "k8s-node03" as an annotation
I0604 01:52:17.311871   11485 round_trippers.go:466] curl -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.30.1 (linux/amd64) kubernetes/6911225" 'https://192.168.2.37:9443/api/v1/nodes/k8s-node03?timeout=10s'
I0604 01:52:17.312158   11485 cert_rotation.go:137] Starting client certificate rotation controller
I0604 01:52:17.312965   11485 round_trippers.go:510] HTTP Trace: Dial to tcp:192.168.2.37:9443 succeed
I0604 01:52:17.339488   11485 round_trippers.go:553] GET https://192.168.2.37:9443/api/v1/nodes/k8s-node03?timeout=10s 200 OK in 27 milliseconds
I0604 01:52:17.339549   11485 round_trippers.go:570] HTTP Statistics: DNSLookup 0 ms Dial 0 ms TLSHandshake 12 ms ServerProcessing 13 ms Duration 27 ms
I0604 01:52:17.339565   11485 round_trippers.go:577] Response Headers:
I0604 01:52:17.339744   11485 round_trippers.go:580]     Date: Mon, 03 Jun 2024 17:52:17 GMT
I0604 01:52:17.339858   11485 round_trippers.go:580]     Audit-Id: 377a45f1-d019-47b1-9d5a-33e6a9e3112a
I0604 01:52:17.339880   11485 round_trippers.go:580]     Cache-Control: no-cache, private
I0604 01:52:17.339890   11485 round_trippers.go:580]     Content-Type: application/json
I0604 01:52:17.339897   11485 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: 7d5d349f-2d2c-4676-b784-6391894c76de
I0604 01:52:17.339904   11485 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: 078e9b9b-3fbc-48c1-b348-86c52fdddaa7
I0604 01:52:17.340921   11485 request.go:1212] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"k8s-node03","uid":"03eb4fc4-1690-4110-bbdf-a025fcbcbc73","resourceVersion":"4646","creationTimestamp":"2024-06-03T17:52:16Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"k8s-node03","kubernetes.io/os":"linux"},"annotations":{"node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:52:16Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.244.5.0/24\"":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:52:16Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}}}]},"spec":{"podCIDR":"10.244.5.0/24","podCIDRs":["10.244.5.0/24"],"taints":[{"key":"node.kubernetes.io/not-ready","effect":"NoSchedule"}]},"status":{"capacity":{"cpu":"2","ephemeral-storage":"36805060Ki","hugepages-2Mi":"0","memory":"1865308Ki","pods":"110"},"allocatable":{"cpu":"2","ephemeral-storage":"33919543240","hugepages-2Mi":"0","memory":"1762908Ki","pods":"110"},"conditions":[{"type":"MemoryPressure","status":"False","lastHeartbeatTime":"2024-06-03T17:52:16Z","lastTransitionTime":"2024-06-03T17:52:16Z","reason":"KubeletHasSufficientMemory","message":"kubelet has sufficient memory available"},{"type":"DiskPressure","status":"False","lastHeartbeatTime":"2024-06-03T17:52:16Z","lastTransitionTime":"2024-06-03T17:52:16Z","reason":"KubeletHasNoDiskPressure","message":"kubelet has no disk pressure"},{"type":"PIDPressure","status":"False","lastHeartbeatTime":"2024-06-03T17:52:16Z","lastTransitionTime":"2024-06-03T17:52:16Z","reason":"KubeletHasSufficientPID","message":"kubelet has sufficient PID available"},{"type":"Ready","status":"False","lastHeartbeatTime":"2024-06-03T17:52:16Z","lastTransitionTime":"2024-06-03T17:52:16Z","reason":"KubeletNotReady","message":"[PLEG is not healthy: pleg has yet to be successful, CSINode is not yet initialized]"}],"addresses":[{"type":"InternalIP","address":"192.168.2.36"},{"type":"Hostname","address":"k8s-node03"}],"daemonEndpoints":{"kubeletEndpoint":{"Port":10250}},"nodeInfo":{"machineID":"2f93c8f8df524d9abcb804f316d6910f","systemUUID":"CF651542-BFD7-F3B5-8511-D6A6C2A377D0","bootID":"6994e11d-e18c-4248-8fed-542d8bd43069","kernelVersion":"3.10.0-862.el7.x86_64","osImage":"CentOS Linux 7 
(Core)","containerRuntimeVersion":"containerd://1.7.17","kubeletVersion":"v1.30.1","kubeProxyVersion":"v1.30.1","operatingSystem":"linux","architecture":"amd64"},"images":[{"names":["registry.aliyuncs.com/google_containers/kube-apiserver@sha256:fd55381fb07b1fbef20b58d2ad814510f01e3a204118b6a5a4695275dca19677","registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.1"],"sizeBytes":32766275},{"names":["docker.io/flannel/flannel@sha256:dca6d372772e3c412d1f3a0f72519af9a0d0c9a4a4960ae15de25d54287d446e","docker.io/flannel/flannel:v0.25.3"],"sizeBytes":31425237},{"names":["registry.aliyuncs.com/google_containers/kube-controller-manager@sha256:30db1213774225b37133b96d95f27803ffd27ed6ea7506fd5e8fd23ba37e72e8","registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.1"],"sizeBytes":31135939},{"names":["registry.aliyuncs.com/google_containers/kube-proxy@sha256:fcd3b08795e3f5e2edf35582f3139149db20fd2bb21d2b184e75477b692c0f04","registry.aliyuncs.com/google_containers/kube-proxy:v1.30.1"],"sizeBytes":29020041},{"names":["docker.io/flannel/flannel-cni-plugin@sha256:e88c0d84fa89679eb6cb6a28bc257d652ced8d1b2e44d54a592f0a2cd85dba53","docker.io/flannel/flannel-cni-plugin:v1.4.1-flannel1"],"sizeBytes":4710551},{"names":["registry.aliyuncs.com/google_containers/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097","registry.aliyuncs.com/google_containers/pause:3.9"],"sizeBytes":321520}]}}
I0604 01:52:17.344142   11485 request.go:1212] Request Body: {"metadata":{"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/containerd/containerd.sock"}}}
I0604 01:52:17.344269   11485 round_trippers.go:466] curl -v -XPATCH  -H "Accept: application/json, */*" -H "Content-Type: application/strategic-merge-patch+json" -H "User-Agent: kubeadm/v1.30.1 (linux/amd64) kubernetes/6911225" 'https://192.168.2.37:9443/api/v1/nodes/k8s-node03?timeout=10s'
I0604 01:52:17.381625   11485 round_trippers.go:553] PATCH https://192.168.2.37:9443/api/v1/nodes/k8s-node03?timeout=10s 200 OK in 37 milliseconds
I0604 01:52:17.381675   11485 round_trippers.go:570] HTTP Statistics: GetConnection 0 ms ServerProcessing 36 ms Duration 37 ms
I0604 01:52:17.381692   11485 round_trippers.go:577] Response Headers:
I0604 01:52:17.381709   11485 round_trippers.go:580]     Audit-Id: 20cc26d2-cb65-4c69-8a24-63f0e6bd1906
I0604 01:52:17.381721   11485 round_trippers.go:580]     Cache-Control: no-cache, private
I0604 01:52:17.381731   11485 round_trippers.go:580]     Content-Type: application/json
I0604 01:52:17.381740   11485 round_trippers.go:580]     X-Kubernetes-Pf-Flowschema-Uid: 7d5d349f-2d2c-4676-b784-6391894c76de
I0604 01:52:17.381750   11485 round_trippers.go:580]     X-Kubernetes-Pf-Prioritylevel-Uid: 078e9b9b-3fbc-48c1-b348-86c52fdddaa7
I0604 01:52:17.381762   11485 round_trippers.go:580]     Date: Mon, 03 Jun 2024 17:52:17 GMT
I0604 01:52:17.384735   11485 request.go:1212] Response Body: {"kind":"Node","apiVersion":"v1","metadata":{"name":"k8s-node03","uid":"03eb4fc4-1690-4110-bbdf-a025fcbcbc73","resourceVersion":"4665","creationTimestamp":"2024-06-03T17:52:16Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"k8s-node03","kubernetes.io/os":"linux"},"annotations":{"kubeadm.alpha.kubernetes.io/cri-socket":"unix:///var/run/containerd/containerd.sock","node.alpha.kubernetes.io/ttl":"0","volumes.kubernetes.io/controller-managed-attach-detach":"true"},"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:52:16Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.244.5.0/24\"":{}}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:52:16Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/os":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{}}}}},{"manager":"kubeadm","operation":"Update","apiVersion":"v1","time":"2024-06-03T17:52:17Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeadm.alpha.kubernetes.io/cri-socket":{}}}}}]},"spec":{"podCIDR":"10.244.5.0/24","podCIDRs":["10.244.5.0/24"],"taints":[{"key":"node.kubernetes.io/not-ready","effect":"NoSchedule"}]},"status":{"capacity":{"cpu":"2","ephemeral-storage":"36805060Ki","hugepages-2Mi":"0","memory":"1865308Ki","pods":"110"},"allocatable":{"cpu":"2","ephemeral-storage":"33919543240","hugepages-2Mi":"0","memory":"1762908Ki","pods":"110"},"conditions":[{"type":"MemoryPressure","status":"False","lastHeartbeatTime":"2024-06-03T17:52:16Z","lastTransitionTime":"2024-06-03T17:52:16Z","reason":"KubeletHasSufficientMemory","message":"kubelet has sufficient memory available"},{"type":"DiskPressure","status":"False","lastHeartbeatTime":"2024-06-03T17:52:16Z","lastTransitionTime":"2024-06-03T17:52:16Z","reason":"KubeletHasNoDiskPressure","message":"kubelet has no disk pressure"},{"type":"PIDPressure","status":"False","lastHeartbeatTime":"2024-06-03T17:52:16Z","lastTransitionTime":"2024-06-03T17:52:16Z","reason":"KubeletHasSufficientPID","message":"kubelet has sufficient PID available"},{"type":"Ready","status":"False","lastHeartbeatTime":"2024-06-03T17:52:16Z","lastTransitionTime":"2024-06-03T17:52:16Z","reason":"KubeletNotReady","message":"[PLEG is not healthy: pleg has yet to be successful, CSINode is not yet initialized]"}],"addresses":[{"type":"InternalIP","address":"192.168.2.36"},{"type":"Hostname","address":"k8s-node03"}],"daemonEndpoints":{"kubeletEndpoint":{"Port":10250}},"nodeInfo":{"machineID":"2f93c8f8df524d9abcb804f316d6910f","systemUUID":"CF651542-BFD7-F3B5-8511-D6A6C2A377D0","bootID":"6994e11d-e18c-4248-8fed-542d8bd43069","kernelVersion":"3.10.0-862.el7.x86_64","osImage":"CentOS Linux 7 
(Core)","containerRuntimeVersion":"containerd://1.7.17","kubeletVersion":"v1.30.1","kubeProxyVersion":"v1.30.1","operatingSystem":"linux","architecture":"amd64"},"images":[{"names":["registry.aliyuncs.com/google_containers/kube-apiserver@sha256:fd55381fb07b1fbef20b58d2ad814510f01e3a204118b6a5a4695275dca19677","registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.1"],"sizeBytes":32766275},{"names":["docker.io/flannel/flannel@sha256:dca6d372772e3c412d1f3a0f72519af9a0d0c9a4a4960ae15de25d54287d446e","docker.io/flannel/flannel:v0.25.3"],"sizeBytes":31425237},{"names":["registry.aliyuncs.com/google_containers/kube-controller-manager@sha256:30db1213774225b37133b96d95f27803ffd27ed6ea7506fd5e8fd23ba37e72e8","registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.1"],"sizeBytes":31135939},{"names":["registry.aliyuncs.com/google_containers/kube-proxy@sha256:fcd3b08795e3f5e2edf35582f3139149db20fd2bb21d2b184e75477b692c0f04","registry.aliyuncs.com/google_containers/kube-proxy:v1.30.1"],"sizeBytes":29020041},{"names":["docker.io/flannel/flannel-cni-plugin@sha256:e88c0d84fa89679eb6cb6a28bc257d652ced8d1b2e44d54a592f0a2cd85dba53","docker.io/flannel/flannel-cni-plugin:v1.4.1-flannel1"],"sizeBytes":4710551},{"names":["registry.aliyuncs.com/google_containers/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097","registry.aliyuncs.com/google_containers/pause:3.9"],"sizeBytes":321520}]}}

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
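
The verbose log above shows kubeadm validating the cluster-info ConfigMap against the pinned CA, fetching the kubeadm-config, kube-proxy and kubelet-config ConfigMaps through the VIP 192.168.2.37:9443, and then bootstrapping the kubelet on k8s-node03. If another node needs to join after the original bootstrap token has expired (kubeadm tokens default to a 24-hour TTL), a fresh join command can be printed on any control-plane node. A minimal sketch:

# Run on any control-plane node; prints a new worker join command for this cluster
kubeadm token create --print-join-command
# Expected form (the token and hash values will differ):
# kubeadm join 192.168.2.37:9443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>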

7. Verify the cluster

Run on a master node:

[root@k8s-master01 ~]# kubectl get no
NAME           STATUS   ROLES           AGE     VERSION
k8s-master01   Ready    control-plane   35m     v1.30.1
k8s-master02   Ready    control-plane   28m     v1.30.1
k8s-master03   Ready    control-plane   27m     v1.30.1
k8s-node01     Ready    <none>          11m     v1.30.1
k8s-node02     Ready    <none>          6m37s   v1.30.1
k8s-node03     Ready    <none>          8s      v1.30.1
[root@k8s-master01 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
controller-manager   Healthy   ok        
scheduler            Healthy   ok        
etcd-0               Healthy   ok
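
Because v1 ComponentStatus is deprecated, the apiserver health endpoints and the kube-system Pods give a more reliable picture of cluster health. A sketch of additional checks, assuming flannel was deployed from its standard manifest (namespace may be kube-flannel or kube-system depending on the manifest version) and that anonymous access to /healthz is still allowed by the default system:public-info-viewer binding:

# Confirm the control-plane and CNI Pods are Running on all nodes
kubectl get pods -n kube-system -o wide
kubectl get pods -n kube-flannel -o wide
# Query aggregated apiserver health through the NGINX VIP (goes through the 9443 load balancer)
curl -k https://192.168.2.37:9443/healthz
# Detailed readiness checks via kubectl's authenticated raw access
kubectl get --raw='/readyz?verbose'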
