Installing Kubernetes 1.23.5 with kubeadm

Posted by 哈哈哈hh on 2022-04-26

1. Environment preparation

1.1 Configure the Aliyun yum repo

yum -y install wget
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all
yum makecache
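
A quick sanity check that the Aliyun repos are now active (optional):

# The enabled repo list should now show the Aliyun mirrors
yum repolist enabled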

1.2 Disable the firewall

# Check the firewall status
firewall-cmd --state
# Stop the firewall for the current session
systemctl stop firewalld.service
# Keep the firewall from starting on boot
systemctl disable firewalld.service

1.3 Disable SELinux

# Check the SELinux status
getenforce
# Put SELinux in permissive mode for the current session
setenforce 0
# Disable SELinux permanently
sed -i 's/^ *SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

1.4 Disable swap

# Turn swap off for the current session
swapoff -a
# Disable swap permanently (comment out the swap lines in /etc/fstab)
sed -i.bak '/swap/s/^/#/' /etc/fstab
# Verify
free -g

1.5 Kernel parameters and modules

Load the required kernel modules:

cat <<EOF> /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF> /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter

Set the required sysctl parameters so that iptables can see bridged traffic; these settings persist across reboots:

cat <<EOF> /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# Apply the sysctl parameters without rebooting
sudo sysctl --system
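
To verify that the parameters took effect, the three keys can be queried directly; each should print 1:

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward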

1.6 Enable IPVS

Without IPVS, kube-proxy falls back to iptables for packet forwarding, which is less efficient, so enabling IPVS is recommended:

cat <<EOF> /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Load the modules
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Install the ipset package
yum install ipset -y
# Install the ipvsadm management tool
yum install ipvsadm -y
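
ipvsadm becomes useful once kube-proxy is actually running in IPVS mode (after section 2.2); it can then dump the virtual-server table that kube-proxy maintains:

# Run after the cluster is up: list IPVS virtual servers and their backends
ipvsadm -Ln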

1.7 Synchronize server time

yum install chrony -y
systemctl enable chronyd
systemctl start chronyd
[root@master ~]# chronyc sources
210 Number of sources = 4
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^- ntp.wdc1.us.leaseweb.net      2   9   201   329  -8125us[-8125us] +/-  264ms
^- ntp5.flashdance.cx            2   9   373   189    -43ms[  -43ms] +/-  223ms
^+ time.cloudflare.com           3   8   377   197    +38ms[  +38ms] +/-  121ms
^* 119.28.183.184                2   8   155   30m  -8460us[  -13ms] +/-   67ms
[root@master ~]# date
2022年 03月 26日 星期六 15:11:32 CST

1.8 Install containerd

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Check the available versions
yum list containerd --showduplicates | sort -r
yum install containerd -y
# This installs `containerd.io-1.5.11-3.1.el7.x86_64`
containerd config default > /etc/containerd/config.toml
systemctl start containerd
systemctl enable containerd

Configuration:

# Switch the cgroup driver to systemd
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml
# Switch the sandbox (pause) image to the Aliyun mirror
sed -i 's#sandbox_image = "k8s.gcr.io/pause:3.5"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"#' /etc/containerd/config.toml
systemctl daemon-reload
systemctl restart containerd
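
A quick grep confirms that both replacements landed in the rendered config:

grep -E 'SystemdCgroup|sandbox_image' /etc/containerd/config.toml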

Install the CRI client crictl, picking the release that matches the cluster version (v1.23.0 here):

wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.23.0/crictl-v1.23.0-linux-amd64.tar.gz
tar zxvf crictl-v1.23.0-linux-amd64.tar.gz -C /usr/local/bin
cat <<EOF> /etc/crictl.yaml 
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
# Verify that it works
crictl pull nginx:alpine
crictl images
crictl rmi nginx:alpine

1.9 Set the hostname and hosts

Set the hostname:

# On the master node
hostnamectl set-hostname master
# On node1
hostnamectl set-hostname node1
# On node2
hostnamectl set-hostname node2

Add the hosts entries (append rather than overwrite, so the existing localhost lines are preserved):

cat <<EOF>> /etc/hosts
192.168.4.27   master
192.168.4.28   node1 
192.168.4.29   node2
EOF

2. Install Kubernetes

2.1 Install kubelet, kubeadm, and kubectl

Add the Kubernetes repo:

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Then install kubeadm, kubelet, and kubectl:

# Check the available versions; the latest is 1.23.5-0
yum list kubeadm --showduplicates | sort -r
yum install -y kubelet-1.23.5-0 kubectl-1.23.5-0 kubeadm-1.23.5-0
[root@master ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.5", GitCommit:"c285e781331a3785a7f436042c65c5641ce8a9e9", GitTreeState:"clean", BuildDate:"2022-03-16T15:57:37Z", GoVersion:"go1.17.8", Compiler:"gc", Platform:"linux/amd64"}

Point kubelet at the containerd runtime:

cat <<EOF> /etc/sysconfig/kubelet
KUBELET_KUBEADM_ARGS="--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
EOF

Start the kubelet service and enable it on boot:

systemctl start kubelet
systemctl enable kubelet
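
Until kubeadm init (or join) generates its configuration, kubelet restarts in a loop; that is expected and resolves itself in section 2.2. Its state can be checked with:

systemctl status kubelet
# Follow the log; the errors stop once kubeadm init/join has run
journalctl -u kubelet -f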

2.2 Initialize the cluster

2.2.1 Master node (choose one of the two options)

Option 1: initialize from a config file.

kubeadm config print init-defaults > kubeadm.yaml

Edit the generated file to the following:

cat <<EOF> kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.4.27 # internal IP of the apiserver node
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock  # use the containerd socket
  imagePullPolicy: IfNotPresent
  name: master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {} # v1beta3 has no dns.type field; CoreDNS is the only supported DNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # mirror from which the images can actually be pulled
kind: ClusterConfiguration
kubernetesVersion: 1.23.5 # Kubernetes version
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16  
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs  # kube-proxy mode
EOF
kubeadm init --config kubeadm.yaml
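
Optionally, before running the init above, kubeadm's standard subcommands can list and pre-pull the control-plane images, which makes init faster and surfaces registry problems early:

# Optional: pre-pull the images referenced by kubeadm.yaml
kubeadm config images list --config kubeadm.yaml
kubeadm config images pull --config kubeadm.yaml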

Option 2: initialize directly from flags.

With this option kube-proxy runs in iptables mode; it can be switched to IPVS afterwards via kubectl edit configmap kube-proxy -n kube-system, as sketched below.
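
A sketch of switching an already-initialized cluster to IPVS (this assumes the default k8s-app=kube-proxy label that kubeadm applies):

# Set mode: "ipvs" in the kube-proxy ConfigMap
kubectl edit configmap kube-proxy -n kube-system
# Recreate the kube-proxy pods so they pick up the new mode
kubectl delete pod -l k8s-app=kube-proxy -n kube-system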

kubeadm init \
--kubernetes-version v1.23.5 \
--apiserver-advertise-address 192.168.4.27 \
--control-plane-endpoint master \
--image-repository registry.aliyuncs.com/google_containers \
--pod-network-cidr 10.244.0.0/16 \
--cri-socket /run/containerd/containerd.sock
  • --kubernetes-version: the version to install
  • --apiserver-advertise-address: the address of the K8s master node
  • --pod-network-cidr: the Pod network IP range
  • --image-repository: the registry to pull the control-plane images from

The Pod CIDR must not overlap the host network. If your hosts are on 192.168.*.*, set --pod-network-cidr to 10.0.0.0/16; if your hosts are on 10.0.*.*, use 192.168.0.0/16. This matters for Calico: with an overlapping Pod CIDR, CoreDNS will not start after the Calico plugin is deployed and logs "Failed to list *v1.Endpoints" (see https://blog.csdn.net/u011663005/article/details/87937800 for that error).

Copy the kubeconfig file:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
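
kubectl can now reach the API server. Until the CNI plugin is installed in section 2.3 the nodes report NotReady, which is expected at this point:

kubectl get nodes
# STATUS stays NotReady until Calico is installed in 2.3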

2.2.2 Worker nodes

Run the join command printed by kubeadm init on each worker:

kubeadm join master:6443 --token f6e3hv.uk6ctfgehstt92jw \
	--discovery-token-ca-cert-hash sha256:9962caed607e31de7b93732347c1ac681f216c290e6b35f91f3f5d67cd12cbcf
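
The token printed by kubeadm init is only valid for 24 hours (the ttl in kubeadm.yaml). If it has expired by the time a node joins, generate a fresh join command on the master:

kubeadm token create --print-join-command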

2.3 Install the Calico network plugin (run on the master)

Check that the image tags in the downloaded manifest are `v3.22.2`; if they differ, the sed replacements below will not match.

mkdir -p /root/i && cd /root/i
# Download the manifest
curl https://docs.projectcalico.org/manifests/calico.yaml -O
# Swap the images for mirrored copies
sed -i 's#docker.io/calico/cni:v3.22.2#registry.cn-shanghai.aliyuncs.com/wanfei/cni:v3.22.2#' /root/i/calico.yaml
sed -i 's#docker.io/calico/pod2daemon-flexvol:v3.22.2#registry.cn-shanghai.aliyuncs.com/wanfei/pod2daemon-flexvol:v3.22.2#' /root/i/calico.yaml
sed -i 's#docker.io/calico/node:v3.22.2#registry.cn-shanghai.aliyuncs.com/wanfei/node:v3.22.2#' /root/i/calico.yaml
sed -i 's#docker.io/calico/kube-controllers:v3.22.2#registry.cn-shanghai.aliyuncs.com/wanfei/kube-controllers:v3.22.2#' /root/i/calico.yaml
# Apply it
kubectl apply -f /root/i/calico.yaml

Wait a few minutes for the pods to come up.
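
Instead of polling manually, kubectl wait can block until the Calico node pods are ready (this assumes the k8s-app labels from the stock manifest):

kubectl -n kube-system wait --for=condition=Ready pod -l k8s-app=calico-node --timeout=300s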

[root@master i]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-57845f44bb-tpvbr   1/1     Running   0          79s
calico-node-fpfxj                          1/1     Running   0          79s
calico-node-qcvqx                          1/1     Running   0          79s
calico-node-r4gsf                          1/1     Running   0          79s
coredns-6d8c4cb4d-7bclr                    1/1     Running   0          29m
coredns-6d8c4cb4d-djwxf                    1/1     Running   0          29m
etcd-master                                1/1     Running   0          29m
kube-apiserver-master                      1/1     Running   0          29m
kube-controller-manager-master             1/1     Running   0          29m
kube-proxy-pjkmd                           1/1     Running   0          7m35s
kube-proxy-snb84                           1/1     Running   0          7m46s
kube-proxy-tp7wm                           1/1     Running   0          29m
kube-scheduler-master                      1/1     Running   0          29m
[root@master i]# kubectl get nodes
NAME     STATUS   ROLES                  AGE     VERSION
master   Ready    control-plane,master   29m     v1.23.5
node1    Ready    <none>                 8m4s    v1.23.5
node2    Ready    <none>                 7m53s   v1.23.5

3. Install other tools

3.1 Persistent storage with NFS

3.1.1 Set up the NFS server (any node works; here it is installed on the master)

yum -y install nfs-utils rpcbind
# Create the export directory and open up its permissions
# (a directory needs the execute bit, hence 777 rather than 666)
mkdir /nfsdata && chmod 777 /nfsdata && chown nfsnobody /nfsdata
# Configure the export
cat <<EOF> /etc/exports
/nfsdata *(rw,no_root_squash,no_all_squash,sync)
EOF
# Start the services
systemctl start rpcbind.service
systemctl enable rpcbind.service
systemctl start nfs.service
systemctl enable nfs.service
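
Verify that the export is actually being served before moving on:

# Re-export everything in /etc/exports and list the active exports
exportfs -rav
showmount -e localhost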

3.1.2 Install the NFS client (all worker nodes)

Without the NFS client, the nfs-client auto-provisioner used by the StorageClass (also called the Provisioner) stays stuck in ContainerCreating on whichever node it is scheduled to:

[root@master nfs-client]# kubectl get pods -o wide
NAME                                      READY   STATUS              RESTARTS   AGE     IP       NODE    NOMINATED NODE   READINESS GATES
nfs-client-provisioner-798cfd7476-zrndd   0/1     ContainerCreating   0          3m53s   <none>   node1   <none>           <none>

Install:

yum -y install nfs-utils rpcbind
systemctl start rpcbind.service
systemctl enable rpcbind.service
systemctl start nfs.service
systemctl enable nfs.service
[root@node1 ~]# showmount -e 192.168.4.27
Export list for 192.168.4.27:
/nfsdata *

3.1.3 Install nfs-client-provisioner

Set up a StorageClass that provisions PVs automatically.

nfs-rbac.yaml

cat <<EOF> nfs-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default        # set the namespace to match your environment; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
    # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
EOF

nfs-storage.yaml

cat <<EOF> nfs-storage.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
provisioner: nfs-storage # must match the PROVISIONER_NAME env var in the provisioner deployment
parameters:
  archiveOnDelete: "true"
reclaimPolicy: Retain
EOF

nfs-provisioner.yaml

cat <<EOF> nfs-provisioner.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default  # keep in sync with the namespace in the RBAC file
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          #image: quay.io/external_storage/nfs-client-provisioner:latest
          # Note: on k8s 1.20 and later the image above does not work properly.
          # After much debugging, a suggestion in an issue on the official GitHub
          # pointed to the image below, which fixed it; it is re-uploaded here
          # to a personal registry (upstream: easzlab/nfs-subdir-external-provisioner:v4.0.2)
          image: registry.cn-shanghai.aliyuncs.com/wanfei/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-storage  # provisioner name; must match the provisioner field in nfs-storage.yaml
            - name: NFS_SERVER
              value: 192.168.4.27   # NFS server IP
            - name: NFS_PATH
              value: "/nfsdata"    #NFS掛載卷
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.4.27  # NFS server IP
            path: "/nfsdata"     # NFS export path
EOF

Apply all three manifests:

kubectl apply -f .
[root@master nfs-client]# kubectl get pods | grep nfs-client
nfs-client-provisioner-777fbf8b55-2ptbm   1/1     Running   0          34s

Set it as the default StorageClass (it will then show "(default)" in kubectl get sc):

kubectl patch storageclass nfs-storage -p  '{ "metadata" : { "annotations" :{"storageclass.kubernetes.io/is-default-class": "true"}}}'
[root@master ~]# kubectl get sc | grep nfs-storage
nfs-storage (default)   nfs-storage                                     Retain          Immediate           false                  71s
# To unset the default, patch the value to "false"
kubectl patch storageclass nfs-storage -p  '{ "metadata" : { "annotations" :{"storageclass.kubernetes.io/is-default-class": "false"}}}'
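
To confirm dynamic provisioning works end to end, a throwaway PVC is enough; with nfs-storage as the default StorageClass it needs no storageClassName (the name test-claim is arbitrary):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
# The PVC should become Bound, with a backing PV created under /nfsdata
kubectl get pvc test-claim
# Clean up
kubectl delete pvc test-claim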

Reference: https://blog.csdn.net/m0_48898914/article/details/121752973

3.2 Install Helm

3.2.1 Download

Download the release tarball:

[root@master helm]# wget https://get.helm.sh/helm-v3.8.1-linux-amd64.tar.gz

If the download is too slow from the server, fetch the tarball elsewhere and upload it, e.g. with lrzsz (https://blog.csdn.net/qq_22356995/article/details/104071562).

Extract it:

[root@master helm]# tar -xvf helm-v3.8.1-linux-amd64.tar.gz
linux-amd64/
linux-amd64/helm
linux-amd64/README.md
linux-amd64/LICENSE

3.2.2 Install

Move the helm binary to /usr/local/bin:

[root@master helm]# mv linux-amd64/helm /usr/local/bin
[root@master helm]# helm version
version.BuildInfo{Version:"v3.8.1", GitCommit:"0ad800ef43d3b826f31a5ad8dfbb4fe05d143688", GitTreeState:"clean", GoVersion:"go1.13.12"}

3.2.3 Add a few repos

helm repo add apphub https://apphub.aliyuncs.com
helm repo add stable https://charts.helm.sh/stable
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# e.g. search for redis charts
[root@master helm]# helm search repo redis
NAME                            	CHART VERSION	APP VERSION  	DESCRIPTION                                       
apphub/prometheus-redis-exporter	3.2.2        	1.3.4        	Prometheus exporter for Redis metrics             
apphub/redis                    	10.5.3       	5.0.7        	Open source, advanced key-value store. It is of...
apphub/redis-cache              	0.5.0        	4.0.12-alpine	A pure in-memory redis cache, using statefulset...
apphub/redis-ha                 	4.3.3        	5.0.6        	Highly available Kubernetes implementation of R...
apphub/redis-operator           	1.0.0        	             	Redis Operator provides high availability redis...
apphub/redispapa                	0.0.1        	0.0.1        	利用redis的info資訊對redis的使用情況進行監控的一...
bitnami/redis                   	16.6.0       	6.2.6        	Redis(TM) is an open source, advanced key-value...
bitnami/redis-cluster           	7.4.1        	6.2.6        	Redis(TM) is an open source, scalable, distribu...
stable/prometheus-redis-exporter	3.5.1        	1.3.4        	DEPRECATED Prometheus exporter for Redis metrics  
stable/redis                    	10.5.7       	5.0.7        	DEPRECATED Open source, advanced key-value stor...
stable/redis-ha                 	4.4.6        	5.0.6        	DEPRECATED - Highly available Kubernetes implem...
stable/sensu                    	0.2.5        	0.28         	DEPRECATED Sensu monitoring framework backed by...
apphub/codis                    	3.2          	3.2          	A Helm chart for Codis
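
As a usage example, a chart from one of these repos installs with a single command (the release name my-redis is arbitrary; the chart's default values apply):

helm install my-redis bitnami/redis
# List and, when done, remove the release
helm list
helm uninstall my-redis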

Source: https://blog.csdn.net/qq_38983728/article/details/123755691


From "ITPUB Blog", link: http://blog.itpub.net/70003733/viewspace-2888774/; please credit the source when reposting.
