Kubernetes 1.25.9 install

Posted by liypsky on 2023-04-22

Environment:

AlmaLinux 8.7

kubernetes 1.25.9

containerd 1.6.16

cni-plugins 1.2.0


Steps:

1. Configure hostnames and security settings

2. Download all Kubernetes 1.25.9 packages

3. Install the containerd runtime

4. Initialize the cluster with kubeadm




# hostnamectl set-hostname master

# vi /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.100.121 master master.abc.com

192.168.100.122 node01 node01.abc.com


# Set SELinux in permissive mode (effectively disabling it)

sudo setenforce 0

sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
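--Optional check: getenforce should now report Permissive, and the change should be persisted in the config file:

getenforce
grep '^SELINUX=' /etc/selinux/config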


--download images

curl -Ls "(curl -Ls )/release"  | awk '/Package: registry.k8s.io\// {print $3}'


--binary download (see the sketch after this list for the typical download URLs)

curl -LO

curl -LO

curl -LO

curl -LO

curl -LO

curl -LO

curl -LO

curl -LO

curl -LO -convert

curl -LO

curl -LO

curl -LO
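--A sketch of the download step, assuming the standard dl.k8s.io release layout for v1.25.9 (verify the URLs before use; the kubectl docs publish the canonical form):

for bin in kubeadm kubelet kubectl kubectl-convert kube-apiserver kube-controller-manager \
           kube-scheduler kube-proxy kube-aggregator apiextensions-apiserver kube-log-runner mounter; do
  # fetch each binary for linux/amd64 from the release download area
  curl -LO "https://dl.k8s.io/release/v1.25.9/bin/linux/amd64/${bin}"
done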


--install

sudo install -o root -g root -m 0755 apiextensions-apiserver /usr/local/bin/apiextensions-apiserver 

sudo install -o root -g root -m 0755 kube-aggregator         /usr/local/bin/kube-aggregator         

sudo install -o root -g root -m 0755 kube-apiserver          /usr/local/bin/kube-apiserver          

sudo install -o root -g root -m 0755 kube-controller-manager /usr/local/bin/kube-controller-manager 

sudo install -o root -g root -m 0755 kube-log-runner         /usr/local/bin/kube-log-runner         

sudo install -o root -g root -m 0755 kube-scheduler          /usr/local/bin/kube-scheduler          

sudo install -o root -g root -m 0755 kubeadm                 /usr/local/bin/kubeadm                 

sudo install -o root -g root -m 0755 kubectl                 /usr/local/bin/kubectl                 

sudo install -o root -g root -m 0755 kubectl-convert         /usr/local/bin/kubectl-convert         

sudo install -o root -g root -m 0755 kubelet                 /usr/local/bin/kubelet                 

sudo install -o root -g root -m 0755 mounter                 /usr/local/bin/mounter                 

sudo install -o root -g root -m 0755 kube-proxy              /usr/local/bin/kube-proxy              



--If you do not have root access on the target system, you can still install kubectl to the ~/.local/bin directory:

--chmod +x kubectl

--mkdir -p ~/.local/bin

--mv ./kubectl ~/.local/bin/kubectl

--# and then append (or prepend) ~/.local/bin to $PATH


--Test to ensure the version you installed is up-to-date:

#kubectl version --client


--Or use this for a detailed view of the version:

# kubectl version --client --output=yaml  

clientVersion:

  buildDate: "2023-04-12T12:16:51Z"

  compiler: gc

  gitCommit: a1a87a0a2bcd605820920c6b0e618a8ab7d117d4

  gitTreeState: clean

  gitVersion: v1.25.9

  goVersion: go1.19.8

  major: "1"

  minor: "25"

  platform: linux/amd64

kustomizeVersion: v4.5.7 


# kubeadm config images list --kubernetes-version=1.25.9

registry.k8s.io/kube-apiserver:v1.25.9

registry.k8s.io/kube-controller-manager:v1.25.9

registry.k8s.io/kube-scheduler:v1.25.9

registry.k8s.io/kube-proxy:v1.25.9

registry.k8s.io/pause:3.8

registry.k8s.io/etcd:3.5.6-0

registry.k8s.io/coredns/coredns:v1.9.3
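--These images can be pre-pulled before running kubeadm init (once the container runtime is up); a sketch using the Aliyun mirror configured later in this post:

kubeadm config images pull --kubernetes-version=1.25.9 \
  --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers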


--yum install

#cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=

enabled=1

gpgcheck=1

repo_gpgcheck=1

gpgkey=

EOF

#sudo yum install -y kubectl



--Install bash-completion

#yum install bash-completion



# source /usr/share/bash-completion/bash_completion

# kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null

# echo 'source <(kubectl completion bash)' >>~/.bashrc

# echo 'alias k=kubectl' >>~/.bashrc

# echo 'complete -o default -F __start_kubectl k' >>~/.bashrc



-- Generate a kubeadm configuration file

$ kubeadm config print init-defaults > kubeadm.conf


-- Modify the kubeadm configuration file; key fields to change (assembled into a complete file in the sketch below):

criSocket: unix:///run/containerd/containerd.sock

advertiseAddress: 192.168.100.121

imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers

kubernetesVersion: 1.25.9

podSubnet: 10.244.0.0/16
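--As a sketch, the edited kubeadm.conf ends up roughly as below (kubeadm.k8s.io/v1beta3 is the config version printed by kubeadm 1.25; adjust names and defaults to your environment):

cat > kubeadm.conf <<EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.100.121
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: 1.25.9
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
networking:
  podSubnet: 10.244.0.0/16
EOF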



-- kubeadm installation

$ kubeadm init --config kubeadm.conf 

$ kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.100.121 --cri-socket unix:///run/containerd/containerd.sock



[root@master ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.100.121 --cri-socket unix:///run/containerd/containerd.sock

I0420 15:21:11.829823    6061 version.go:256] remote version is much newer: v1.27.1; falling back to: stable-1.25

[init] Using Kubernetes version: v1.25.9

[preflight] Running pre-flight checks

        [WARNING FileExisting-tc]: tc not found in system path

error execution phase preflight: [preflight] Some fatal errors occurred:

        [ERROR CRI]: container runtime is not running: output: time="2023-04-20T15:21:13+08:00" level=fatal msg="validate service connection: CRI v1 runtime API is not implemented for endpoint \"unix:///run/containerd/containerd.sock\": rpc error: code = Unimplemented desc = unknown service runtime.v1.RuntimeService"

, error: exit status 1

[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`

To see the stack trace of this error execute with --v=5 or higher


--Check the /etc/containerd/config.toml file to see whether the CRI plugin has been disabled, e.g. disabled_plugins = ["cri"]; remove that entry (or the whole file, as below) and restart containerd.

--[root@master1 ~]# rm -rf /etc/containerd/config.toml

--[root@master1 ~]# systemctl restart containerd

--sudo kubeadm reset -f

--journalctl | grep kubelet | grep "kube-api"
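--Instead of deleting config.toml, the disabled-plugins entry can also be emptied in place (a sketch; adjust the pattern if your file differs):

sudo sed -i 's/^disabled_plugins = \["cri"\]/disabled_plugins = []/' /etc/containerd/config.toml
sudo systemctl restart containerd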


[root@master ~]# kubeadm init --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --apiserver-advertise-address=192.168.100.121


[root@master ~]# kubeadm init --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --apiserver-advertise-address=192.168.100.121

I0421 09:22:12.130136   42099 version.go:256] remote version is much newer: v1.27.1; falling back to: stable-1.25

[init] Using Kubernetes version: v1.25.9

[preflight] Running pre-flight checks

        [WARNING FileExisting-tc]: tc not found in system path

[preflight] Pulling images required for setting up a Kubernetes cluster

[preflight] This might take a minute or two, depending on the speed of your internet connection

[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'

[certs] Using certificateDir folder "/etc/kubernetes/pki"

[certs] Generating "ca" certificate and key

[certs] Generating "apiserver" certificate and key

[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master] and IPs [10.96.0.1 192.168.100.121]

[certs] Generating "apiserver-kubelet-client" certificate and key

[certs] Generating "front-proxy-ca" certificate and key

[certs] Generating "front-proxy-client" certificate and key

[certs] Generating "etcd/ca" certificate and key

[certs] Generating "etcd/server" certificate and key

[certs] etcd/server serving cert is signed for DNS names [localhost master] and IPs [192.168.100.121 127.0.0.1 ::1]

[certs] Generating "etcd/peer" certificate and key

[certs] etcd/peer serving cert is signed for DNS names [localhost master] and IPs [192.168.100.121 127.0.0.1 ::1]

[certs] Generating "etcd/healthcheck-client" certificate and key

[certs] Generating "apiserver-etcd-client" certificate and key

[certs] Generating "sa" key and public key

[kubeconfig] Using kubeconfig folder "/etc/kubernetes"

[kubeconfig] Writing "admin.conf" kubeconfig file

[kubeconfig] Writing "kubelet.conf" kubeconfig file

[kubeconfig] Writing "controller-manager.conf" kubeconfig file

[kubeconfig] Writing "scheduler.conf" kubeconfig file

[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"

[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"

[kubelet-start] Starting the kubelet

[control-plane] Using manifest folder "/etc/kubernetes/manifests"

[control-plane] Creating static Pod manifest for "kube-apiserver"

[control-plane] Creating static Pod manifest for "kube-controller-manager"

[control-plane] Creating static Pod manifest for "kube-scheduler"

[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"

[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s

[kubelet-check] Initial timeout of 40s passed.

[apiclient] All control plane components are healthy after 119.905634 seconds

[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace

[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster

[upload-certs] Skipping phase. Please see --upload-certs

[mark-control-plane] Marking the node master as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]

[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]

[bootstrap-token] Using token: xoxl9h.pqwaatap6g1kvul9

[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles

[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes

[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials

[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token

[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster

[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace

[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key

[addons] Applied essential addon: CoreDNS

[addons] Applied essential addon: kube-proxy


Your Kubernetes control-plane has initialized successfully!


To start using your cluster, you need to run the following as a regular user:


  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config


Alternatively, if you are the root user, you can run:


  export KUBECONFIG=/etc/kubernetes/admin.conf


You should now deploy a pod network to the cluster.

Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:

 


Then you can join any number of worker nodes by running the following on each as root:


kubeadm join 192.168.100.121:6443 --token xoxl9h.pqwaatap6g1kvul9 \

        --discovery-token-ca-cert-hash sha256:340c24c7070332e29a3d2e4d272a8dd5783130bc791422a1d4f2f4d8e06cbbb0
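--If the bootstrap token above has expired (tokens are valid for 24 hours by default), print a fresh join command on the control plane:

kubeadm token create --print-join-command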

        

--add node01 host

[root@node01 ~]# kubeadm join 192.168.100.121:6443 --token xoxl9h.pqwaatap6g1kvul9 \

>         --discovery-token-ca-cert-hash sha256:340c24c7070332e29a3d2e4d272a8dd5783130bc791422a1d4f2f4d8e06cbbb0

[preflight] Running pre-flight checks

        [WARNING FileExisting-tc]: tc not found in system path

[preflight] Reading configuration from the cluster...

[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'

[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"

[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"

[kubelet-start] Starting the kubelet

[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...


This node has joined the cluster:

* Certificate signing request was sent to apiserver and a response was received.

* The Kubelet was informed of the new secure connection details.


Run 'kubectl get nodes' on the control-plane to see this node join the cluster.




--kube-apiserver configuration file

# more /usr/lib/systemd/system/kube-apiserver.service

--kubeadm init configures this automatically; there is no need to create a standalone service unit

# reload all systemd units

--systemctl daemon-reload

# enable kube-apiserver at boot

--systemctl enable kube-apiserver

# restart kube-apiserver

--systemctl restart kube-apiserver



--startup error when running kube-apiserver directly:

# /usr/local/bin/kube-apiserver 

W0420 18:10:34.424091    4805 services.go:37] No CIDR for service cluster IPs specified. Default value which was 10.0.0.0/24 is deprecated and will be removed in future releases. Please specify it using --service-cluster-ip-range on kube-apiserver.

I0420 18:10:34.751803    4805 serving.go:342] Generated self-signed cert (/var/run/kubernetes/apiserver.crt, /var/run/kubernetes/apiserver.key)

I0420 18:10:34.751895    4805 server.go:563] external host was not specified, using 10.10.15.25

W0420 18:10:34.751939    4805 authentication.go:525] AnonymousAuth is not allowed with the AlwaysAllow authorizer. Resetting AnonymousAuth to false. You should use a different authorizer

E0420 18:10:34.752280    4805 run.go:74] "command failed" err="[--etcd-servers must be specified, service-account-issuer is a required flag, --service-account-signing-key-file and --service-account-issuer are required flags]"



--kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to get sandbox image \"registry.k8s.io/pause:3.6\": failed to pull image \"registry.k8s.io/pause:3.6\": failed to pull and unpack image \"registry.k8s.io/pause:3.6\": failed to resolve reference \"registry.k8s.io/pause:3.6\": failed to do request: Head \"https://asia-east1-docker.pkg.dev/v2/k8s-artifacts-prod/images/pause/manifests/3.6\": dial tcp 74.125.23.82:443: i/o timeout" pod="kube-system/kube-apiserver-master

--If your systemd-resolved service is already active, this workaround does not apply. If the service is stopped, start it and recreate the Pod; the Pod events should then proceed to pulling the image successfully.

# check systemd-resolved status

systemctl status systemd-resolved

# start systemd-resolved

systemctl start systemd-resolved

# it must also be enabled at boot, otherwise kubelet reports errors that the node cannot be found
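--A one-liner that both enables it at boot and starts it:

systemctl enable --now systemd-resolved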


# curl

curl: (7) Failed to connect to 127.0.0.1 port 6443: Connection refused


# curl

curl: (60) SSL certificate problem: unable to get local issuer certificate

More details here: https://curl.haxx.se/docs/sslcerts.html


curl failed to verify the legitimacy of the server and therefore could not

establish a secure connection to it. To learn more about this situation and

how to fix it, please visit the web page mentioned above.





--install Container Runtimes

$ systemctl disable docker

$ systemctl enable  containerd


--when containerd.service is used as the container runtime, the crictl logs command is available

crictl  logs b3c010e0082be

[root@master ~]# crictl  ps -a

WARN[0000] runtime connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead. 

WARN[0000] image connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead. 

E0420 17:22:30.628112    4121 remote_runtime.go:390] "ListContainers with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory\"" filter="&ContainerFilter{Id:,State:nil,PodSandboxId:,LabelSelector:map[string]string{},}"

FATA[0000] listing containers: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory" 
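--The error above is crictl falling back to the deprecated default endpoints (dockershim first). Until /etc/crictl.yaml is configured (shown further below), the endpoint can be passed explicitly:

crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps -a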



--Reference setup (kernel modules and sysctl parameters):

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf

overlay

br_netfilter

EOF


sudo modprobe overlay

sudo modprobe br_netfilter


# sysctl params required by setup, params persist across reboots

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-iptables  = 1

net.bridge.bridge-nf-call-ip6tables = 1

net.ipv4.ip_forward                 = 1

EOF


# Apply sysctl params without reboot

sudo sysctl --system
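--Optional check that the modules are loaded and the sysctl values took effect:

lsmod | grep -E 'overlay|br_netfilter'
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward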


--Download

containerd-1.6.16-linux-amd64.tar.gz

--cri-containerd-1.6.16-linux-amd64.tar.gz

cri-containerd-cni-1.6.16-linux-amd64.tar.gz


--

--

--


$ tar Cxzvf /usr/local containerd-1.6.16-linux-amd64.tar.gz 

bin/

bin/containerd-shim-runc-v2

bin/containerd-stress

bin/ctr

bin/containerd

bin/containerd-shim

bin/containerd-shim-runc-v1


wget

into /usr/local/lib/systemd/system/containerd.service, and run the following commands:


systemctl daemon-reload

systemctl enable --now containerd
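--A sketch of fetching the unit file, assuming the upstream containerd repository path (check the containerd getting-started guide for the current location):

mkdir -p /usr/local/lib/systemd/system
wget -O /usr/local/lib/systemd/system/containerd.service \
  https://raw.githubusercontent.com/containerd/containerd/main/containerd.service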



$ tar Cxzvf /usr/local cri-containerd-cni-1.6.16-linux-amd64.tar.gz

mkdir -p /etc/containerd/

containerd config default > /etc/containerd/config.toml

--the command above generates a template configuration file


--Edit containerd's config.toml to change the download address of the pause sandbox image that Kubernetes uses during initialization

root@master:~# vim /etc/containerd/config.toml

sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8"
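--The same edit done non-interactively (a sketch; in containerd 1.6.16 the default value being replaced is registry.k8s.io/pause:3.6):

sudo sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8"#' /etc/containerd/config.toml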



[root@master ~]# systemctl start containerd.service 

[root@master ~]# systemctl status containerd.service    

● containerd.service - containerd container runtime

   Loaded: loaded (/usr/local/lib/systemd/system/containerd.service; enabled; vendor preset: disabled)

   Active: active (running) since Thu 2023-04-20 17:48:14 CST; 7s ago

     Docs:

  Process: 4280 ExecStartPre=/sbin/modprobe overlay (code=exited, status=0/SUCCESS)

 Main PID: 4282 (containerd)

    Tasks: 10

   Memory: 19.7M

   CGroup: /system.slice/containerd.service

           └─4282 /usr/local/bin/containerd


Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.758792993+08:00" level=info msg=serving... address=/run/containerd/containerd.sock.ttrpc

Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.758978330+08:00" level=info msg=serving... address=/run/containerd/containerd.sock

Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.759001119+08:00" level=info msg="Start subscribing containerd event"

Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.759201750+08:00" level=info msg="Start recovering state"

Apr 20 17:48:14 master systemd[1]: Started containerd container runtime.

Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.759084399+08:00" level=info msg="containerd successfully booted in 0.027322s"

Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.790494811+08:00" level=info msg="Start event monitor"

Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.790662518+08:00" level=info msg="Start snapshots syncer"

Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.790678027+08:00" level=info msg="Start cni network conf syncer for default"

Apr 20 17:48:14 master containerd[4282]: time="2023-04-20T17:48:14.790683011+08:00" level=info msg="Start streaming server"

[root@master ~]# ctr version

Client:

  Version:  v1.6.16

  Revision: 31aa4358a36870b21a992d3ad2bef29e1d693bec

  Go version: go1.18.10


Server:

  Version:  v1.6.16

  Revision: 31aa4358a36870b21a992d3ad2bef29e1d693bec

  UUID: 89383311-f373-4e82-8f66-5d371bb33970


--2:Installing runc

--Download runc.amd64


$ install -m 755 runc.amd64 /usr/local/sbin/runc


--3: Installing CNI plugins

--Download cni-plugins-linux-amd64-v1.2.0.tgz


$ mkdir -p /opt/cni/bin

$ tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.2.0.tgz 

./

./loopback

./bandwidth

./ptp

./vlan

./host-device

./tuning

./vrf

./sbr

./dhcp

./static

./firewall

./macvlan

./dummy

./bridge

./ipvlan

./portmap

./host-local


[root@master ~]# runc -version

runc version 1.1.6

commit: v1.1.6-0-g0f48801a

spec: 1.0.2-dev

go: go1.20.3

libseccomp: 2.5.4


--Running the cni plugins

$ mkdir -p /etc/cni/net.d

$ cat >/etc/cni/net.d/10-mynet.conf <<EOF

{

"cniVersion": "0.2.0",

"name": "mynet",

"type": "bridge",

"bridge": "cni0",

"isGateway": true,

"ipMasq": true,

"ipam": {

"type": "host-local",

"subnet": "10.22.0.0/16",

"routes": [

{ "dst": "0.0.0.0/0" }

]

}

}

EOF

$ cat >/etc/cni/net.d/99-loopback.conf <<EOF

{

"cniVersion": "0.2.0",

"name": "lo",

"type": "loopback"

}

EOF


$ cd $GOPATH/src/github.com/containernetworking/plugins

$ ./build_linux.sh


[root@master ~]# more build_linux.sh 

#!/usr/bin/env bash

set -e

cd "$(dirname "$0")"


if [ "$(uname)" == "Darwin" ]; then

        export GOOS="${GOOS:-linux}"

fi


export GOFLAGS="${GOFLAGS} -mod=vendor"


mkdir -p "${PWD}/bin"


echo "Building plugins ${GOOS}"

PLUGINS="plugins/meta/* plugins/main/* plugins/ipam/*"

for d in $PLUGINS; do

        if [ -d "$d" ]; then

                plugin="$(basename "$d")"

                if [ "${plugin}" != "windows" ]; then

                        echo "  $plugin"

                        ${GO:-go} build -o "${PWD}/bin/$plugin" "$@" ./"$d"

                fi

        fi

done




--Using the crictl command

--first configure /etc/crictl.yaml as follows:

runtime-endpoint: unix:///run/containerd/containerd.sock

image-endpoint: unix:///run/containerd/containerd.sock

timeout: 10

debug: false


These can also be set via commands:

crictl config runtime-endpoint unix:///run/containerd/containerd.sock

crictl config image-endpoint unix:///run/containerd/containerd.sock


[root@master ~]# crictl image

IMAGE                                                                         TAG                 IMAGE ID            SIZE

registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   v1.9.3              5185b96f0becf       14.8MB

registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.5.4-0             a8a176a5d5d69       102MB

registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.5.6-0             fce326961ae2d       103MB

registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.25.9             dc245db8c2fae       34.4MB

registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.25.9             3ea2571fcc83d       31.5MB

registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.25.9             28d55f91d3d8f       20.3MB

registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.25.9             165df46c1bb9b       16MB

registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.8                 4873874c08efc       311kB



-- Install Go 1.19.8

-- Download from the official Go website:

-- down https://go.dev/dl/go1.19.8.linux-amd64.tar.gz

cd /usr/local

tar -zxvf go1.19.8.linux-amd64.tar.gz


vim /etc/profile

# append at the end of the file

export GOROOT=/usr/local/go

export PATH=$PATH:$GOROOT/bin

# after saving and exiting, source the file

source /etc/profile


# run go version to verify the installation



--Install Kuboard v3 - Kubernetes online installation

--# kubectl apply -f

--the following uses the Huawei Cloud mirror

# kubectl apply -f


--Wait for Kuboard v3 to become ready

# kubectl get pods -n kuboard


--To delete:

--kubectl delete -f

# kubectl delete -f


--The installation above is extremely slow, so use the offline installation approach instead

-- docker pull eipwork/kuboard-agent:v3

-- docker pull eipwork/etcd-host:3.4.16-1

-- docker pull eipwork/kuboard:v3

-- docker pull questdb/questdb:6.0.4

-- docker tag eipwork/kuboard-agent:v3 registry.mycompany.com/kuboard/kuboard-agent:v3

-- docker tag eipwork/etcd-host:3.4.16-1 registry.mycompany.com/kuboard/etcd-host:3.4.16-1

-- docker tag eipwork/kuboard:v3 registry.mycompany.com/kuboard/kuboard:v3

-- docker tag questdb/questdb:6.0.4 registry.mycompany.com/kuboard/questdb:6.0.4

-- docker push registry.mycompany.com/kuboard/kuboard-agent:v3

-- docker push registry.mycompany.com/kuboard/etcd-host:3.4.16-1

-- docker push registry.mycompany.com/kuboard/kuboard:v3

-- docker push registry.mycompany.com/kuboard/questdb:6.0.4


crictl pull

ctr image tag

ctr image push


# ctr namespace ls

NAME    LABELS 

default        

k8s.io         

moby     


crictl pull eipwork/etcd-host:3.4.16-1

crictl pull eipwork/kuboard-agent:v3

crictl pull eipwork/kuboard:v3

crictl pull questdb/questdb:6.0.4


--ctr -n k8s.io i tag docker.io/eipwork/etcd-host:3.4.16-1 registry.abc.com/kuboard/etcd-host:3.4.16-1

--ctr image push registry.abc.com/kuboard/etcd-host:3.4.16-1


--Modify the image= entries in kuboard-v3.yaml to point at the downloaded image addresses

image: 'docker.io/eipwork/etcd-host:3.4.16-1'

image: 'docker.io/eipwork/kuboard:v3'


--Run the install command

kubectl apply -f kuboard-v3.yaml


[root@master ~]# kubectl apply -f kuboard-v3.yaml

namespace/kuboard created

configmap/kuboard-v3-config created

serviceaccount/kuboard-boostrap created

clusterrolebinding.rbac.authorization.k8s.io/kuboard-boostrap-crb created

daemonset.apps/kuboard-etcd created

deployment.apps/kuboard-v3 created

service/kuboard-v3 created
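--Check that Kuboard comes up and find the NodePort its web UI is exposed on (the exact port comes from the service definition in kuboard-v3.yaml):

kubectl get pods -n kuboard -o wide
kubectl get svc -n kuboard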

