This HA master deployment also follows write-ups by others in the community, and I have run through the whole procedure myself several times.
1. Environment
OS: CentOS 7.3 (minimal install)
Kernel: 3.10.0-514.el7.x86_64
Kubernetes: v1.13.3
Docker-ce: 18.06
Keepalived provides a highly available VIP for the apiserver
HAProxy load-balances the apiserver traffic
VIP 192.168.1.65
Node 1 192.168.1.60
Node 2 192.168.1.61
Node 3 192.168.1.62
2. Environment preparation
2.1 Disable SELinux and the firewall
sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config
setenforce 0
systemctl disable firewalld
systemctl stop firewalld
2.2 Disable swap
swapoff -a
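Note that swapoff -a only disables swap until the next reboot. To keep it disabled permanently, also comment out the swap entry in /etc/fstab; a common one-liner for this (an extra step, not in the original) is:
sed -ri 's/.*swap.*/#&/' /etc/fstab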
2.3 Add host resolution entries on every server
cat >>/etc/hosts<<EOF
192.168.1.60 host60
192.168.1.61 host61
192.168.1.62 host62
EOF
2.4 Configure kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl --system
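On a minimal CentOS 7 install the net.bridge.* keys only exist once the br_netfilter module is loaded, so if sysctl --system complains that they are unknown, load the module first and make that persistent (an extra step, not shown in the original):
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system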
2.5 Load the IPVS kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
2.6 Add yum repositories
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
wget http://mirrors.aliyun.com/repo/Centos-7.repo -O /etc/yum.repos.d/CentOS-Base.repo
wget http://mirrors.aliyun.com/repo/epel-7.repo -O /etc/yum.repos.d/epel.repo
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
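Optionally refresh the yum metadata afterwards so the new repositories are picked up right away:
yum clean all && yum makecache fast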
3. Deploy keepalived and haproxy
3.1 Install keepalived and haproxy
yum install -y keepalived haproxy
3.2 Configure keepalived
The priorities on the three servers are 100, 90 and 80 respectively.
cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        *****@163.com
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LVS_1
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    lvs_sync_daemon_interface eth0
    virtual_router_id 88
    advert_int 1
    priority 100
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.65/24
    }
}
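On host61 and host62 the same file is used; per the note above only the priority changes (90 on host61, 80 on host62), e.g. on host61:
    priority 90
Many setups also switch the standby nodes to state BACKUP, but with differing priorities the highest-priority node still wins the VRRP election.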
3.3 Configure haproxy
cat /etc/haproxy/haproxy.cfg
global
    chroot /var/lib/haproxy
    daemon
    group haproxy
    user haproxy
    log 127.0.0.1:514 local0 warning
    pidfile /var/lib/haproxy.pid
    maxconn 20000
    spread-checks 3
    nbproc 8

defaults
    log global
    mode tcp
    retries 3
    option redispatch

listen https-apiserver
    bind 192.168.1.65:8443
    mode tcp
    balance roundrobin
    timeout server 15s
    timeout connect 15s
    server apiserver01 192.168.1.60:6443 check port 6443 inter 5000 fall 5
    server apiserver02 192.168.1.61:6443 check port 6443 inter 5000 fall 5
    server apiserver03 192.168.1.62:6443 check port 6443 inter 5000 fall 5
3.4 Start the services
systemctl enable keepalived && systemctl start keepalived
systemctl enable haproxy && systemctl start haproxy
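Before moving on, it is worth checking that the VIP is up on the active node and that haproxy is listening on 8443 (a quick sanity check, not part of the original steps):
ip addr show eth0 | grep 192.168.1.65
ss -lntp | grep 8443
haproxy -c -f /etc/haproxy/haproxy.cfg
At this point the haproxy backends will still be reported as down, since the apiservers are not running yet.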
4. Deploy Kubernetes
4.1 Install the required packages
yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3 ipvsadm ipset docker-ce-18.06.1.ce
# Start docker
systemctl enable docker && systemctl start docker
# Enable kubelet to start on boot
systemctl enable kubelet
4.2 Prepare the kubeadm init configuration file
[root@host60 ~]# cat kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.60
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: host60
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
apiServer:
  timeoutForControlPlane: 4m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.1.65:8443"
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kubernetesVersion: v1.13.3
networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: "10.245.0.0/16"
scheduler: {}
controllerManager: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
4.3 Pre-pull the images
[root@host60 ~]# kubeadm config images pull --config kubeadm-init.yaml
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.13.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.13.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.13.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.13.3
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.2.24
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.2.6
4.4 Initialize the cluster
[root@host60 ~]# kubeadm init --config kubeadm-init.yaml
[init] Using Kubernetes version: v1.13.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [host60 localhost] and IPs [192.168.1.60 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [host60 localhost] and IPs [192.168.1.60 127.0.0.1 ::1]
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [host60 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.245.0.1 192.168.1.60 192.168.1.65]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 41.510432 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "host60" as an annotation
[mark-control-plane] Marking the node host60 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node host60 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes master has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of machines by running the following on each node
as root:
kubeadm join 192.168.1.65:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:e02b46c1f697709552018f706f96a03922b159ecc2c3d82140365e4a8d0a83d4
kubeadm init performs the following main steps:
[init]: start initialization with the specified version.
[preflight]: run pre-flight checks and pull the Docker images required for the cluster.
[kubelet-start]: generate the kubelet configuration file "/var/lib/kubelet/config.yaml"; kubelet cannot start without it, which is why kubelet actually fails to start before initialization.
[certs]: generate the certificates used by Kubernetes and store them in /etc/kubernetes/pki.
[kubeconfig]: generate the kubeconfig files in /etc/kubernetes; the components use them to communicate with the apiserver.
[control-plane]: install the master components from the YAML manifests under /etc/kubernetes/manifests.
[etcd]: install the etcd service from /etc/kubernetes/manifests/etcd.yaml.
[wait-control-plane]: wait for the master components started as static Pods to come up.
[apiclient]: check the health of the master components.
[uploadconfig]: store the configuration used in the kubeadm-config ConfigMap.
[kubelet]: configure the kubelets in the cluster via a ConfigMap.
[patchnode]: record the CRI socket information on the Node object as an annotation.
[mark-control-plane]: label the current node with the master role and taint it as unschedulable, so that Pods are not scheduled onto master nodes by default.
[bootstrap-token]: generate the bootstrap token; keep it, as it is needed later when adding nodes to the cluster with kubeadm join.
[addons]: install the CoreDNS and kube-proxy add-ons.
4.5 Prepare the kubeconfig file for kubectl
By default, kubectl looks for a config file in the .kube directory under the home directory of the user running it. Here we copy admin.conf, generated during the [kubeconfig] step of the initialization, to .kube/config.
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
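If kubectl will also be used by a non-root user, additionally change the ownership of the file, as suggested in the kubeadm output above:
sudo chown $(id -u):$(id -g) $HOME/.kube/config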
4.6 Check the cluster status
[root@host60 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health": "true"}
[root@host60 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
host60 NotReady master 16h v1.13.3
4.7 Copy the certificates to the other master nodes
USER=root
CONTROL_PLANE_IPS="host61 host62"
for host in ${CONTROL_PLANE_IPS}; do
ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
4.8 Join the other nodes to the cluster as masters
kubeadm join 192.168.1.65:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:e02b46c1f697709552018f706f96a03922b159ecc2c3d82140365e4a8d0a83d4 --experimental-control-plane
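After the join finishes on host61 and host62, kubectl can be used on those nodes too by reusing the admin.conf copied in step 4.7 (optional, same as step 4.5):
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config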
4.9 Check the cluster status again
Because the pod network is not yet in place, all nodes are still NotReady.
[root@host60 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
host60 NotReady master 16h v1.13.3
host61 NotReady master 81s v1.13.3
host62 NotReady master 43s v1.13.3
4.10 Configure the cluster network
Before the network is configured, DNS does not start successfully (the CoreDNS pods stay Pending).
[root@host60 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-89cc84847-lg9gr 0/1 Pending 0 16h
coredns-89cc84847-zvsn8 0/1 Pending 0 16h
etcd-host60 1/1 Running 0 16h
etcd-host61 1/1 Running 0 10m
etcd-host62 1/1 Running 0 9m20s
kube-apiserver-host60 1/1 Running 0 16h
kube-apiserver-host61 1/1 Running 0 9m55s
kube-apiserver-host62 1/1 Running 0 9m12s
kube-controller-manager-host60 1/1 Running 1 16h
kube-controller-manager-host61 1/1 Running 0 9m55s
kube-controller-manager-host62 1/1 Running 0 9m9s
kube-proxy-64pwl 1/1 Running 0 16h
kube-proxy-78bm9 1/1 Running 0 10m
kube-proxy-xwghb 1/1 Running 0 9m23s
kube-scheduler-host60 1/1 Running 1 16h
kube-scheduler-host61 1/1 Running 0 10m
kube-scheduler-host62 1/1 Running 0 9m23s
There are many network plugin options, but most of them need extra parameters at initialization time (e.g. podSubnet), otherwise they will not work; Weave Net does not, which is why it is used here.
export kubever=$(kubectl version | base64 | tr -d '\n')
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
After waiting a while, the network plugin comes up.
Checking the pods again, the DNS pods have now been scheduled.
One of them fails in my case; it is related to my network configuration and I have not found the cause yet, but the CoreDNS pod on the other node is running normally.
[root@host60 ~]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-89cc84847-9hpqm 1/1 Running 1 19m 10.32.0.4 host61 <none> <none>
coredns-89cc84847-jfgmx 0/1 ContainerCreating 0 9m49s <none> host60 <none> <none>
etcd-host60 1/1 Running 2 17h 192.168.1.60 host60 <none> <none>
etcd-host61 1/1 Running 2 73m 192.168.1.61 host61 <none> <none>
etcd-host62 1/1 Running 2 73m 192.168.1.62 host62 <none> <none>
kube-apiserver-host60 1/1 Running 2 17h 192.168.1.60 host60 <none> <none>
kube-apiserver-host61 1/1 Running 1 73m 192.168.1.61 host61 <none> <none>
kube-apiserver-host62 1/1 Running 2 73m 192.168.1.62 host62 <none> <none>
kube-controller-manager-host60 1/1 Running 3 17h 192.168.1.60 host60 <none> <none>
kube-controller-manager-host61 1/1 Running 3 73m 192.168.1.61 host61 <none> <none>
kube-controller-manager-host62 1/1 Running 3 73m 192.168.1.62 host62 <none> <none>
kube-proxy-64pwl 1/1 Running 2 17h 192.168.1.60 host60 <none> <none>
kube-proxy-78bm9 1/1 Running 1 73m 192.168.1.61 host61 <none> <none>
kube-proxy-xwghb 1/1 Running 2 73m 192.168.1.62 host62 <none> <none>
kube-scheduler-host60 1/1 Running 3 17h 192.168.1.60 host60 <none> <none>
kube-scheduler-host61 1/1 Running 2 73m 192.168.1.61 host61 <none> <none>
kube-scheduler-host62 1/1 Running 2 73m 192.168.1.62 host62 <none> <none>
weave-net-57xhp 2/2 Running 4 54m 192.168.1.60 host60 <none> <none>
weave-net-d9l29 2/2 Running 2 54m 192.168.1.61 host61 <none> <none>
weave-net-h8lbk 2/2 Running 4 54m 192.168.1.62 host62 <none> <none>
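A possible starting point for debugging the stuck CoreDNS pod is to look at its events and at the Weave container logs on that node (pod names taken from the output above):
kubectl describe pod coredns-89cc84847-jfgmx -n kube-system
kubectl logs -n kube-system weave-net-57xhp -c weave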
The node status is now normal as well.
[root@host60 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
host60 Ready master 17h v1.13.3
host61 Ready master 76m v1.13.3
host62 Ready master 75m v1.13.3
5. Add worker nodes
5.1 Prepare the system
Follow the same steps as above.
5.2 Install the required packages
Follow the same steps as above.
5.3 Join the cluster
kubeadm join 192.168.1.65:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:e02b46c1f697709552018f706f96a03922b159ecc2c3d82140365e4a8d0a83d4
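The token defined in kubeadm-init.yaml has a TTL of 24h; if it has expired by the time a node joins, a fresh join command can be generated on any master:
kubeadm token create --print-join-command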
5.4 Check the cluster status
[root@host60 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
host60 Ready master 17h v1.13.3
host61 Ready master 95m v1.13.3
host62 Ready master 95m v1.13.3
host63 Ready <none> 2m51s v1.13.3
PS: the problematic CoreDNS pod was deleted; it has been rescheduled onto the newly joined node and is now running normally.
[root@host60 ~]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-89cc84847-9hpqm 1/1 Running 1 45m 10.32.0.4 host61 <none> <none>
coredns-89cc84847-sglw7 1/1 Running 0 103s 10.37.0.1 host63 <none> <none>
etcd-host60 1/1 Running 2 17h 192.168.1.60 host60 <none> <none>
etcd-host61 1/1 Running 2 100m 192.168.1.61 host61 <none> <none>
etcd-host62 1/1 Running 2 99m 192.168.1.62 host62 <none> <none>
kube-apiserver-host60 1/1 Running 2 17h 192.168.1.60 host60 <none> <none>
kube-apiserver-host61 1/1 Running 1 100m 192.168.1.61 host61 <none> <none>
kube-apiserver-host62 1/1 Running 2 99m 192.168.1.62 host62 <none> <none>
kube-controller-manager-host60 1/1 Running 3 17h 192.168.1.60 host60 <none> <none>
kube-controller-manager-host61 1/1 Running 3 100m 192.168.1.61 host61 <none> <none>
kube-controller-manager-host62 1/1 Running 3 99m 192.168.1.62 host62 <none> <none>
kube-proxy-64pwl 1/1 Running 2 17h 192.168.1.60 host60 <none> <none>
kube-proxy-78bm9 1/1 Running 1 100m 192.168.1.61 host61 <none> <none>
kube-proxy-v28fs 1/1 Running 0 6m59s 192.168.1.63 host63 <none> <none>
kube-proxy-xwghb 1/1 Running 2 99m 192.168.1.62 host62 <none> <none>
kube-scheduler-host60 1/1 Running 3 17h 192.168.1.60 host60 <none> <none>
kube-scheduler-host61 1/1 Running 2 100m 192.168.1.61 host61 <none> <none>
kube-scheduler-host62 1/1 Running 2 99m 192.168.1.62 host62 <none> <none>
weave-net-57xhp 2/2 Running 4 80m 192.168.1.60 host60 <none> <none>
weave-net-d9l29 2/2 Running 2 80m 192.168.1.61 host61 <none> <none>
weave-net-h8lbk 2/2 Running 4 80m 192.168.1.62 host62 <none> <none>
weave-net-mhbpr 2/2 Running 1 6m59s 192.168.1.63 host63 <none> <none>
6. Overview of the whole cluster
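The nginx Deployment and Service that appear in the output below were created beforehand as a smoke test; the original does not show the exact commands, but a minimal sketch that produces objects with these names would be:
kubectl run nginx-deployment --image=nginx --replicas=3
kubectl expose deployment nginx-deployment --name=nginx-server --port=80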
[root@host60 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health": "true"}
[root@host60 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
host60 Ready master 18h v1.13.3
host61 Ready master 114m v1.13.3
host62 Ready master 113m v1.13.3
host63 Ready <none> 21m v1.13.3
[root@host60 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-deployment-67d4b848b4-qpmbz 1/1 Running 0 8m9s
nginx-deployment-67d4b848b4-zdn4f 1/1 Running 0 8m9s
nginx-deployment-67d4b848b4-zxd7l 1/1 Running 0 8m9s
[root@host60 ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.245.0.1 <none> 443/TCP 18h
nginx-server ClusterIP 10.245.117.70 <none> 80/TCP 68s
[root@host60 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.245.0.1:443 rr
-> 192.168.1.60:6443 Masq 1 1 0
-> 192.168.1.61:6443 Masq 1 0 0
-> 192.168.1.62:6443 Masq 1 1 0
TCP 10.245.0.10:53 rr
-> 10.32.0.4:53 Masq 1 0 0
-> 10.37.0.1:53 Masq 1 0 0
TCP 10.245.117.70:80 rr
-> 10.37.0.2:80 Masq 1 0 0
-> 10.37.0.3:80 Masq 1 0 1
-> 10.37.0.4:80 Masq 1 0 0
UDP 10.245.0.10:53 rr
-> 10.32.0.4:53 Masq 1 0 0
-> 10.37.0.1:53 Masq 1 0 0
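As a final end-to-end check, the nginx service can be reached from any node via the ClusterIP shown above (a quick verification, not in the original):
curl -I http://10.245.117.70/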