名稱空間資源限制(ResourceQuota):限制的是整個名稱空間可使用資源的總額,而不是單個 pod
[root@master pod]# cat ../nsrq.yaml apiVersion: v1 kind: ResourceQuota metadata: name: mem-cpu-quota namespace: test spec: hard: limits.cpu: "2" limits.memory: "2Gi" requests.cpu: "4" requests.memory: "4Gi" [root@master pod]# kubectl get po -n test No resources found in test namespace. [root@master pod]# kubectl apply -f ../nsrq.yaml resourcequota/mem-cpu-quota configured [root@master pod]# kubectl describe -n test resourcequotas Name: mem-cpu-quota Namespace: test Resource Used Hard -------- ---- ---- limits.cpu 0 2 limits.memory 0 2Gi requests.cpu 0 4 requests.memory 0 4Gi [root@master pod]# cat po po2.yaml po3.yaml po.yaml [root@master pod]# cat po.yaml apiVersion: v1 kind: Pod metadata: name: pod-test namespace: test labels: app: tomcat spec: containers: - name: tomcat-test image: tomcat imagePullPolicy: IfNotPresent ports: - containerPort: 8080 resources: limits: memory: "2Gi" cpu: "1" requests: memory: "2Gi" cpu: "1" [root@master pod]# kubectl apply -f po.yaml pod/pod-test created [root@master pod]# cat po2.yaml apiVersion: v1 kind: Pod metadata: name: pod-test-1 namespace: test labels: app: tomcat spec: containers: - name: tomcat-test image: tomcat imagePullPolicy: IfNotPresent ports: - containerPort: 8080 resources: limits: memory: "100Mi" cpu: "500m" requests: memory: "100Mi" cpu: "500m" [root@master pod]# kubectl apply -f po2.yaml Error from server (Forbidden): error when creating "po2.yaml": pods "pod-test-1" is forbidden: exceeded quota: mem-cpu-quota, requested: limits.memory=100Mi, used: limits.memory=2Gi, limited: limits.memory=2Gi [root@master pod]# kubectl get po -n test NAME READY STATUS RESTARTS AGE pod-test 1/1 Running 0 2m53s [root@master pod]# kubectl edit resourcequotas -n test mem-cpu-quota # Please edit the object below. Lines beginning with a '#' will be ignored, # and an empty file will abort the edit. If an error occurs while saving this file will be # reopened with the relevant failures. 
# apiVersion: v1 kind: ResourceQuota metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"v1","kind":"ResourceQuota","metadata":{"annotations":{},"name":"mem-cpu-quota","namespace":"test"},"spec":{"hard":{"limits.cpu":"2","limits.memory":"2Gi","requests.cpu":"4","requests.memor y":"4Gi"}}} creationTimestamp: "2024-10-11T07:12:37Z" name: mem-cpu-quota namespace: test resourceVersion: "113002" uid: 9f669803-9b56-46cd-a10d-0291b5c59576 spec: hard: limits.cpu: "2" limits.memory: 3Gi # 修改為3G requests.cpu: "4" requests.memory: 4Gi status: hard: limits.cpu: "2" limits.memory: 2Gi requests.cpu: "4" requests.memory: 4Gi used: limits.cpu: "1" limits.memory: 2Gi requests.cpu: "1" requests.memory: 2Gi [root@master pod]# kubectl describe -n test resourcequotas Name: mem-cpu-quota Namespace: test Resource Used Hard -------- ---- ---- limits.cpu 1 2 limits.memory 2Gi 3Gi requests.cpu 1 4 requests.memory 2Gi 4Gi [root@master pod]# kubectl apply -f po2.yaml pod/pod-test-1 created [root@master pod]# kubectl get po -n test NAME READY STATUS RESTARTS AGE pod-test 1/1 Running 0 1s pod-test-1 1/1 Running 0 11s
po 指定node 節點
[root@master pod]# cat po3.yaml apiVersion: v1 kind: Pod metadata: name: demo-pod namespace: default labels: app: busybox-tomcat env: pro spec: nodeName: node-1 containers: - name: tomcat ports: - containerPort: 8080 image: tomcat:8.5-jre8-alpine imagePullPolicy: IfNotPresent - name: busybox image: busybox:latest imagePullPolicy: IfNotPresent command: - "/bin/sh" - "-c" - "sleep 36000" [root@master pod]# kubectl get po demo-pod -owide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES demo-pod 2/2 Running 0 124m 172.16.84.139 node-1 <none> <none>
po 透過標籤選擇器(nodeSelector)選定 node
[root@master pod]# cat po4.yaml apiVersion: v1 kind: Pod metadata: name: demo-pod-1 namespace: default labels: app: busybox-tomcat env: pro spec: # nodeName: node-1 nodeSelector: app: v1 #選擇擁有此標籤的node containers: - name: tomcat ports: - containerPort: 8080 image: tomcat:8.5-jre8-alpine imagePullPolicy: IfNotPresent - name: busybox image: busybox:latest imagePullPolicy: IfNotPresent command: - "/bin/sh" - "-c" - "sleep 36000" [root@master pod]# kubectl get po NAME READY STATUS RESTARTS AGE demo-pod 2/2 Running 0 126m demo-pod-1 0/2 Pending(待排程狀態) 0 3m55s hamster-65db5bcc68-fm6f2 1/1 Running 0 13h hamster-65db5bcc68-j8bss 1/1 Running 0 13h [root@master pod]# kubectl get node --show-labels NAME STATUS ROLES AGE VERSION LABELS node Ready control-plane 19h v1.28.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node,kubernetes.io/os=linux,node-role.kubernetes.io/c ontrol-plane=,node.kubernetes.io/exclude-from-external-load-balancers=node-1 Ready <none> 19h v1.28.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node-1,kubernetes.io/os=linux node-2 Ready <none> 19h v1.28.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node-2,kubernetes.io/os=linux [root@master pod]# kubectl label nodes node-2 app=v1 node/node-2 labeled [root@master pod]# kubectl get po NAME READY STATUS RESTARTS AGE demo-pod 2/2 Running 0 128m demo-pod-1 0/2 ContainerCreating 0 5m34s hamster-65db5bcc68-fm6f2 1/1 Running 0 13h hamster-65db5bcc68-j8bss 1/1 Running 0 13h [root@master pod]# kubectl get po -owide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES demo-pod 2/2 Running 0 128m 172.16.84.139 node-1 <none> <none> demo-pod-1 0/2 ContainerCreating 0 5m49s <none> node-2 <none> <none> hamster-65db5bcc68-fm6f2 1/1 Running 0 13h 172.16.247.10 node-2 <none> <none> hamster-65db5bcc68-j8bss 1/1 Running 0 13h 
172.16.84.138 node-1 <none> <none>
po 節點 硬親和性
[root@master pod]# cat po.yaml apiVersion: v1 kind: Pod metadata: name: pod-test # namespace: test labels: app: tomcat spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: #硬親和性 nodeSelectorTerms: - matchExpressions: - key: app #標籤 operator: In # 等於 values: - v2 #值 - v3 containers: - name: tomcat-test image: tomcat imagePullPolicy: IfNotPresent ports: - containerPort: 8080 [root@master pod]# kubectl apply -f po.yaml pod/pod-test created [root@master pod]# kubectl get po NAME READY STATUS RESTARTS AGE demo-pod 2/2 Running 0 145m demo-pod-1 2/2 Running 0 22m hamster-65db5bcc68-fm6f2 1/1 Running 0 13h hamster-65db5bcc68-j8bss 1/1 Running 0 13h pod-test 0/1 Pending 0 4s [root@master pod]# kubectl label nodes node-1 app=v2 && kubectl get pod pod-test -owide node/node-1 labeled NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod-test 0/1 ContainerCreating 0 50s <none> node-1 <none> <none>
po 節點軟親和性
[root@master pod]# cat po6.yaml apiVersion: v1 kind: Pod metadata: name: pod-test-cx # namespace: test labels: app: tomcat spec: affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: #軟親和性 - preference: matchExpressions: - key: cx #標籤 operator: In # 等於 values: - dev #值 weight: 90 # 親和力度 containers: - name: tomcat-test image: tomcat imagePullPolicy: IfNotPresent ports: - containerPort: 8080 [root@master pod]# kubectl apply -f po6.yaml pod/pod-test-cx created [root@master pod]# kubectl get pod pod-test-cx -owide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod-test-cx 1/1 Running 0 26s 172.16.247.13 node-2 <none> <none> [root@master pod]# kubectl get nodes --show-labels | grep cx=dev
打標籤與刪除標籤
打標籤 [root@master pod]# kubectl label nodes node-1 app=v2 node/node-1 labeled 刪除標籤 [root@master pod]# kubectl label nodes node-2 app- node/node-2 unlabeled [root@master pod]# kubectl label nodes node-1 app- node/node-1 unlabeled [root@master pod]# kubectl label nodes node-1 cx- node/node-1 unlabeled
po 與po 的親和性
[root@master pod]# kubectl get po --show-labels NAME READY STATUS RESTARTS AGE LABELS pod-test 1/1 Running 0 94s app=tomcat,security=S1 [root@master pod]# cat poaffinity.yaml apiVersion: v1 kind: Pod metadata: name: pod-test-cx # namespace: test labels: app: tomcat spec: affinity: podAffinity: preferredDuringSchedulingIgnoredDuringExecution: requiredDuringSchedulingIgnoredDuringExecution: # 硬親和性 - labelSelector: matchExpressions: - key: security operator: In values: - S1 topologyKey: kubernetes.io/hostname # 根據這個node 標籤判斷是不是一個節點 containers: - name: tomcat-test image: tomcat imagePullPolicy: IfNotPresent ports: - containerPort: 8080 [root@master pod]# kubectl apply -f poaffinity.yaml pod/pod-test-cx created [root@master pod]# kubectl get po --show-labels NAME READY STATUS RESTARTS AGE LABELS pod-test 1/1 Running 0 109s app=tomcat,security=S1 pod-test-cx 1/1 Running 0 3s app=tomcat [root@master pod]# kubectl get po -owide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod-test 1/1 Running 0 2m 172.16.84.147 node-1 <none> <none> pod-test-cx 1/1 Running 0 14s 172.16.84.149 node-1 <none> <none>
po反親和性
[root@master pod]# cat nopo.yaml apiVersion: v1 kind: Pod metadata: name: pod-test-cx # namespace: test labels: app: tomcat spec: affinity: podAntiAffinity: # 反親和性 preferredDuringSchedulingIgnoredDuringExecution: requiredDuringSchedulingIgnoredDuringExecution: # 硬反親和性 - labelSelector: matchExpressions: - key: security operator: In values: - S1 topologyKey: kubernetes.io/hostname # 根據這個node 標籤判斷 containers: - name: tomcat-test image: tomcat imagePullPolicy: IfNotPresent ports: - containerPort: 8080 [root@master pod]# kubectl get po --show-labels NAME READY STATUS RESTARTS AGE LABELS pod-test 1/1 Running 0 8m53s app=tomcat,security=S1 pod-test-cx 1/1 Running 0 18s app=tomcat [root@master pod]# kubectl get po -owide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod-test 1/1 Running 0 9m 172.16.84.150 node-1 <none> <none> pod-test-cx 1/1 Running 0 25s 172.16.247.14 node-2 <none> <none>
汙點與容忍度
KIND: Node VERSION: v1 FIELD: taints <[]Taint> DESCRIPTION: If specified, the node's taints. The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. FIELDS: effect <string> -required- Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. Possible enum values: - `"NoExecute"` Evict any already-running pods that do not tolerate the taint. Currently enforced by NodeController. - `"NoSchedule"` Do not allow new pods to schedule onto the node unless they tolerate the taint, but allow all pods submitted to Kubelet without going through the scheduler to start, and allow all already-running pods to continue running. Enforced by the scheduler. - `"PreferNoSchedule"` Like TaintEffectNoSchedule, but the scheduler tries not to schedule new pods onto the node, rather than prohibiting new pods from scheduling onto the node entirely. Enforced by the scheduler. key <string> -required- Required. The taint key to be applied to a node. timeAdded <string> TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. value <string> The taint value corresponding to the taint key.
effect
欄位的允許值包括:
NoExecute
- 這會影響已在節點上執行的 Pod,具體影響如下:
- 如果 Pod 不能容忍這類汙點,會馬上被驅逐。
- 如果 Pod 能夠容忍這類汙點,但是在容忍度定義中沒有指定
tolerationSeconds
, 則 Pod 還會一直在這個節點上執行。 - 如果 Pod 能夠容忍這類汙點,而且指定了
tolerationSeconds
, 則 Pod 還能在這個節點上繼續執行這個指定的時間長度。 這段時間過去後,節點生命週期控制器從節點驅除這些 Pod。
NoSchedule
PreferNoSchedule
PreferNoSchedule
是“偏好”或“軟性”的NoSchedule
。 控制平面將嘗試避免將不能容忍該汙點的 Pod 排程到帶有該汙點的節點上,但不能保證完全避免。
檢視node-1 .節點pod [root@master ~]# kubectl get node NAME STATUS ROLES AGE VERSION node Ready control-plane 4d18h v1.28.2 node-1 Ready <none> 4d18h v1.28.2 node-2 Ready <none> 4d18h v1.28.2 [root@master ~]# kubectl get pod -A -owide | grep node-1 default pod-test 1/1 Running 1 (28m ago) 3d22h 172.16.84.151 node-1 <none> <none> kube-system calico-node-2bjbd 1/1 Running 3 (28m ago) 4d18h 192.168.10.30 node-1 <none> <none> kube-system kube-proxy-jxl5j 1/1 Running 3 (28m ago) 4d18h 192.168.10.30 node-1 <none> <none> kube-system vpa-admission-controller-6cfd4f784d-w8c58 1/1 Running 1 (28m ago) 4d17h 172.16.84.153 node-1 <none> <none> kube-system vpa-updater-cc89b6c56-grq55 1/1 Running 1 (28m ago) 4d17h 172.16.84.152 node-1 <none> <none> 給node-1 打汙點,並檢視node-1 上pod [root@master ~]# kubectl taint node node-1 a=b:NoExecute && kubectl get pod -A -owide | grep node-1 node/node-1 tainted default pod-test 1/1 Terminating 1 (30m ago) 3d22h 172.16.84.151 node-1 <none> <none> kube-system calico-node-2bjbd 1/1 Running 3 (30m ago) 4d18h 192.168.10.30 node-1 <none> <none> kube-system kube-proxy-jxl5j 1/1 Running 3 (30m ago) 4d19h 192.168.10.30 node-1 <none> <none> kube-system vpa-admission-controller-6cfd4f784d-w8c58 1/1 Terminating 1 (30m ago) 4d17h 172.16.84.153 node-1 <none> <none> kube-system vpa-updater-cc89b6c56-grq55 1/1 Terminating 1 (30m ago) 4d17h 172.16.84.152 node-1 <none> <none>
operator
的預設值是Equal
。一個容忍度和一個汙點相“匹配”是指它們有一樣的鍵名和效果,並且:
- 如果
operator
是Exists
(此時容忍度不能指定value
),或者 - 如果
operator
是Equal
,則它們的值應該相等
[root@master ~]# kubectl taint node node-1 a=b:NoSchedule node/node-1 tainted [root@master ~]# cat po.yaml apiVersion: v1 kind: Pod metadata: name: pod-test-1 labels: app: tomcat spec: tolerations: - key: a operator: Exists #Exists 時可以不指定 value effect: "NoSchedule" #容忍度 nodeName: node-1 containers: - name: tomcat-test image: tomcat imagePullPolicy: IfNotPresent ports: - containerPort: 8080
指定value
[root@master ~]# kubectl apply -f po-2.yaml pod/pod-test-2 created [root@master ~]# kubectl get pod -A -owide | grep node-1 default pod-test-1 1/1 Running 0 4m57s 172.16.84.154 node-1 <none> <none> default pod-test-2 1/1 Running 0 4s 172.16.84.155 node-1 <none> <none> kube-system calico-node-2bjbd 1/1 Running 3 (54m ago) 4d19h 192.168.10.30 node-1 <none> <none> kube-system kube-proxy-jxl5j 1/1 Running 3 (54m ago) 4d19h 192.168.10.30 node-1 <none> <none> [root@master ~]# cat po-2.yaml apiVersion: v1 kind: Pod metadata: name: pod-test-2 labels: app: tomcat spec: tolerations: - key: a operator: Equal #可以省略,預設需要指定value value: b effect: "NoSchedule" nodeName: node-1 containers: - name: tomcat-test image: tomcat imagePullPolicy: IfNotPresent ports: - containerPort: 8080
pod 的探測
1.存活探測: livenessProbe 存活探針用於決定何時重啟容器,檢測 pod 容器是否處於執行狀態;當存活探測連續失敗達到閾值時,kubelet 將重啟該容器。存活探針不會等待就緒探針成功。 如果你想在執行存活探針前等待,你可以定義
initialDelaySeconds
,或者使用啟動探針KIND: Pod VERSION: v1 FIELD: livenessProbe <Probe> DESCRIPTION: Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. FIELDS: exec <ExecAction> # 命令列探測方式 Exec specifies the action to take. failureThreshold <integer> Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. grpc <GRPCAction> #grpc 探測 GRPC specifies an action involving a GRPC port. httpGet <HTTPGetAction> # http 協議探測 HTTPGet specifies the http request to perform. initialDelaySeconds <integer> # initialDelaySeconds 欄位告訴 kubelet 在執行第一次探測前應該等待 時間 秒 Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes periodSeconds <integer> #periodSeconds 欄位指定了 kubelet 應該每 5 秒執行一次存活探測 How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. successThreshold <integer> #探測成功的最少連續成功次數 Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. tcpSocket <TCPSocketAction> # tcp 探測 TCPSocket specifies an action involving a TCP port. terminationGracePeriodSeconds <integer> # 正常終止的可選持續時間;Pod 中執行的程序會收到終止訊號以及終止時間 Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. 
Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. timeoutSeconds <integer> #探測超時後的秒數。預設為 1 秒。 Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
http 探測
[root@master pod]# cat livenessProbe-1.yaml apiVersion: v1 kind: Pod metadata: name: liveness-1 spec: containers: - name: liveness image: nginx imagePullPolicy: IfNotPresent ports: - name: web containerPort: 80 livenessProbe: initialDelaySeconds: 10 httpGet: port: web path: /index.html [root@master pod]# kubectl apply -f livenessProbe-1.yaml pod/liveness-1 created [root@master pod]# kubectl get po NAME READY STATUS RESTARTS AGE liveness-1 0/1 ContainerCreating 0 7s pod-test-1 1/1 Running 0 159m pod-test-2 1/1 Running 0 154m pod-test-cx 1/1 Running 1 (15h ago) 4d1h [root@master pod]# kubectl get po NAME READY STATUS RESTARTS AGE liveness-1 0/1 ContainerCreating 0 8s pod-test-1 1/1 Running 0 159m pod-test-2 1/1 Running 0 154m pod-test-cx 1/1 Running 1 (15h ago) 4d1h [root@master pod]# kubectl get po -w NAME READY STATUS RESTARTS AGE liveness-1 0/1 ContainerCreating 0 11s pod-test-1 1/1 Running 0 159m pod-test-2 1/1 Running 0 154m pod-test-cx 1/1 Running 1 (15h ago) 4d1h liveness-1 1/1 Running 0 29s
exec 探測
[root@master pod]# cat livenessProbe-2.yaml apiVersion: v1 kind: Pod metadata: name: liveness-2 spec: containers: - name: liveness image: busybox imagePullPolicy: IfNotPresent command: - /bin/sh - "-c" - "echo 1 > /tmp/1.txt && sleep 120;rm -f /tmp/1.txt;sleep 1200" livenessProbe: initialDelaySeconds: 10 exec: command: - /bin/sh - "-c" - "cat /tmp/1.txt" periodSeconds: 3 successThreshold: 1 [root@master pod]# kubectl apply -f livenessProbe-2.yaml && kubectl get po -w pod/liveness-2 created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 24m liveness-2 0/1 ContainerCreating 0 0s pod-test-1 1/1 Running 0 3h3m pod-test-2 1/1 Running 0 179m pod-test-cx 1/1 Running 1 (15h ago) 4d1h liveness-2 0/1 ContainerCreating 0 1s liveness-2 1/1 Running 0 2s liveness-2 1/1 Running 1 (1s ago) 2m41s
tcp
[root@master pod]# cat livenessProbe-3.yaml apiVersion: v1 kind: Pod metadata: name: liveness-3 spec: containers: - name: liveness image: nginx imagePullPolicy: IfNotPresent ports: - name: web containerPort: 80 livenessProbe: initialDelaySeconds: 10 tcpSocket: port: web successThreshold: 1 [root@master pod]# kubectl apply -f livenessProbe-3.yaml && kubectl get po -w pod/liveness-3 created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 44m liveness-3 0/1 ContainerCreating 0 0s pod-test-1 1/1 Running 0 3h23m pod-test-2 1/1 Running 0 3h18m pod-test-cx 1/1 Running 1 (16h ago) 4d1h liveness-3 0/1 ContainerCreating 0 0s liveness-3 1/1 Running 0 1s
2.就緒探測:就緒探針決定何時容器準備好開始接受流量。 這種探針在等待應用執行耗時的初始任務時非常有用,例如建立網路連線、載入檔案和預熱快取。
tcp
[root@master pod]# cat readinessProbe-1.yaml apiVersion: v1 kind: Pod metadata: name: periodseconds-1 spec: containers: - name: liveness image: nginx imagePullPolicy: IfNotPresent ports: - name: web containerPort: 80 readinessProbe: initialDelaySeconds: 10 tcpSocket: port: web successThreshold: 1 periodSeconds: 3 [root@master pod]# vim readinessProbe-1.yaml [root@master pod]# kubectl apply -f readinessProbe-1.yaml && kubectl get po -w pod/periodseconds-1 created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 52m liveness-3 1/1 Running 0 8m54s periodseconds-1 0/1 ContainerCreating 0 0s pod-test-1 1/1 Running 0 3h32m pod-test-2 1/1 Running 0 3h27m pod-test-cx 1/1 Running 1 (16h ago) 4d1h periodseconds-1 0/1 ContainerCreating 0 1s periodseconds-1 0/1 Running 0 1s periodseconds-1 1/1 Running 0 12s
http
[root@master pod]# cat readinessProbe-2.yaml apiVersion: v1 kind: Pod metadata: name: periodseconds-2 spec: containers: - name: liveness image: nginx imagePullPolicy: IfNotPresent ports: - name: web containerPort: 80 readinessProbe: initialDelaySeconds: 10 httpGet: path: /index.html port: web successThreshold: 1 periodSeconds: 3 [root@master pod]# kubectl apply -f readinessProbe-2.yaml ; kubectl get pod -w pod/periodseconds-2 created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 4h8m liveness-3 1/1 Running 0 3h24m periodseconds-1 1/1 Running 0 3h15m periodseconds-2 0/1 ContainerCreating 0 0s pod-test-1 1/1 Running 0 6h47m pod-test-2 1/1 Running 0 6h42m pod-test-cx 1/1 Running 1 (19h ago) 4d5h periodseconds-2 0/1 ContainerCreating 0 1s periodseconds-2 0/1 Running 0 2s periodseconds-2 1/1 Running 0 12s
exec
[root@master pod]# cat readinessProbe-3.yaml apiVersion: v1 kind: Pod metadata: name: riodseconds-2 labels: app: exec spec: containers: - name: liveness image: nginx imagePullPolicy: IfNotPresent command: - /bin/sh - "-c" - "sleep 120;rm -f /usr/share/nginx/html/index.html" ports: - name: web containerPort: 80 readinessProbe: initialDelaySeconds: 10 exec: command: - /bin/sh - "-c" - "ls /usr/share/nginx/html/index.html" successThreshold: 1 periodSeconds: 3 --- apiVersion: v1 kind: Service metadata: name: riodseconds-exec spec: selector: app: exec ports: name: web protocol: TCP targetPort: web port: 80 [root@master pod]# vim readinessProbe-3.yaml [root@master pod]# kubectl apply -f readinessProbe-3.yaml ; kubectl get pod && kubectl get svc -w pod/riodseconds-2 created service/riodseconds-exec created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 4h28m liveness-3 1/1 Running 0 3h43m periodseconds-1 1/1 Running 0 3h35m periodseconds-2 1/1 Running 0 19m pod-test-1 1/1 Running 0 7h7m pod-test-2 1/1 Running 0 7h2m pod-test-cx 1/1 Running 1 (19h ago) 4d5h riodseconds-2 0/1 ContainerCreating 0 0s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 5d2h riodseconds-exec ClusterIP 10.108.185.32 <none> 80/TCP 0s ^C[root@master pod] [root@master pod]# [root@master pod]# [root@master pod]# [root@master pod]# kubectl describe svc riodseconds-exec Name: riodseconds-exec Namespace: default Labels: <none> Annotations: <none> Selector: app=exec Type: ClusterIP IP Family Policy: SingleStack IP Families: IPv4 IP: 10.108.185.32 IPs: 10.108.185.32 Port: web 80/TCP TargetPort: web/TCP Endpoints: 172.16.84.166:80 Session Affinity: None Events: <none> [root@master pod]# kubectl describe svc riodseconds-exec Name: riodseconds-exec Namespace: default Labels: <none> Annotations: <none> Selector: app=exec Type: ClusterIP IP Family Policy: SingleStack IP Families: IPv4 IP: 10.108.185.32 IPs: 10.108.185.32 Port: web 80/TCP TargetPort: web/TCP 
Endpoints: Session Affinity: None Events: <none>
3.啟動探針檢查容器內的應用是否已啟動。 啟動探針可以用於對慢啟動容器進行存活性檢測,避免它們在啟動執行之前就被 kubelet 殺掉。如果配置了這類探針,它會禁用存活檢測和就緒檢測,直到啟動探針成功為止。這類探針僅在啟動時執行,不像存活探針和就緒探針那樣週期性地執行。
exec
[root@master pod]# cat stat-1.yaml apiVersion: v1 kind: Pod metadata: name: stat-1 spec: containers: - name: stat-1 image: nginx imagePullPolicy: IfNotPresent ports: - name: web containerPort: 80 # readinessProbe: # initialDelaySeconds: 10 # httpGet: # path: /index.html # port: web # successThreshold: 1 # periodSeconds: 3 startupProbe: #httpGet: # path: /healthz # port: liveness-port exec: command: - /bin/sh - "-c" - "ls /usr/share/nginx/html/index.html" failureThreshold: 3 # 失敗的次數 periodSeconds: 10 # 探測間隔 [root@master pod]# kubectl apply -f stat-1.yaml ; kubectl get po -w pod/stat-1 created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 7h11m liveness-3 1/1 Running 0 6h27m periodseconds-1 1/1 Running 0 6h18m periodseconds-2 1/1 Running 0 3h3m pod-test-1 1/1 Running 0 9h pod-test-2 1/1 Running 0 9h pod-test-cx 1/1 Running 1 (22h ago) 4d8h stat-1 0/1 ContainerCreating 0 0s stat-1 0/1 ContainerCreating 0 0s stat-1 0/1 Running 0 1s stat-1 0/1 Running 0 10s stat-1 1/1 Running 0 10s
tcp
[root@master pod]# cat stat-2.yaml apiVersion: v1 kind: Pod metadata: name: stat-2 spec: containers: - name: stat-2 image: nginx imagePullPolicy: IfNotPresent ports: - name: web containerPort: 80 # readinessProbe: # initialDelaySeconds: 10 # httpGet: # path: /index.html # port: web # successThreshold: 1 # periodSeconds: 3 startupProbe: #httpGet: # path: /healthz # port: liveness-port #exec: # command: # - /bin/sh # - "-c" # - "ls /usr/share/nginx/html/index.html" tcpSocket: port: web failureThreshold: 3 # 失敗的次數 periodSeconds: 10 # 探測間隔 [root@master pod]# kubectl apply -f stat-2.yaml ; kubectl get po -w pod/stat-2 created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 19h liveness-3 1/1 Running 0 18h periodseconds-1 1/1 Running 0 18h periodseconds-2 1/1 Running 0 15h pod-test-1 1/1 Running 0 22h pod-test-2 1/1 Running 0 22h pod-test-cx 1/1 Running 1 (35h ago) 4d20h stat-1 1/1 Running 0 12h stat-2 0/1 ContainerCreating 0 0s stat-2 0/1 ContainerCreating 0 1s stat-2 0/1 Running 0 1s stat-2 0/1 Running 0 10s stat-2 1/1 Running 0 11s
http
[root@master pod]# cat stat-3.yaml apiVersion: v1 kind: Pod metadata: name: stat-3 spec: containers: - name: stat-3 image: nginx imagePullPolicy: IfNotPresent ports: - name: web containerPort: 80 # readinessProbe: # initialDelaySeconds: 10 # httpGet: # path: /index.html # port: web # successThreshold: 1 # periodSeconds: 3 startupProbe: httpGet: path: /index.html port: web #exec: # command: # - /bin/sh # - "-c" # - "ls /usr/share/nginx/html/index.html" #tcpSocket: # port: web failureThreshold: 3 # 失敗的次數 periodSeconds: 10 # 探測間隔 [root@master pod]# kubectl apply -f stat-3.yaml ; kubectl get po -w pod/stat-3 created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 19h liveness-3 1/1 Running 0 18h periodseconds-1 1/1 Running 0 18h periodseconds-2 1/1 Running 0 15h pod-test-1 1/1 Running 0 22h pod-test-2 1/1 Running 0 22h pod-test-cx 1/1 Running 1 (35h ago) 4d20h stat-1 1/1 Running 0 12h stat-2 1/1 Running 0 5m stat-3 0/1 ContainerCreating 0 1s stat-3 0/1 ContainerCreating 0 1s stat-3 0/1 Running 0 2s stat-3 0/1 Running 0 11s stat-3 1/1 Running 0 11s
容器重啟策略:
restartPolicy
restartPolicy
應用於 Pod 中的應用容器和常規的 Init 容器。 Sidecar 容器忽略 Pod 級別的restartPolicy
欄位:在 Kubernetes 中,Sidecar 被定義為initContainers
內的一個條目,其容器級別的restartPolicy
被設定為Always
。 對於因錯誤而退出的 Init 容器,如果 Pod 級別restartPolicy
為OnFailure
或Always
, 則 kubelet 會重新啟動 Init 容器。Always
:只要容器終止就自動重啟容器。OnFailure
:只有在容器錯誤退出(退出狀態非零)時才重新啟動容器。Never
:不會自動重啟已終止的容器。
混合使用
[root@master pod]# cat readinessProbe-3.yaml apiVersion: v1 kind: Pod metadata: name: lrso labels: app: exec-1 spec: containers: - name: liveness image: nginx imagePullPolicy: IfNotPresent ports: - name: web containerPort: 80 readinessProbe: initialDelaySeconds: 10 exec: command: - /bin/sh - "-c" - "ls /usr/share/nginx/html/index.html" successThreshold: 1 periodSeconds: 3 livenessProbe: initialDelaySeconds: 15 httpGet: port: web path: /index.html startupProbe: httpGet: path: /index.html port: web failureThreshold: 3 # 失敗的次數 periodSeconds: 10 # 探測間隔 --- apiVersion: v1 kind: Service metadata: name: riodseconds-exec-1 spec: selector: app: exec-1 ports: - name: web protocol: TCP targetPort: web port: 80 [root@master pod]# kubectl apply -f readinessProbe-3.yaml ; kubectl get po -w pod/lrso created service/riodseconds-exec-1 created NAME READY STATUS RESTARTS AGE liveness-1 1/1 Running 0 20h liveness-3 1/1 Running 0 19h lrso 0/1 ContainerCreating 0 1s periodseconds-1 1/1 Running 0 19h periodseconds-2 1/1 Running 0 16h pod-test-1 1/1 Running 0 22h pod-test-2 1/1 Running 0 22h pod-test-cx 1/1 Running 1 (35h ago) 4d21h stat-1 1/1 Running 0 12h stat-2 1/1 Running 0 35m stat-3 1/1 Running 0 30m lrso 0/1 ContainerCreating 0 1s lrso 0/1 Running 0 2s lrso 0/1 Running 0 11s lrso 1/1 Running 0 11s
- 如果