Namespaces, Affinity, Pod Lifecycle, and Health Checks

Posted by q_7 on 2024-06-16

I. Namespaces

1. Switching namespaces

[root@master pod]# kubectl create ns test
namespace/test created
[root@master pod]# kubectl get ns
NAME              STATUS   AGE
default           Active   10h
kube-node-lease   Active   10h
kube-public       Active   10h
kube-system       Active   10h
test              Active   2s
[root@master pod]# kubectl config set-context --current --namespace=kube-system
Context "kubernetes-admin@kubernetes" modified.
[root@master pod]# kubectl get pod
NAME                                      READY   STATUS    RESTARTS        AGE
calico-kube-controllers-d886b8fff-mbdz7   1/1     Running   0               6h42m
calico-node-48tnk                         1/1     Running   0               6h46m
calico-node-jq7mr                         1/1     Running   0               6h46m
calico-node-pdwcr                         1/1     Running   0               6h46m
coredns-567c556887-99cqw                  1/1     Running   1 (6h44m ago)   10h
coredns-567c556887-9sbfp                  1/1     Running   1 (6h44m ago)   10h
etcd-master                               1/1     Running   1 (6h44m ago)   10h
kube-apiserver-master                     1/1     Running   1 (6h44m ago)   10h
kube-controller-manager-master            1/1     Running   1 (6h44m ago)   10h
kube-proxy-7dl5r                          1/1     Running   1 (6h50m ago)   10h
kube-proxy-pvbrg                          1/1     Running   1 (6h44m ago)   10h
kube-proxy-xsqt9                          1/1     Running   1 (6h50m ago)   10h
kube-scheduler-master                     1/1     Running   1 (6h44m ago)   10h
[root@master pod]# kubectl config set-context --current --namespace=default
Context "kubernetes-admin@kubernetes" modified.
[root@master pod]# kubectl get pod
NAME     READY   STATUS    RESTARTS   AGE
nginx1   1/1     Running   0          8m44s

2. Setting a namespace resource quota

  1. Resources used in the namespace may not exceed the quota.

  2. The quota caps the combined resources of all pods in the namespace.

[root@master ns]# cat test.yaml 
apiVersion: v1
kind: ResourceQuota  #this object defines a resource quota
metadata:
  name: mem-cpu-qutoa
  namespace: test  
spec:
  hard:  #hard caps on resources
     requests.cpu: "2" #total CPU requests in the namespace may not exceed 2 cores
     requests.memory: 2Gi #total memory requests may not exceed 2Gi
     limits.cpu: "4"   #total CPU limits may not exceed 4 cores
     limits.memory: 4Gi

#view the namespace details
[root@master ns]# kubectl describe ns test
Name:         test
Labels:       kubernetes.io/metadata.name=test
Annotations:  <none>
Status:       Active

Resource Quotas
  Name:            mem-cpu-qutoa
  Resource         Used  Hard
  --------         ---   ---
  limits.cpu       0     4
  limits.memory    0     4Gi
  requests.cpu     0     2
  requests.memory  0     2Gi

No LimitRange resource.

#once a quota is defined on the namespace, every new pod must declare resource limits, otherwise creation fails
[root@master pod]# cat nginx.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx1
  namespace: test
  labels:
    app: nginx-pod
spec:
 containers:
 - name: nginx01
   image: docker.io/library/nginx:1.9.1
   imagePullPolicy: IfNotPresent
   resources:  #per-pod resource limits; without them a misbehaving pod could keep eating memory unchecked
     limits:
       memory: "2Gi"  #2Gi of memory
       cpu: "2m"  #unit is millicores; 1000m = 1 core
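
The "No LimitRange resource." line in the describe output above is worth noting: a LimitRange can inject default requests/limits, so pods that omit a resources block are still admitted under the quota. A minimal sketch (the object name and the default values are illustrative assumptions, not from the original setup):

apiVersion: v1
kind: LimitRange
metadata:
  name: default-limits   #hypothetical name
  namespace: test
spec:
  limits:
  - type: Container
    defaultRequest:      #injected when a container declares no requests
      cpu: 100m
      memory: 128Mi
    default:             #injected when a container declares no limits
      cpu: 500m
      memory: 512Mi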

II. Labels

  1. Labels are critical: many resource types are located and managed through them.

  2. Services, controllers, and the like all select the objects they manage by label (see the Deployment sketch at the end of this section).

#add a label
[root@master /]# kubectl label pods nginx1 test=01
pod/nginx1 labeled
[root@master /]# kubectl get pod --show-labels 
NAME     READY   STATUS    RESTARTS   AGE   LABELS
nginx1   1/1     Running   0          45m   app=nginx-pod,test=01

#list pods that carry this label
[root@master /]# kubectl get pods -l app=nginx-pod
NAME     READY   STATUS    RESTARTS   AGE
nginx1   1/1     Running   0          48m

#show pods in all namespaces with their labels
[root@master /]# kubectl get pods --all-namespaces --show-labels 

#show the value of the key app as a column
[root@master /]# kubectl get pods -L app
NAME     READY   STATUS    RESTARTS   AGE   APP
nginx1   1/1     Running   0          50m   nginx-pod

#remove the label
[root@master ~]# kubectl label pod nginx1 app-
pod/nginx1 unlabeled
[root@master ~]# kubectl get pod --show-labels 
NAME     READY   STATUS    RESTARTS   AGE   LABELS
nginx1   1/1     Running   0          57m   test=01
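
As mentioned at the top of this section, controllers find their pods through labels. A minimal Deployment sketch (the name and replica count are illustrative assumptions) showing how spec.selector ties a controller to its pods by label:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy     #hypothetical name
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pod     #the Deployment only manages pods carrying this label
  template:
    metadata:
      labels:
        app: nginx-pod   #pods stamped from this template get the label automatically
    spec:
      containers:
      - name: nginx
        image: docker.io/library/nginx:1.9.1
        imagePullPolicy: IfNotPresent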

III. Affinity

1. Node selectors

Scheduling by host name or node label. This is mandatory placement: if the target node or label does not exist, the pod is still created but sits in Pending.

1) nodeName

[root@master pod]# cat pod1.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod1
  namespace: test
spec:
  nodeName: node1  #place the pod on host node1
  containers:
    - name: pod1
      image: docker.io/library/nginx
      imagePullPolicy: IfNotPresent 

[root@master pod]# kubectl get pod -n test -o wide
NAME     READY   STATUS    RESTARTS   AGE   IP               NODE    NOMINATED NODE   READINESS GATES
nginx1   1/1     Running   0          12h   10.244.104.5     node2   <none>           <none>
pod1     1/1     Running   0          34s   10.244.166.130   node1   <none>           <none>

2) nodeSelector

#label the node so pods can be scheduled onto it by label
[root@master ~]# kubectl label nodes node1 app=node1
node/node1 labeled
[root@master ~]# kubectl get nodes node1 --show-labels 
NAME    STATUS   ROLES    AGE   VERSION   LABELS
node1   Ready    <none>   23h   v1.26.0   app=node1,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1,kubernetes.io/os=linux

[root@master pod]# cat pod2.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod2
  namespace: test
spec:
  nodeSelector:  #schedule by node label
    app: node1   #expressed as a key: value pair
  containers:
  - name: pod2
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent   

[root@master pod]# kubectl get pod -n test -o wide
NAME     READY   STATUS    RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
nginx1   1/1     Running   0          12h     10.244.104.5     node2   <none>           <none>
pod1     1/1     Running   0          9m28s   10.244.166.130   node1   <none>           <none>
pod2     1/1     Running   0          12s     10.244.166.131   node1   <none>           <none>

2. Node affinity

  1. Scheduling is driven by labels on the nodes.

  2. It expresses a relationship between the node and the pod.

1) Soft affinity (preferred)

  1. If no node satisfies the preference, the scheduler still picks a node and schedules the pod anyway.
[root@master pod]# cat pod4.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod4
  namespace: test
spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - preference:
          matchExpressions:   #match labels on the node
          - key: app
            operator: In
            values: ["node1"]
        weight: 1   #preferences are ranked by weight
  containers:
  - name: pod4
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent

[root@master pod]# kubectl get pod -n test -o wide
NAME   READY   STATUS    RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
pod3   1/1     Running   0          6m52s   10.244.166.133   node1   <none>           <none>
pod4   1/1     Running   0          40s     10.244.166.135   node1   <none>           <none>

2) Hard affinity (required)

[root@master pod]# cat pod3.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod3
  namespace: test
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:  #hard requirement
        nodeSelectorTerms:   #match against labels on the node
        - matchExpressions:
          - key: app
            operator: In
            values: ["node1"]   #排程到上面有app=node1這個標籤的節點上面去
  containers:
  - name: pod3
    image: docker.io/library/nginx:1.9.1
    imagePullPolicy: IfNotPresent

3. Pod affinity

  1. Pods that depend on each other can be placed together for efficiency; a web service and its database, for example, benefit from co-location.

  2. Scheduling is driven by the labels of pods that are already running.

1) Soft affinity (preferred)

apiVersion: v1
kind: Pod
metadata:
  name: pod7
  namespace: test
spec:
  affinity:
    podAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - podAffinityTerm:
          labelSelector:
            matchExpressions:
            - key: app
              operator: In
              values: ["pod4"]
          topologyKey: app
        weight: 1
  containers:
  - name: pod7
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent

[root@master pod]# kubectl get pod -n test -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP               NODE    NOMINATED NODE   READINESS GATES
pod4   1/1     Running   0          24m   10.244.166.136   node1   <none>           <none>
pod5   1/1     Running   0          21m   10.244.166.137   node1   <none>           <none>
pod7   1/1     Running   0          51s   10.244.166.139   node1   <none>           <none>

2) Hard affinity (required)

[root@master pod]# cat pod5.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod5
  namespace: test
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values: ["pod4"]
        topologyKey: kubernetes.io/hostname   #the topology domain; every node has its own value for this label (node1, node2, ...)
  containers:
  - name: pod5
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent

#the value of topologyKey is normally a label key that exists on the nodes
apiVersion: v1
kind: Pod
metadata:
  name: pod6
  namespace: test
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values: ["pod4"]
        topologyKey: app2  #app2 is a label key on node2: the pod goes to a node that carries app2 and whose topology domain already runs a pod labeled app=pod4
  containers:
  - name: pod6
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent

[root@master pod]# cat pod5.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod6
  namespace: test
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values: ["pod4"]
        topologyKey: app  #schedule into the topology domain of nodes carrying the app label key, next to a pod labeled app=pod4
  containers:
  - name: pod6
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent

# the operator: DoesNotExist case
apiVersion: v1
kind: Pod
metadata:
  name: pod6
  namespace: test
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: app
            operator: DoesNotExist 
        topologyKey: app   #match pods that do NOT carry the app key, on nodes that have the app label key; it still ends up on the app-labeled node
  containers:
  - name: pod6
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
 

4. Pod anti-affinity

When two pods are both memory-hungry, anti-affinity keeps them apart on different nodes.

apiVersion: v1
kind: Pod
metadata:
  name: pod8
  namespace: test
spec:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values: ["pod4"]
        topologyKey: kubernetes.io/hostname   #must not land on a node already running a pod labeled app=pod4; pod8 therefore goes to node2
  containers:
  - name: pod8
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent

[root@master pod]# kubectl get pod -n test -o wide
NAME   READY   STATUS    RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
pod4   1/1     Running   0          36m     10.244.166.136   node1   <none>           <none>
pod5   1/1     Running   0          33m     10.244.166.137   node1   <none>           <none>
pod6   1/1     Running   0          7m42s   10.244.166.140   node1   <none>           <none>
pod7   1/1     Running   0          12m     10.244.166.139   node1   <none>           <none>
pod8   1/1     Running   0          8s      10.244.104.6     node2   <none>           <none>

5. Taints

  1. Taints are applied on nodes.

  2. kubectl explain node.spec.taints

  3. Apply one manually: kubectl taint nodes node1 a=b:NoSchedule

  4. Taints have three effects (a NoSchedule sketch follows this list):

    1. NoExecute: pods already on the node are evicted, and no new pods can be scheduled onto it.

    2. NoSchedule: pods already on the node stay, but newly created pods will not be scheduled onto it.

    3. PreferNoSchedule: pods are scheduled onto the node only when there is no other choice.
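
A quick NoSchedule sketch (the key/value pair role=test is an arbitrary assumption): existing pods survive, new pods avoid the node, and a trailing "-" removes the taint again:

kubectl taint nodes node1 role=test:NoSchedule
kubectl describe node node1 | grep -i taint   #should show role=test:NoSchedule
kubectl taint nodes node1 role=test:NoSchedule-   #the trailing "-" removes the taint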

#put a taint on node1
[root@master pod]# kubectl get pod -n test -o wide
NAME   READY   STATUS    RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
pod4   1/1     Running   0          41m     10.244.166.136   node1   <none>           <none>
pod5   1/1     Running   0          37m     10.244.166.137   node1   <none>           <none>
pod6   1/1     Running   0          12m     10.244.166.140   node1   <none>           <none>
pod7   1/1     Running   0          17m     10.244.166.139   node1   <none>           <none>
pod8   1/1     Running   0          4m33s   10.244.104.6     node2   <none>           <none>

[root@master pod]# kubectl taint node node1 app=node1:NoExecute
node/node1 tainted
#all pods on that node have been evicted
[root@master pod]# kubectl get pod -n test -o wide
NAME   READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
pod8   1/1     Running   0          6m21s   10.244.104.6   node2   <none>           <none>

#remove the taint
[root@master pod]# kubectl taint node node1 app-
node/node1 untainted
[root@master pod]# kubectl describe node node1 | grep -i taint
Taints:             <none>

6. Tolerations

  1. Tolerations are set on pods: a pod that tolerates a node's taint can still be scheduled onto that node.

  2. kubectl explain pod.spec.tolerations

#the node carries a taint, but a pod whose toleration matches it can still be scheduled there
#taint node1 again
[root@master pod]# kubectl taint node node1 app=node1:NoExecute
node/node1 tainted

#this pod can now be scheduled onto node1
apiVersion: v1
kind: Pod
metadata:
  name: pod10
  namespace: test
spec:
  tolerations:
  - key: "app"
    operator: Equal  #Equal: key, value and effect must all match the node's taint exactly; Exists: the key only has to exist, the value acts as a wildcard
    value: "node1"
    effect: NoExecute
  containers:
  - name: pod10
    image: docker.io/library/nginx:1.9.1  

[root@master pod]# kubectl get pod -n test -o wide
NAME    READY   STATUS    RESTARTS   AGE   IP               NODE    NOMINATED NODE   READINESS GATES
pod10   1/1     Running   0          58s   10.244.166.142   node1   <none>           <none>
pod8    1/1     Running   0          27m   10.244.104.6     node2   <none>           <none>


apiVersion: v1
kind: Pod
metadata:
  name: pod11
  namespace: test
spec:
  tolerations:
  - key: "app"
    operator: Exists   #with Exists, any value of the app taint with effect NoExecute is tolerated
    value: ""
    effect: NoExecute
  containers:
  - name: pod11
    image: docker.io/library/nginx:1.9.1  
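
For the NoExecute effect a toleration may also carry tolerationSeconds, which lets an already-running pod stay on a freshly tainted node for a limited grace period before being evicted. A sketch (the pod name and the 60s value are assumptions):

apiVersion: v1
kind: Pod
metadata:
  name: pod12            #hypothetical name
  namespace: test
spec:
  tolerations:
  - key: "app"
    operator: Exists
    effect: NoExecute
    tolerationSeconds: 60   #evicted 60s after a matching taint appears on the node
  containers:
  - name: pod12
    image: docker.io/library/nginx:1.9.1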

IV. Pod lifecycle

(figure: pod lifecycle diagram)

  1. Init containers run first; the pod must get through the init phase before the main container starts.

  2. The main container has lifecycle hooks: postStart (fires right after the container is created) and preStop (fires just before it is terminated).

1. Init containers

[root@master pod]# cat init.yaml 
apiVersion: v1
kind: Pod
metadata:
  name:  init-pod
  namespace: test
spec:
  initContainers:
  - name: init-pod1
    image: docker.io/library/nginx:1.9.1 
    command: ["/bin/bash","-c","touch /11.txt"]
  containers:
  - name: main-pod
    image: docker.io/library/nginx:1.9.1 

[root@master pod]# kubectl get pod -n test -w
NAME       READY   STATUS    RESTARTS   AGE
init-pod   0/1     Pending   0          0s
init-pod   0/1     Pending   0          0s
init-pod   0/1     Init:0/1   0          0s
init-pod   0/1     Init:0/1   0          1s
init-pod   0/1     PodInitializing   0          2s
init-pod   1/1     Running           0          3s

#if an init container fails, it keeps getting restarted, governed by the pod's restart policy
[root@master pod]# cat init.yaml 
apiVersion: v1
kind: Pod
metadata:
  name:  init-pod
  namespace: test
spec:
  initContainers:
  - name: init-pod1
    image: docker.io/library/nginx:1.9.1 
    command: ["/bin/bash","-c","qwe /11.txt"]
  containers:
  - name: main-pod
    image: docker.io/library/nginx:1.9.1 

[root@master pod]# kubectl get pod -n test -w
NAME       READY   STATUS    RESTARTS   AGE
init-pod   0/1     Pending   0          0s
init-pod   0/1     Pending   0          0s
init-pod   0/1     Init:0/1   0          0s
init-pod   0/1     Init:0/1   0          0s
init-pod   0/1     Init:0/1   0          1s
init-pod   0/1     Init:Error   0          2s
init-pod   0/1     Init:Error   1 (2s ago)   3s
init-pod   0/1     Init:CrashLoopBackOff   1 (2s ago)   4s
init-pod   0/1     Init:Error              2 (14s ago)   16s

2. postStart hook

  1. The hook fires right after the container is created; there is no guarantee it runs before the container's entrypoint.

  2. If it fails, the container is killed and restarted (per the restart policy), so the main process never settles in.

  3. It can be written in several handler forms (exec below; an httpGet sketch follows the example).

1) exec

[root@master pod]# cat pre.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pre-pod
  namespace: test
spec:
  containers:
  - name: pre-pod
    image: docker.io/library/nginx:1.9.1
    lifecycle:
      postStart:
         exec:
           command: ["/bin/bash","-c","touch /11.txt"]

[root@master pod]# kubectl exec -n test -ti pre-pod -- /bin/bash
root@pre-pod:/# ls
11.txt	boot  etc   lib    media  opt	root  sbin  sys  usr
bin	dev   home  lib64  mnt	  proc	run   srv   tmp  var
root@pre-pod:/# cat 11.txt 

#if the postStart hook fails, the container is killed and the main container does not keep running
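
Besides exec, a lifecycle hook can also be written as an httpGet handler. A sketch (the pod name, port, and path are assumptions; the hook succeeds on any 2xx/3xx response):

apiVersion: v1
kind: Pod
metadata:
  name: pre-pod-http     #hypothetical name
  namespace: test
spec:
  containers:
  - name: pre-pod-http
    image: docker.io/library/nginx:1.9.1
    lifecycle:
      postStart:
        httpGet:         #the hook sends an HTTP GET instead of running a command
          port: 80
          path: /        #assumed path on the container's own server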

3. preStop hook

preStop runs just before the container is terminated (for example when the pod is deleted):

[root@master pod]# cat pre.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pre-pod
  namespace: test
spec:
  containers:
  - name: pre-pod
    image: docker.io/library/nginx:1.9.1
    lifecycle:
      preStop:
         exec:
           command: ["/bin/bash","-c","touch /11.txt"]

4. Restart policy and pod states

  1. Set via pod.spec.restartPolicy.

  2. Always: restart the container whenever it exits, whatever the reason; this is the default.

  3. OnFailure: kubelet restarts the container only when it terminates with a non-zero exit code.

  4. Never: kubelet never restarts the container, whatever its state.

  5. Pod states:

    1. Pending: the pod has been accepted but cannot run yet; no node satisfies the scheduling constraints, or images are still being pulled.

    2. Running: the pod is bound to a node and at least one container has been created.

    3. Succeeded: every container terminated successfully and none will be restarted.

    4. Failed: every container has terminated and at least one of them failed (non-zero exit).

    5. Unknown: the pod's state cannot be determined, usually an apiserver/kubelet communication problem.

    6. Evicted: the node ran out of memory or disk.

    7. CrashLoopBackOff: the container started but keeps exiting abnormally.

    8. Error: an error occurred while the pod was starting.

    9. Completed: the pod has finished its work.

#set a postStart hook that will fail, and set the restart policy to Never
apiVersion: v1
kind: Pod
metadata:
  name: pre-pod
  namespace: test
spec:
  restartPolicy: Never
  containers:
  - name: pre-pod
    image: docker.io/library/nginx:1.9.1
    lifecycle:
      postStart:
        exec:
          command: ["/bin/bash","-c","qwe /11.txt"]
#the hook failed and, with Never, the pod is not restarted
[root@master pod]# kubectl get pod -n test -w
NAME      READY   STATUS    RESTARTS   AGE
pre-pod   0/1     Pending   0          0s
pre-pod   0/1     Pending   0          0s
pre-pod   0/1     ContainerCreating   0          0s
pre-pod   0/1     ContainerCreating   0          0s
pre-pod   0/1     Completed           0          2s
pre-pod   0/1     Completed           0          3s
pre-pod   0/1     Completed           0          4s

#describe the pod
#it exited without being restarted
Events:
  Type     Reason               Age   From               Message
  ----     ------               ----  ----               -------
  Normal   Scheduled            12m   default-scheduler  Successfully assigned test/pre-pod to node1
  Normal   Pulled               12m   kubelet            Container image "docker.io/library/nginx:1.9.1" already present on machine
  Normal   Created              12m   kubelet            Created container pre-pod
  Normal   Started              12m   kubelet            Started container pre-pod
  Warning  FailedPostStartHook  12m   kubelet            PostStartHook failed
  Normal   Killing              12m   kubelet            FailedPostStartHook

V. Pod health checks (probing inside the containers)

1. livenessProbe (liveness probing)

  1. Checks whether the container in the pod is still healthy; when the probe fails, k8s decides via the restart policy whether to restart the container.

  2. Suited to restarting a container when its service (a web server, say) breaks.

  3. Its only job is to detect whether the pod's container is running properly.

  4. Three handler types are supported: exec, tcpSocket, httpGet.

  5. A probe result is one of three values: Success (the check passed), Failure (it did not), and Unknown (the check itself could not run properly).

  6. kubectl explain pod.spec.containers.livenessProbe

1) Parameters

livenessProbe:
  initialDelaySeconds: #seconds to wait after the pod starts before the first check
  periodSeconds: #interval between checks, default 10s
  timeoutSeconds: #how long to wait for a response after sending the probe, default 1s
  successThreshold: #consecutive successes needed to count as success, default 1; must be 1 for liveness, minimum 1
  failureThreshold: #consecutive failures before the probe is considered failed, default 3, minimum 1; for a readiness probe the pod is then marked NotReady
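
A worked example of how these combine: with initialDelaySeconds: 3, periodSeconds: 5, and failureThreshold: 3, a container that is unhealthy from the start is restarted after roughly 3 + 3×5 = 18 seconds (hedged: probe timeouts and jitter shift this in practice).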

2) exec

[root@master pod]# cat liveness.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: live1
  namespace: test
spec:
  containers:
  - name: live1
    image: docker.io/library/nginx:1.9.1
    livenessProbe:
      exec:
        command: ["/bin/bash","-c","touch /11.txt"]   #touch always succeeds, so this probe always passes
      failureThreshold: 3  #three failures in a row mark the probe as failed
      initialDelaySeconds: 3  #wait 3s before the first check
      periodSeconds: 5   #check every 5s
      successThreshold: 1 #must be 1; a single success is enough
      timeoutSeconds: 10  #wait up to 10s for the command to finish

[root@master pod]# kubectl get pod -n test -w
NAME      READY   STATUS      RESTARTS   AGE
pre-pod   0/1     Completed   0          4h45m
live1     0/1     Pending     0          0s
live1     0/1     Pending     0          0s
live1     0/1     ContainerCreating   0          0s
live1     0/1     ContainerCreating   0          1s
live1     1/1     Running             0          2s
live1     1/1     Running             0          30s

3) httpGet

#field reference
httpGet:
  scheme: #protocol used for the connection, default HTTP
  host:  #host name to connect to, defaults to the pod IP (i.e. inside the container)
  port:  #port number or name to access on the container
  path:   #URL path on the HTTP server
  httpHeaders:   #custom HTTP request headers, duplicates allowed

[root@master pod]# cat liveness.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: live1
  namespace: test
spec:
  containers:
  - name: live1
    image: docker.io/library/nginx:1.9.1
    livenessProbe:
      httpGet:
        port: 80
        scheme: HTTP
        path: /index.html   #equivalent to running curl localhost:80/index.html inside the container; any HTTP status in 200-399 counts as success
      failureThreshold: 3
      initialDelaySeconds: 3
      periodSeconds: 5
      successThreshold: 1 
      timeoutSeconds: 10
#the probe passes and the pod keeps running
live1     0/1     ContainerCreating   0          0s
live1     0/1     ContainerCreating   0          1s
live1     1/1     Running             0          2s
live1     1/1     Running             0          42s

4) tcpSocket

[root@master pod]# cat liveness.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: live1
  namespace: test
spec:
  containers:
  - name: live1
    image: docker.io/library/nginx:1.9.1
    livenessProbe:
      tcpSocket:
        port: 80   #the probe tries to open a TCP connection to port 80 of the container
      failureThreshold: 3
      initialDelaySeconds: 3
      periodSeconds: 5
      successThreshold: 1 
      timeoutSeconds: 10

2. readinessProbe (readiness probing)

  1. The container in the pod may be running while the service inside still has to load its configuration (a web server reading its config, say) before it can serve.

  2. A readiness probe reports when the server is actually able to serve.

  3. It guards against the pod being up while the service inside is not really serving.

  4. The same three handler types are supported (see the endpoints note after the example).

[root@master pod]# cat liveness.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: live1
  namespace: test
spec:
  containers:
  - name: live1
    image: docker.io/library/nginx:1.9.1
    readinessProbe:
      httpGet:
        port: 80   #the probe sends an HTTP GET to port 80
      failureThreshold: 3
      initialDelaySeconds: 3
      periodSeconds: 5
      successThreshold: 1 
      timeoutSeconds: 10

#READY stays 0/1 for a few seconds until the first successful check
[root@master pod]# kubectl get pod -n test -w
NAME      READY   STATUS      RESTARTS   AGE
pre-pod   0/1     Completed   0          5h11m
live1     0/1     Pending     0          0s
live1     0/1     Pending     0          0s
live1     0/1     ContainerCreating   0          0s
live1     0/1     ContainerCreating   0          0s
live1     0/1     Running             0          1s
live1     1/1     Running             0          5s
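
While readiness is failing, the pod stays Running but READY shows 0/1, and any Service selecting it withholds traffic. A quick way to confirm (hedged sketch; there is no Service over this pod in the original setup, so the name below is a placeholder):

kubectl get endpoints my-svc -n test   #a Running-but-not-Ready pod is absent from the address list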

3. startupProbe (startup probing)

  1. Checks whether the application inside the container has started; while a startup probe is configured, all other probes are disabled until it succeeds.

  2. If the startup probe fails, kubelet kills the container, which then follows its restart policy; if no startup probe is provided, the result defaults to Success.

  3. Probes are optional; when several are set, the order is startupProbe first, then readinessProbe and livenessProbe, which have no ordering between themselves.

  4. The startup probe has the highest priority: it runs before the other probes.

  5. Purpose: to determine whether the container has started. Unlike the readiness probe, the startup probe only runs during startup and stops for good once it succeeds.

apiVersion: v1
kind: Pod
metadata:
  name: start1
  namespace: test
spec:
  containers:
  - name: start1
    image: docker.io/library/nginx:1.9.1
    startupProbe:
      exec:     #check whether nginx has started
        command: ["/bin/bash","-c","ps -aux|grep nginx"]   #note: grep matches itself, so this always exits 0; add grep -v grep for a real check

[root@master ~]# kubectl get pod -n test -w
NAME      READY   STATUS      RESTARTS   AGE
live1     1/1     Running     0          17h
pre-pod   0/1     Completed   0          22h
start1    0/1     Pending     0          1s
start1    0/1     Pending     0          1s
start1    0/1     ContainerCreating   0          1s
start1    0/1     ContainerCreating   0          1s
start1    0/1     Running             0          2s
start1    0/1     Running             0          11s
start1    0/1     Running             0          11s
start1    1/1     Running             0          12s
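
The probe above relies on the defaults (periodSeconds 10, failureThreshold 3), so the application gets roughly 30 seconds to come up. Slow starters usually raise failureThreshold; a sketch with assumed values:

startupProbe:
  exec:
    command: ["/bin/bash","-c","ps -aux | grep -v grep | grep nginx"]
  periodSeconds: 10        #probe every 10s
  failureThreshold: 30     #tolerate up to 30 failures: a ~300s startup budget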

4. Using the three probes together

apiVersion: v1
kind: Service
metadata:
  name: springboot
  labels:
    app: springboot
spec:
  type: NodePort
  ports:
  - name: server
    port: 8080
    targetPort: 8080
    nodePort: 31180
  - name: management
    port: 8081
    targetPort: 8081
    nodePort: 31181
  selector:
    app: springboot
---
apiVersion: v1
kind: Pod
metadata:
  name: springboot-live
  labels:
    app: springboot
spec:
  containers:
  - name: springboot
    image: mydlqclub/springboot-helloworld:0.0.1
    imagePullPolicy: IfNotPresent
    ports:
    - name: server
      containerPort: 8080
    - name: management
      containerPort: 8081
    readinessProbe:   #readiness probe: is the service inside actually able to serve
      initialDelaySeconds: 20    
      periodSeconds: 5          
      timeoutSeconds: 10   
      httpGet:
        scheme: HTTP
        port: 8081
        path: /actuator/health
    livenessProbe:   #liveness probe: is the container still healthy
      initialDelaySeconds: 20   
      periodSeconds: 5          
      timeoutSeconds: 10   
      httpGet:
        scheme: HTTP
        port: 8081
        path: /actuator/health
    startupProbe:   #startup probe: runs before the other two
      initialDelaySeconds: 20     #wait 20s before the first check
      periodSeconds: 5            #check every 5s
      timeoutSeconds: 10       #a response taking more than 10s counts as a timeout
      httpGet:
        scheme: HTTP
        port: 8081
        path: /actuator/health
#if the container runs into trouble, the restart policy decides what happens next
