[K8S Ops Knowledge Roundup] Day 7-2: Deploying Exporters to the k8s Cluster
Deploying kube-state-metrics
Prepare the base image
Official kube-state-metrics repository on quay.io: https://quay.io/repository/coreos/kube-state-metrics?tab=info
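The image is tagged by ID below, so it first has to exist locally; a hedged example of pulling it from quay.io, matching the version pushed below:
[root@k8s7-200.host.com /opt/src]# docker pull quay.io/coreos/kube-state-metrics:v1.5.0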
[root@k8s7-200.host.com /opt/src]# docker image tag 91599517197a harbor.od.com/public/kube-state-metrics:v1.5.0
[root@k8s7-200.host.com /opt/src]# docker push harbor.od.com/public/kube-state-metrics:v1.5.0
Prepare the resource manifests
1. rbac.yaml
[root@k8s7-200.host.com /data/k8s-yaml/kube-state-metrics]# cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - secrets
  - nodes
  - pods
  - services
  - resourcequotas
  - replicationcontrollers
  - limitranges
  - persistentvolumeclaims
  - persistentvolumes
  - namespaces
  - endpoints
  verbs:
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - replicasets
  verbs:
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - statefulsets
  verbs:
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  verbs:
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: kube-state-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-state-metrics
subjects:
- kind: ServiceAccount
  name: kube-state-metrics
  namespace: kube-system
2. deployment.yaml
[root@k8s7-200.host.com /data/k8s-yaml/kube-state-metrics]# cat deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  labels:
    grafanak8sapp: "true"
    app: kube-state-metrics
  name: kube-state-metrics
  namespace: kube-system
spec:
  selector:
    matchLabels:
      grafanak8sapp: "true"
      app: kube-state-metrics
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        grafanak8sapp: "true"
        app: kube-state-metrics
    spec:
      containers:
      - image: harbor.od.com/public/kube-state-metrics:v1.5.0
        name: kube-state-metrics
        ports:
        - containerPort: 8080
          name: http-metrics
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 5
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        imagePullPolicy: IfNotPresent
      imagePullSecrets:
      - name: harbor
      restartPolicy: Always
      serviceAccount: kube-state-metrics
      serviceAccountName: kube-state-metrics
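Note: the extensions/v1beta1 Deployment API was removed in Kubernetes 1.16, so on newer clusters the same manifest should only need its header changed (spec.selector is already set, as apps/v1 requires); a minimal sketch of that change:
apiVersion: apps/v1   # rest of the manifest unchanged
kind: Deployment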
Apply the resource manifests
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/kube-state-metrics/rbac.yaml
serviceaccount/kube-state-metrics created
clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/kube-state-metrics/deployment.yaml
deployment.extensions/kube-state-metrics created
Verify
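Two hedged spot-checks before curling the Pod: confirm the RBAC actually grants list access by impersonating the ServiceAccount, and look up the Pod IP used below (labels taken from the Deployment above):
[root@k8s7-22.host.com ~]# kubectl auth can-i list pods --as=system:serviceaccount:kube-system:kube-state-metrics
[root@k8s7-22.host.com ~]# kubectl get pods -n kube-system -l app=kube-state-metrics -o wide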
[root@k8s7-22.host.com ~]# curl 172.7.21.16:8080/healthz
ok
Deploying node-exporter
Prepare the base image
[root@k8s7-200.host.com /opt/src]# docker load -i node-exporter-v0.15.0.tar
[root@k8s7-200.host.com /opt/src]# docker image tag 12d51ffa2b22 harbor.od.com/public/node-exporter:v0.15.0
[root@k8s7-200.host.com /opt/src]# docker push harbor.od.com/public/node-exporter:v0.15.0
Prepare the resource manifests
[root@k8s7-200.host.com /data/k8s-yaml/node-exporter]# cat daemonset.yaml
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: node-exporter
  namespace: kube-system
  labels:
    daemon: "node-exporter"
    grafanak8sapp: "true"
spec:
  selector:
    matchLabels:
      daemon: "node-exporter"
      grafanak8sapp: "true"
  template:
    metadata:
      name: node-exporter
      labels:
        daemon: "node-exporter"
        grafanak8sapp: "true"
    spec:
      volumes:
      - name: proc
        hostPath:
          path: /proc
          type: ""
      - name: sys
        hostPath:
          path: /sys
          type: ""
      containers:
      - name: node-exporter
        image: harbor.od.com/public/node-exporter:v0.15.0
        args:
        - --path.procfs=/host_proc
        - --path.sysfs=/host_sys
        ports:
        - name: node-exporter
          hostPort: 9100
          containerPort: 9100
          protocol: TCP
        volumeMounts:
        - name: sys
          readOnly: true
          mountPath: /host_sys
        - name: proc
          readOnly: true
          mountPath: /host_proc
      imagePullSecrets:
      - name: harbor
      restartPolicy: Always
      hostNetwork: true
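Because the exporter reads host-level metrics, /proc and /sys are mounted read-only into the container and pointed at via --path.procfs/--path.sysfs, while hostNetwork/hostPort expose port 9100 directly on every node. The real scrape job is presumably configured when Prometheus itself is deployed; a hedged static sketch against the two nodes in this cluster would look like:
scrape_configs:
- job_name: node-exporter    # sketch only; node IPs assumed from this article's hosts
  static_configs:
  - targets: ['10.4.7.21:9100', '10.4.7.22:9100']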
Apply the resource manifests
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/node-exporter/daemonset.yaml
daemonset.extensions/node-exporter created
Verify
[root@k8s7-21.host.com ~]# netstat -lntup|grep 9100
tcp6 0 0 :::9100 :::* LISTEN 84172/node_exporter
[root@k8s7-21.host.com ~]# kubectl get pods -n kube-system -l daemon="node-exporter" -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
node-exporter-kbkt4 1/1 Running 0 2m46s 10.4.7.22 k8s7-22.host.com <none> <none>
node-exporter-p7gsp 1/1 Running 0 2m46s 10.4.7.21 k8s7-21.host.com <none> <none>
[root@k8s7-21.host.com ~]# curl -s 10.4.7.21:9100/metrics |head
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0
go_gc_duration_seconds{quantile="0.25"} 0
go_gc_duration_seconds{quantile="0.5"} 0
go_gc_duration_seconds{quantile="0.75"} 0
go_gc_duration_seconds{quantile="1"} 0
go_gc_duration_seconds_sum 0
go_gc_duration_seconds_count 0
# HELP go_goroutines Number of goroutines that currently exist.
Deploying cAdvisor
Prepare the base image
[root@k8s7-200.host.com /opt/src]# docker load -i cadvisor-v0.28.3.tar
[root@k8s7-200.host.com /opt/src]# docker image tag 75f88e3ec333 harbor.od.com/public/cadvisor:v0.28.3
[root@k8s7-200.host.com /opt/src]# docker push harbor.od.com/public/cadvisor:v0.28.3
Prepare the resource manifests
[root@k8s7-200.host.com /data/k8s-yaml/cadvisor]# cat daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cadvisor
  namespace: kube-system
  labels:
    app: cadvisor
spec:
  selector:
    matchLabels:
      name: cadvisor
  template:
    metadata:
      labels:
        name: cadvisor
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: cadvisor
        image: harbor.od.com/public/cadvisor:v0.28.3
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: rootfs
          mountPath: /rootfs
          readOnly: true
        - name: var-run
          mountPath: /var/run
        - name: sys
          mountPath: /sys
          readOnly: true
        - name: docker
          mountPath: /var/lib/docker
          readOnly: true
        ports:
        - name: http
          containerPort: 4194
          protocol: TCP
        readinessProbe:
          tcpSocket:
            port: 4194
          initialDelaySeconds: 5
          periodSeconds: 10
        args:
        - --housekeeping_interval=10s
        - --port=4194
      terminationGracePeriodSeconds: 30
      volumes:
      - name: rootfs
        hostPath:
          path: /
      - name: var-run
        hostPath:
          path: /var/run
      - name: sys
        hostPath:
          path: /sys
      - name: docker
        hostPath:
          path: /data/docker
Additional notes:
The toleration below is what allows the cAdvisor Pod to be scheduled onto nodes that carry the master NoSchedule taint (taint toleration):
tolerations:
- key: node-role.kubernetes.io/master
  effect: NoSchedule
Apply a taint
[root@k8s7-22.host.com ~]# kubectl taint node k8s7-21.host.com node-role.kubernetes.io/master=master:NoSchedule
node/k8s7-21.host.com tainted
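A hedged way to confirm the taint is in place:
[root@k8s7-22.host.com ~]# kubectl describe node k8s7-21.host.com | grep -i taint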
Fix the cgroup symlink on the worker nodes (cAdvisor looks for /sys/fs/cgroup/cpuacct,cpu, while CentOS names the controller directory cpu,cpuacct):
[root@k8s7-21.host.com ~]# mount -o remount,rw /sys/fs/cgroup/
[root@k8s7-21.host.com ~]# ln -s /sys/fs/cgroup/cpu,cpuacct /sys/fs/cgroup/cpuacct,cpu
[root@k8s7-21.host.com ~]# ll /sys/fs/cgroup/ | grep cpu
total 0
lrwxrwxrwx 1 root root 11 Jan 28 22:41 cpu -> cpu,cpuacct
lrwxrwxrwx 1 root root 11 Jan 28 22:41 cpuacct -> cpu,cpuacct
lrwxrwxrwx 1 root root 27 May 5 11:15 cpuacct,cpu -> /sys/fs/cgroup/cpu,cpuacct/
drwxr-xr-x 8 root root 0 Apr 26 11:06 cpu,cpuacct
drwxr-xr-x 7 root root 0 Jan 28 22:41 cpuset
Remove the taint
[root@k8s7-21.host.com ~]# kubectl taint node k8s7-21.host.com node-role.kubernetes.io/master-
Apply the resource manifests
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/cadvisor/daemonset.yaml
daemonset.apps/cadvisor created
Verify
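Since the DaemonSet runs with hostNetwork and --port=4194, a minimal hedged check on any node is to confirm the port is listening and that cAdvisor serves Prometheus metrics:
[root@k8s7-21.host.com ~]# netstat -lntup | grep 4194
[root@k8s7-21.host.com ~]# curl -s localhost:4194/metrics | head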
Deploying blackbox-exporter
Prepare the base image
https://hub.docker.com/r/prom/blackbox-exporter
https://github.com/prometheus/blackbox_exporter
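As with kube-state-metrics, the image is tagged by ID below, so it first has to exist locally; a hedged example of pulling it from Docker Hub:
[root@k8s7-200.host.com /opt/src]# docker pull prom/blackbox-exporter:v0.15.1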
[root@k8s7-200.host.com /opt/src]# docker image tag 81b70b6158be harbor.od.com/public/blackbox-exporter:v0.15.1
[root@k8s7-200.host.com /opt/src]# docker push harbor.od.com/public/blackbox-exporter:v0.15.1
Prepare the resource manifests
[root@k8s7-200.host.com /data/k8s-yaml/blackbox-exporter]# cat configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: blackbox-exporter
  name: blackbox-exporter
  namespace: kube-system
data:
  blackbox.yml: |-
    modules:
      http_2xx:
        prober: http
        timeout: 2s
        http:
          valid_http_versions: ["HTTP/1.1", "HTTP/2"]
          valid_status_codes: [200,301,302]
          method: GET
          preferred_ip_protocol: "ip4"
      tcp_connect:
        prober: tcp
        timeout: 2s
[root@k8s7-200.host.com /data/k8s-yaml/blackbox-exporter]# cat deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: blackbox-exporter
  namespace: kube-system
  labels:
    app: blackbox-exporter
  annotations:
    deployment.kubernetes.io/revision: "1"
spec:
  replicas: 1
  selector:
    matchLabels:
      app: blackbox-exporter
  template:
    metadata:
      labels:
        app: blackbox-exporter
    spec:
      volumes:
      - name: config
        configMap:
          name: blackbox-exporter
          defaultMode: 420
      containers:
      - name: blackbox-exporter
        image: harbor.od.com/public/blackbox-exporter:v0.15.1
        imagePullPolicy: IfNotPresent
        args:
        - --config.file=/etc/blackbox_exporter/blackbox.yml
        - --log.level=info
        - --web.listen-address=:9115
        ports:
        - name: blackbox-port
          containerPort: 9115
          protocol: TCP
        resources:
          limits:
            cpu: 200m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 50Mi
        volumeMounts:
        - name: config
          mountPath: /etc/blackbox_exporter
        readinessProbe:
          tcpSocket:
            port: 9115
          initialDelaySeconds: 5
          timeoutSeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 3
[root@k8s7-200.host.com /data/k8s-yaml/blackbox-exporter]# cat service.yaml
# targetPort is omitted: it defaults to the value of port (9115), which matches the container port named blackbox-port
apiVersion: v1
kind: Service
metadata:
  name: blackbox-exporter
  namespace: kube-system
spec:
  selector:
    app: blackbox-exporter
  ports:
  - name: blackbox-port
    protocol: TCP
    port: 9115
[root@k8s7-200.host.com /data/k8s-yaml/blackbox-exporter]# cat ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: blackbox-exporter
  namespace: kube-system
spec:
  rules:
  - host: blackbox.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: blackbox-exporter
          servicePort: blackbox-port
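Note: the extensions/v1beta1 Ingress API is gone on Kubernetes 1.22+; a hedged networking.k8s.io/v1 equivalent of the manifest above:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: blackbox-exporter
  namespace: kube-system
spec:
  rules:
  - host: blackbox.od.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: blackbox-exporter
            port:
              name: blackbox-port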
Apply the resource manifests
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/configmap.yaml
configmap/blackbox-exporter created
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/deployment.yaml
deployment.extensions/blackbox-exporter created
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/service.yaml
service/blackbox-exporter created
[root@k8s7-22.host.com ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/ingress.yaml
ingress.extensions/blackbox-exporter created
DNS resolution
[root@k8s7-11.host.com ~]# tail /var/named/od.com.zone
blackbox A 10.4.7.10
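The new record only resolves after the zone serial is bumped and named is reloaded; a hedged example on the DNS host (the dig should return 10.4.7.10):
[root@k8s7-11.host.com ~]# systemctl restart named
[root@k8s7-11.host.com ~]# dig -t A blackbox.od.com @localhost +short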
Verify
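blackbox-exporter exposes a /probe endpoint that takes target and module parameters; a hedged check through the ingress (www.baidu.com is only an arbitrary example target), in addition to the landing page at http://blackbox.od.com, which lists recent probes:
[root@k8s7-22.host.com ~]# curl -s "http://blackbox.od.com/probe?target=www.baidu.com&module=http_2xx" | grep probe_success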