Using Ceph to Provide Dynamic Persistent Storage in Kubernetes

Published by 店家小二 on 2018-12-17

Introduction

This article describes how to use Ceph to provide dynamically provisioned PVs for Kubernetes. Ceph supplies the underlying storage. CephFS supports all three PV access modes (ReadWriteOnce, ReadOnlyMany, ReadWriteMany), while RBD supports only ReadWriteOnce and ReadOnlyMany.

Access modes are capability descriptions, not enforced constraints: if a PV is used in a way that does not match the mode declared on the PVC, the storage provider is responsible for any runtime errors. For example, a PVC declared as ReadOnlyMany can still be written to after a pod mounts it; to make the volume truly read-only, specify the readOnly: true parameter when the claim is used.
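As a minimal sketch (the pod and claim names below are hypothetical, not part of the setup that follows), read-only access is enforced by setting readOnly: true on both the volume source and the volumeMount:

# Sketch only: enforce read-only access to a PVC-backed volume
cat >readonly-pod-example.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: readonly-test
spec:
  containers:
  - name: app
    image: nginx:alpine
    volumeMounts:
    - name: data
      mountPath: /data
      readOnly: true
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: readonly-claim
      readOnly: true
EOF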

Deployment

Deploy Kubernetes

Install Kubernetes 1.11 on CentOS 7 with kubeadm

Deploy Ceph

Install a Ceph distributed storage cluster on CentOS 7

Configure the Kubernetes cluster to use Ceph

Using Ceph RBD

Extra steps for clusters installed with kubeadm
# The following extra steps are required for clusters deployed with kubeadm.
# With dynamic provisioning, controller-manager must run the rbd command to create images,
# but the official controller-manager image does not ship the rbd binary,
# so PVC creation will fail without the workaround below.
# Related issue: https://github.com/kubernetes/kubernetes/issues/38923
cat >external-storage-rbd-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:v2.0.0-k8s1.11"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
EOF
kubectl apply -f external-storage-rbd-provisioner.yaml

# Check status; wait until the pod is Running before continuing
kubectl get pod -n kube-system
Configure the StorageClass
# Install ceph-common on every node in the Kubernetes cluster;
# kubelet needs the rbd command to map the RBD images it attaches.
yum install -y ceph-common
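To confirm the client is actually available on each node before continuing:

# Confirm the rbd binary is present and check its version
which rbd
rbd --version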

# Create an OSD pool (run on a Ceph mon or admin node)
ceph osd pool create kube 4096
ceph osd pool ls
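If you want to sanity-check the new pool, both of these are standard commands:

# Inspect the pool's placement-group count and overall cluster usage
ceph osd pool get kube pg_num
ceph df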

# Create the user Kubernetes will use to access Ceph (run on a Ceph mon or admin node)
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring

# View the keys (run on a Ceph mon or admin node)
ceph auth get-key client.admin
ceph auth get-key client.kube
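get-key prints only the key itself; to also verify the capabilities granted to the new user:

# Show the client.kube entity together with its mon/osd caps
ceph auth get client.kube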

# Create the admin secret.
# Replace CEPH_ADMIN_SECRET with the key obtained from client.admin.
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
  --from-literal=key=$CEPH_ADMIN_SECRET \
  --namespace=kube-system

# Create, in the default namespace, the secret that PVCs use to access Ceph.
# Replace CEPH_KUBE_SECRET with the key obtained from client.kube.
export CEPH_KUBE_SECRET='AQBZK3VbTN/QOBAAIYi6CRLQcVevW5HM8lunOg=='
kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
  --from-literal=key=$CEPH_KUBE_SECRET \
  --namespace=default

# Inspect the secrets
kubectl get secret ceph-user-secret -o yaml
kubectl get secret ceph-secret -n kube-system -o yaml
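The key is stored base64-encoded inside the secret; a quick round-trip check that it matches what ceph auth get-key returned:

# Decode the stored keys; each should match the corresponding ceph auth get-key output
kubectl get secret ceph-secret -n kube-system -o jsonpath='{.data.key}' | base64 -d
kubectl get secret ceph-user-secret -o jsonpath='{.data.key}' | base64 -d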

# Configure the StorageClass.
# For a cluster created with kubeadm, use the following provisioner:
# provisioner: ceph.com/rbd
cat >storageclass-ceph-rdb.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-ceph-rdb
provisioner: ceph.com/rbd
# provisioner: kubernetes.io/rbd
parameters:
  monitors: 11.11.11.111:6789,11.11.11.112:6789,11.11.11.113:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF

# Create
kubectl apply -f storageclass-ceph-rdb.yaml

# Verify
kubectl get sc
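Optionally, the class can be marked as the cluster default (via the standard is-default-class annotation) so PVCs that omit storageClassName also use it:

# Optional: make dynamic-ceph-rdb the default StorageClass
kubectl patch storageclass dynamic-ceph-rdb \
  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'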
Test it
# Create a test PVC
cat >ceph-rdb-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-rdb-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: dynamic-ceph-rdb
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f ceph-rdb-pvc-test.yaml
 
# Verify
kubectl get pvc
kubectl get pv
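Once the PVC is Bound, a matching image should also exist in the kube pool on the Ceph side; the provisioner typically names it kubernetes-dynamic-pvc-<uuid>:

# On a Ceph mon or admin node: list images in the kube pool
rbd ls kube
# Inspect one (replace <uuid> with an image name from the listing above)
# rbd info kube/kubernetes-dynamic-pvc-<uuid>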
 
# Create an nginx pod to test the mount
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: ceph-rdb
      mountPath: /usr/share/nginx/html
  volumes:
  - name: ceph-rdb
    persistentVolumeClaim:
      claimName: ceph-rdb-claim
EOF
kubectl apply -f nginx-pod.yaml
 
# Verify
kubectl get pods -o wide
 
# Write a test file into the mounted volume
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'

# Access test (POD_ID holds the pod IP from the wide output)
POD_ID=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_ID

# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f ceph-rdb-pvc-test.yaml

Using CephFS

The Linux kernel on the nodes must be 4.10 or newer, otherwise CephFS will not work properly; see the issue at github.com/kubernetes-… for details. For upgrade steps, see "Upgrade the kernel on CentOS 7".
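A quick check of the running kernel before proceeding:

# CephFS needs a 4.10+ kernel on the nodes; verify before continuing
uname -r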

Create a CephFS in the Ceph cluster
# Run the following on a Ceph mon or admin node.
# CephFS needs two pools: one for data and one for metadata.
ceph osd pool create fs_data 128
ceph osd pool create fs_metadata 128
ceph osd lspools

# Create a CephFS
ceph fs new cephfs fs_metadata fs_data

# Verify
ceph fs ls
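It is also worth confirming that an MDS daemon has become active for the new filesystem:

# Check that an MDS is active for the filesystem
ceph mds stat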
Deploy cephfs-provisioner
# There is no official dynamic provisioning support for CephFS,
# so we use the community-provided cephfs-provisioner.
cat >external-storage-cephfs-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
- kind: ServiceAccount
  name: cephfs-provisioner
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
      - name: cephfs-provisioner
        image: "quay.io/external_storage/cephfs-provisioner:v2.0.0-k8s1.11"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/cephfs
        command:
        - "/usr/local/bin/cephfs-provisioner"
        args:
        - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner
EOF
kubectl apply -f external-storage-cephfs-provisioner.yaml

# Check status; wait until the pod is Running before continuing
kubectl get pod -n kube-system

Configure the StorageClass

# View the key (run on a Ceph mon or admin node)
ceph auth get-key client.admin

# Create the admin secret.
# Replace CEPH_ADMIN_SECRET with the key obtained from client.admin.
# If you already created it while testing Ceph RBD, skip this step.
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
  --from-literal=key=$CEPH_ADMIN_SECRET \
  --namespace=kube-system

# Inspect the secret
kubectl get secret ceph-secret -n kube-system -o yaml

# Configure the StorageClass
cat >storageclass-cephfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 11.11.11.111:6789,11.11.11.112:6789,11.11.11.113:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: "kube-system"
  claimRoot: /volumes/kubernetes
EOF

# Create
kubectl apply -f storageclass-cephfs.yaml

# Verify
kubectl get sc
Test it
# Create a test PVC
cat >cephfs-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: dynamic-cephfs
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f cephfs-pvc-test.yaml
 
# Verify
kubectl get pvc
kubectl get pv
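If you want to see the backing directory the provisioner created under claimRoot, you can mount the filesystem directly on a node with the kernel client. A sketch (the mount point /mnt/cephfs is arbitrary, and CEPH_ADMIN_SECRET is the admin key exported earlier):

# Sketch: mount CephFS with the kernel client and look under claimRoot
mkdir -p /mnt/cephfs
mount -t ceph 11.11.11.111:6789:/ /mnt/cephfs -o name=admin,secret=$CEPH_ADMIN_SECRET
ls /mnt/cephfs/volumes/kubernetes
umount /mnt/cephfs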
 
# Create an nginx pod to test the mount
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: cephfs
      mountPath: /usr/share/nginx/html
  volumes:
  - name: cephfs
    persistentVolumeClaim:
      claimName: cephfs-claim
EOF
kubectl apply -f nginx-pod.yaml
 
# Verify
kubectl get pods -o wide
 
# Write a test file into the mounted volume
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from CephFS!!! > /usr/share/nginx/html/index.html'

# Access test (POD_ID holds the pod IP from the wide output)
POD_ID=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_ID

# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f cephfs-pvc-test.yaml

Reposted from Juejin: Using Ceph to Provide Dynamic Persistent Storage in Kubernetes
