Using Ceph for Persistent Storage in K8S

1. Overview

Ceph provides storage to Kubernetes in two main ways: CephFS and Ceph RBD. CephFS supports all three PV access modes (ReadWriteOnce, ReadOnlyMany, ReadWriteMany), while RBD supports only ReadWriteOnce and ReadOnlyMany.

2. Deploying Kubernetes and Ceph

2.1 Deploying the Kubernetes cluster

For deployment steps, see "Installing a Kubernetes Cluster with kubeadm".

2.2 Deploying the Ceph cluster

For deployment steps, see "Building a Distributed Ceph Cluster".

3. Configuring Ceph in the Kubernetes cluster

3.1 Using CephFS

(1) Create a CephFS filesystem on the Ceph cluster

# Run the following on a Ceph admin or mon node
# Create the pools that hold file data and metadata
ceph osd pool create cephfs_data 120
ceph osd pool create cephfs_metadata 120

# Create the filesystem
ceph fs new cephfs cephfs_metadata cephfs_data

# List filesystems
ceph fs ls
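
Optionally, confirm that an MDS daemon has picked up the new filesystem before moving on; a minimal sanity check, assuming at least one MDS is already running:

# The MDS should report up:active for the new filesystem
ceph mds stat
ceph fs status cephfs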

(2) Deploy cephfs-provisioner

# Upstream Kubernetes has no built-in dynamic volume support for CephFS,
# so use the community-provided cephfs-provisioner; run this on the k8s cluster
vim external-storage-cephfs-provisioner.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
- kind: ServiceAccount
  name: cephfs-provisioner
  namespace: kube-system

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cephfs-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
      - name: cephfs-provisioner
        image: "quay.io/external_storage/cephfs-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/cephfs
        command:
        - "/usr/local/bin/cephfs-provisioner"
        args:
        - "-id=cephfs-provisioner-1"
      serviceAccountName: cephfs-provisioner

# Apply the manifest
kubectl apply -f external-storage-cephfs-provisioner.yaml

# Check the status; wait until the pod is Running before proceeding
kubectl get pod -n kube-system
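
If the pod stays in Pending or CrashLoopBackOff, the provisioner's own logs usually explain why; the label and Deployment name below come from the manifest above:

# Narrow the view to the provisioner pod and tail its logs
kubectl -n kube-system get pods -l app=cephfs-provisioner
kubectl -n kube-system logs deploy/cephfs-provisioner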

(3) Create the secret

# Show the key; run on a Ceph mon or admin node
ceph auth get-key client.admin

# Get the base64-encoded key string
ceph auth get-key client.admin | base64

# Create the admin secret on the k8s cluster
## Replace the key value below with the base64 string obtained above
vim ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: kube-system
#type: kubernetes.io/rbd
data:
  key: QVFCa01vSmdlNnNVQXhBQWcyczd1K3M0cDN3RERRTk54MFRQOVE9PQ==

# Apply
kubectl apply -f ceph-secret.yaml

# Verify
kubectl get secret ceph-secret -n kube-system -o yaml
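
If you would rather not hand-edit base64 into YAML, the same secret can be created imperatively; a sketch, assuming the raw admin key has been copied into a shell variable on a machine with kubectl access:

# kubectl base64-encodes --from-literal values itself;
# ADMIN_KEY below is a placeholder for the output of `ceph auth get-key client.admin`
ADMIN_KEY='AQBkMoJge6sUAxAAg2s7u+s4p3wDDQNNx0TP9Q=='
kubectl -n kube-system create secret generic ceph-secret --from-literal=key="$ADMIN_KEY"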

(4) Configure the StorageClass

# Define the StorageClass
vim storageclass-cephfs.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: cephfs
provisioner: ceph.com/cephfs
parameters:
    monitors: 192.168.80.37:6789,192.168.80.47:6789
    adminId: admin
    adminSecretName: ceph-secret
    adminSecretNamespace: "kube-system"
#    claimRoot: /volumes/kubernetes
#allowVolumeExpansion: true
#reclaimPolicy: Retain
#volumeBindingMode: Immediate

# Apply
kubectl apply -f storageclass-cephfs.yaml

# Verify
kubectl get sc
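
Optionally, if PVCs that omit storageClassName should land on CephFS by default, the class can be marked as the cluster default via the standard annotation:

# Mark the cephfs StorageClass as the cluster default (optional)
kubectl patch storageclass cephfs -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'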

(5) Create a PersistentVolumeClaim (PVC)

# Run on the k8s cluster
vim cephfs-pvc-test.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: claim
spec:
  accessModes:     
    - ReadWriteMany
  storageClassName: cephfs
  resources:
    requests:
      storage: 2Gi
      
# Apply
kubectl apply -f cephfs-pvc-test.yaml

# Verify
kubectl get pvc
kubectl get pv
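
The PVC should turn Bound within seconds, with a dynamically created PV appearing next to it; if it stays Pending, the cephfs-provisioner logs shown earlier are the first place to look:

# STATUS should read Bound and reference an auto-generated PV name
kubectl get pvc claim -o wide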

(6) Test it

# Create an nginx pod that mounts the PVC
vim nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
  labels:
    name: nginx-pod
spec:
  containers:
  - name: nginx-pod
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: cephfs
      mountPath: /usr/share/nginx/html
  volumes:
  - name: cephfs
    persistentVolumeClaim:
      claimName: claim
      
# Apply
kubectl apply -f nginx-pod.yaml

# Verify
kubectl get pods -o wide
 
# Write a test file into the mounted volume
kubectl exec -ti nginx-pod -- /bin/sh -c 'echo Hello World from CephFS!!! > /usr/share/nginx/html/index.html'
 
# Access test: fetch the pod IP and curl it
POD_IP=$(kubectl get pod nginx-pod -o jsonpath='{.status.podIP}')
curl http://$POD_IP

# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f cephfs-pvc-test.yaml

3.2 Using Ceph RBD

(1) Extra configuration for kubeadm-installed clusters

Clusters deployed with kubeadm need some extra setup: with dynamic provisioning, the controller-manager must run the rbd command to create images, but the official controller-manager image does not ship the rbd binary, so deploy the external rbd-provisioner instead:

vim external-storage-rbd-provisioner.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccountName: rbd-provisioner

# Apply
kubectl apply -f external-storage-rbd-provisioner.yaml

# Check the status; wait until the pod is Running before proceeding
kubectl get pod -n kube-system

# Install ceph-common on every node in the k8s cluster
yum install -y ceph-common
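
A quick way to confirm the client tools landed on each node (both binaries ship with ceph-common):

# Both commands should print a version string on every node
ceph --version
rbd --version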

(2) Create a pool on the Ceph cluster

# Create the OSD pool; run on a Ceph admin or mon node
ceph osd pool create kube 4096
ceph osd pool ls

# Create the user k8s uses to access Ceph; run on a Ceph admin or mon node
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring

# Show the keys; run on a Ceph admin or mon node
ceph auth get-key client.admin
ceph auth get-key client.kube

# Get the base64-encoded key strings; run on a Ceph admin or mon node
ceph auth get-key client.admin | base64
ceph auth get-key client.kube | base64
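
The caps granted above give client.kube read access to the monitors and read/write access scoped to the kube pool; they can be reviewed at any time:

# Show the capabilities granted to the k8s client user
ceph auth get client.kube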

(3) Create the secrets

# Create the secret resources on the k8s cluster
# Admin secret: replace the key value with the admin base64 string obtained above
vim ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
  namespace: kube-system
type: kubernetes.io/rbd
data:
  key: QVFCa01vSmdlNnNVQXhBQWcyczd1K3M0cDN3RERRTk54MFRQOVE9PQ==

# Kube secret: in the default namespace, create the secret used to access Ceph
## Replace the key value with the kube base64 string obtained above
vim ceph-user-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-user-secret
  namespace: default
type: kubernetes.io/rbd
data:
  key: QVFEVU00VmdtdzJPSHhBQWlaTHlUaGZNOHhOTXRoVms0YXkwM3c9PQ==

# Create
kubectl apply -f ceph-secret.yaml
kubectl apply -f ceph-user-secret.yaml

# Inspect the secrets
kubectl get secret ceph-user-secret -o yaml
kubectl get secret ceph-secret -n kube-system -o yaml
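
To confirm a key survived the round trip through base64, decode it straight out of the cluster and compare it against the output of ceph auth get-key:

# Should print the raw key for client.kube
kubectl get secret ceph-user-secret -o jsonpath='{.data.key}' | base64 -d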

(4) Configure the StorageClass

# Define the StorageClass
vim storageclass-ceph-rdb.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: dynamic-ceph-rdb
provisioner: ceph.com/rbd
# provisioner: kubernetes.io/rbd
parameters:
  monitors: 192.168.80.37:6789,192.168.80.47:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
#allowVolumeExpansion: true
#reclaimPolicy: Retain
#volumeBindingMode: Immediate

# Create
kubectl apply -f storageclass-ceph-rdb.yaml

# Verify
kubectl get sc

(5) Create a PersistentVolumeClaim (PVC)

# Run on the k8s cluster
vim ceph-rdb-pvc-test.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-rdb-claim
  namespace: default  # must be the namespace that holds ceph-user-secret
spec:
  accessModes:     
    - ReadWriteOnce
  storageClassName: dynamic-ceph-rdb
  resources:
    requests:
      storage: 2Gi

# Create
kubectl apply -f ceph-rdb-pvc-test.yaml

# Verify
kubectl get pvc
kubectl get pv
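
On the Ceph side there should now be an RBD image backing the new PV; run this on a Ceph admin or mon node, substituting the image name printed by the first command:

# List provisioned images in the kube pool, then inspect one
rbd ls -p kube
rbd info -p kube <image-name>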

(6) Test it

# Create an nginx pod that mounts the PVC
vim nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
  labels:
    name: nginx-pod
spec:
  containers:
  - name: nginx-pod
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: ceph-rdb
      mountPath: /usr/share/nginx/html
  volumes:
  - name: ceph-rdb
    persistentVolumeClaim:
      claimName: ceph-rdb-claim
      
# Apply
kubectl apply -f nginx-pod.yaml

# Verify
kubectl get pods -o wide
 
# Write a test file into the mounted volume
kubectl exec -ti nginx-pod -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'
 
# Access test: fetch the pod IP and curl it
POD_IP=$(kubectl get pod nginx-pod -o jsonpath='{.status.podIP}')
curl http://$POD_IP

# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f ceph-rdb-pvc-test.yaml

3.3 Third-party tools

Kuboard is a graphical management UI for Kubernetes; see the official site https://kuboard.cn/ for installation and configuration, and https://kuboard.cn/learning/k8s-intermediate/persistent/ceph/k8s-config.html for configuring Ceph storage. Note that the Ceph cluster must be at least v15.2.3 and must already have a FileSystem created.
