Introduction

This article describes how to use Ceph to provide dynamic PV provisioning for Kubernetes, with Ceph supplying the underlying storage. CephFS supports all three Kubernetes PV access modes (ReadWriteOnce, ReadOnlyMany, ReadWriteMany), while RBD supports ReadWriteOnce and ReadOnlyMany.

The access mode is only a capability description and is not enforced; the storage provider is responsible for runtime errors when a PV is accessed in a way other than declared by the PVC. For example, even if the PVC access mode is set to ReadOnlyMany, a pod can still write to the volume after mounting it. If strict read-only behavior is required, specify readOnly: true when mounting the claim.
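
As a minimal sketch (the claim name here is hypothetical), read-only access is requested in the pod's volume definition:

  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: my-claim   # hypothetical claim name
      readOnly: true        # mount the volume read-only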

Deployment

Deploy k8s

CentOS 7: install Kubernetes 1.11 with kubeadm

Deploy Ceph

CentOS 7: install a Ceph distributed storage cluster

Configure Ceph in the Kubernetes cluster

Using Ceph RBD

Additional configuration for clusters installed with kubeadm
# These extra steps are only needed for clusters deployed with kubeadm.
# kube-controller-manager uses the rbd command to create images,
# but the official controller-manager image does not include rbd,
# so we deploy an external rbd-provisioner instead.
# Without the following, an error is reported and the PVC cannot be created.
# Related issue: https://github.com/kubernetes/kubernetes/issues/38923
cat >external-storage-rbd-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get"."list"."watch"."create"."delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get"."list"."watch"."update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get"."list"."watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create"."update"."patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get"."list"."watch"."create"."update"."patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list"."get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "Quay. IO/external_storage/RBD - provisioner: v2.0.0 - k8s1. 11"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
EOF
kubectl apply -f external-storage-rbd-provisioner.yaml

# Check the status; wait until the pod is Running before performing subsequent operations
kubectl get pod -n kube-system
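
# For example, one way to watch only the provisioner pod, using the app label from the Deployment above
kubectl get pod -n kube-system -l app=rbd-provisioner -w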
Configure the StorageClass
Install ceph-common on all nodes in the Kubernetes cluster
# kubelet uses the rbd command to map and mount RBD images
yum install -y ceph-common
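
# Quick sanity check that the rbd client is now available on the node
which rbd && rbd --version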

# Create an OSD pool on the Ceph mon or admin node
ceph osd pool create kube 4096
ceph osd pool ls

# Create a Ceph user for Kubernetes on the Ceph mon or admin node
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring
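
# Verify the user and its capabilities were created as expected
ceph auth get client.kube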

# Get the Ceph keys on the mon or admin node
ceph auth get-key client.admin
ceph auth get-key client.kube

# Create the admin secret
# Replace CEPH_ADMIN_SECRET with the key obtained from client.admin
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system

# Create a secret in the default namespace for PVCs to access Ceph
# Replace CEPH_KUBE_SECRET with the key obtained from client.kube
export CEPH_KUBE_SECRET='AQBZK3VbTN/QOBAAIYi6CRLQcVevW5HM8lunOg=='
kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_KUBE_SECRET \
--namespace=default
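
# Alternatively (instead of the export/create steps above), the key can be substituted
# inline, assuming kubectl and the ceph CLI are available on the same host:
#   kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
#     --from-literal=key=$(ceph auth get-key client.kube) --namespace=default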

# Check the secrets
kubectl get secret ceph-user-secret -o yaml
kubectl get secret ceph-secret -n kube-system -o yaml

# Configure the StorageClass
# For clusters deployed with kubeadm, use the external rbd-provisioner deployed above
# (provisioner: ceph.com/rbd); otherwise the in-tree kubernetes.io/rbd provisioner can be used
cat >storageclass-ceph-rdb.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-ceph-rdb
provisioner: ceph.com/rbd
# provisioner: kubernetes.io/rbd
parameters:
  monitors: 11.11.11.111:6789,11.11.11.112:6789,11.11.11.113:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF

# Create
kubectl apply -f storageclass-ceph-rdb.yaml

# check
kubectl get sc
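
# Optionally mark the class as the cluster default so PVCs without an explicit
# storageClassName use it
kubectl patch storageclass dynamic-ceph-rdb -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'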
Test
Create a test PVC
cat >ceph-rdb-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-rdb-claim
spec:
  accessModes:     
    - ReadWriteOnce
  storageClassName: dynamic-ceph-rdb
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f ceph-rdb-pvc-test.yaml
 
# check
kubectl get pvc
kubectl get pv
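
# The claim should report Bound once the provisioner has created the backing image;
# one way to check just the phase:
kubectl get pvc ceph-rdb-claim -o jsonpath='{.status.phase}'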
 
Create an nginx pod to test the mount
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: ceph-rdb
      mountPath: /usr/share/nginx/html
  volumes:
  - name: ceph-rdb
    persistentVolumeClaim:
      claimName: ceph-rdb-claim
EOF
kubectl apply -f nginx-pod.yaml
 
# check
kubectl get pods -o wide
 
# Modify file contents
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'
 
# Access test
POD_ID=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_ID
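
# On the Ceph side, confirm that a backing image was created in the kube pool
rbd ls -p kube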

# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f ceph-rdb-pvc-test.yaml

Using CephFS

CephFS requires Linux kernel 4.10 or later, otherwise it will not work properly; see the issue at github.com/kubernetes-… for details. Upgrade the kernel on CentOS 7 if necessary.
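
You can check the running kernel version on each node with:

uname -r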

Create a CephFS in the Ceph cluster
The following operations are performed on the Ceph mon or admin node

# CephFS requires two pools to store data and metadata separately
ceph osd pool create fs_data 128
ceph osd pool create fs_metadata 128
ceph osd lspools

# Create the CephFS
ceph fs new cephfs fs_metadata fs_data

# check
ceph fs ls
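
# CephFS also requires an active MDS daemon; confirm one is up
ceph mds stat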
Deploy cephfs-provisioner
# Kubernetes has no official CephFS dynamic volume support
# Use the cephfs-provisioner provided by the community
cat >external-storage-cephfs-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get"."list"."watch"."create"."delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get"."list"."watch"."update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get"."list"."watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create"."update"."patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get"."list"."watch"."create"."update"."patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create"."get"."delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create"."get"."delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
- kind: ServiceAccount
  name: cephfs-provisioner
  namespace: kube-system

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
      - name: cephfs-provisioner
        image: "Quay. IO/external_storage/cephfs - provisioner: v2.0.0 - k8s1. 11"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/cephfs
        command:
        - "/usr/local/bin/cephfs-provisioner"
        args:
        - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner
EOF
kubectl apply -f external-storage-cephfs-provisioner.yaml

# Check the status; wait until the pod is Running before performing subsequent operations
kubectl get pod -n kube-system

Configure the StorageClass

# Get the Ceph key on the mon or admin node
ceph auth get-key client.admin

# Create the admin secret
# Replace CEPH_ADMIN_SECRET with the key obtained from client.admin
# You can skip this step if the secret was already created in the Ceph RBD section above
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system

# Check the secret
kubectl get secret ceph-secret -n kube-system -o yaml

# Configure the StorageClass
cat >storageclass-cephfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 11.11.11.111:6789,11.11.11.112:6789,11.11.11.113:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: "kube-system"
  claimRoot: /volumes/kubernetes
EOF

# Create
kubectl apply -f storageclass-cephfs.yaml

# check
kubectl get sc
Test
Create a test PVC
cat >cephfs-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-claim
spec:
  accessModes:     
    - ReadWriteOnce
  storageClassName: dynamic-cephfs
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f cephfs-pvc-test.yaml
 
# check
kubectl get pvc
kubectl get pv
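
# As with RBD, the claim should become Bound; look up the PV created for it by name
kubectl get pv $(kubectl get pvc cephfs-claim -o jsonpath='{.spec.volumeName}')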
 
Create an nginx pod to test the mount
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: cephfs
      mountPath: /usr/share/nginx/html
  volumes:
  - name: cephfs
    persistentVolumeClaim:
      claimName: cephfs-claim
EOF
kubectl apply -f nginx-pod.yaml
 
# check
kubectl get pods -o wide
 
# Modify file contents
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from CephFS!!! > /usr/share/nginx/html/index.html'
 
# Access test
POD_ID=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_ID
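
# Optionally confirm on the Ceph side that the written data landed in the fs_data pool
ceph df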

# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f cephfs-pvc-test.yaml

Reference documentation

  • kubernetes.io/docs/concep…
  • docs.openshift.com/container-p…
  • ieevee.com/tech/2018/0…
  • github.com/kubernetes-…
  • github.com/kubernetes-…
  • github.com/heketi/heke…
  • github.com/gluster/glu…
  • github.com/gluster/glu…
  • jimmysong.io/kubernetes-…
  • kubernetes.io/docs/concep…
  • docs.openshift.com/enterprise/…