I. Introduction to the basic environment

  • Ali Cloud Server
  • Centos 7.9
[root@master ~]# cat /etc/hosts
::1           localhost localhost.localdomain localhost6 localhost6.localdomain6
127.0.0.1     localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.11.61 master
192.168.11.62 node1
192.168.11.63 node2   # storage
192.168.11.64 node3   # storage

II. GlusterFS configuration managed by Heketi

1. Cluster disk information configured by Heketi

[root@node1 ~]# heketi-cli --server http://localhost:8080 --user admin --secret admin@key topology info

Cluster Id: 55c5c02540458dae0414e622ba063f0d

    File:  true
    Block: true

    Volumes:

    Nodes:

        Node Id: 51456a927d994b71d0aa20d3eceed117
        State: online
        Cluster Id: 55c5c02540458dae0414e622ba063f0d
        Zone:
        Management Hostnames: 192.168.11.63
        Storage Hostnames: 192.168.11.63
        Devices:
                Id:37fd5355b3bd1d51520e180600d0a2ad   Name:/dev/vdb   State:online   Size (GiB):19   Used (GiB):0   Free (GiB):19
                        Bricks:

        Node Id: 706006259ae1e5b1de5e162339dfaddd
        State: online
        Cluster Id: 55c5c02540458dae0414e622ba063f0d
        Zone: 3
        Management Hostnames: 192.168.11.64
        Storage Hostnames: 192.168.11.64
        Devices:
                Id:326cd6fa573e44e04421588654311f7e   Name:/dev/vdb   State:online   Size (GiB):19   Used (GiB):0   Free (GiB):19
                        Bricks:

        Node Id: 828affec2ec522d9a99a2f2bd2f34761
        State: online
        Cluster Id: 55c5c02540458dae0414e622ba063f0d
        Zone: 1
        Management Hostnames: 192.168.11.62
        Storage Hostnames: 192.168.11.62
        Devices:
                Id:3632131cfe647d5d3b216bc36b883cfc   Name:/dev/vdb   State:online   Size (GiB):19   Used (GiB):0   Free (GiB):19
                        Bricks:
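
For reference, a Heketi topology like the one above is normally defined in a JSON file and loaded with heketi-cli. A minimal sketch matching the nodes and devices shown (the file name and the zone numbers are illustrative, not taken from the original setup):

[root@node1 ~]# cat topology.json
{
  "clusters": [
    {
      "nodes": [
        { "node": { "hostnames": { "manage": ["192.168.11.62"], "storage": ["192.168.11.62"] }, "zone": 1 },
          "devices": ["/dev/vdb"] },
        { "node": { "hostnames": { "manage": ["192.168.11.63"], "storage": ["192.168.11.63"] }, "zone": 2 },
          "devices": ["/dev/vdb"] },
        { "node": { "hostnames": { "manage": ["192.168.11.64"], "storage": ["192.168.11.64"] }, "zone": 3 },
          "devices": ["/dev/vdb"] }
      ]
    }
  ]
}
[root@node1 ~]# heketi-cli --server http://localhost:8080 --user admin --secret admin@key topology load --json=topology.json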

2. Kubernetes dynamic storage (StorageClass) configuration for GlusterFS

[root@master ~]# cat gluster-heketi-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs
provisioner: kubernetes.io/glusterfs
allowVolumeExpansion: true
reclaimPolicy: Delete
parameters:
  resturl: "http://192.168.11.62:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"
  volumetype: "replicate:3"    # 3 replicas: one copy of the data on each storage server
[root@master ~]# kubectl get sc
NAME        PROVISIONER               RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
glusterfs   kubernetes.io/glusterfs   Delete          Immediate           true                   62s
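
The StorageClass above authenticates to Heketi with the Secret named heketi-secret. A minimal sketch of how such a Secret can be created (assuming the admin key is admin@key, the value used with heketi-cli earlier; the glusterfs provisioner expects the Secret type kubernetes.io/glusterfs and the data key named key):

[root@master ~]# kubectl create secret generic heketi-secret \
      --type="kubernetes.io/glusterfs" \
      --from-literal=key='admin@key' \
      --namespace=default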

III. PVC and Pod resource files used for testing

  • test-pvc.yaml
[root@master ~]# cat test-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-glusterfs
  annotations:
    volume.beta.kubernetes.io/storage-class: "glusterfs"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  • pv-pod.yaml
[root@master ~]# cat pv-pod.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: test-glusterfs
  containers:
    - name: task-pv-container
      image: nginx:alpine
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage
  • Verify the pod and the GlusterFS mount
[root@master ~]# kubectl get pod
NAME          READY   STATUS    RESTARTS   AGE
task-pv-pod   1/1     Running   0          35s
[root@master ~]# kubectl exec -it task-pv-pod sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # df -h
Filesystem                Size      Used Available Use% Mounted on
overlay                  39.2G      3.8G     33.6G  10% /
tmpfs                    64.0M         0     64.0M   0% /dev
tmpfs                     1.8G         0      1.8G   0% /sys/fs/cgroup
/dev/vda1                39.2G      3.8G     33.6G  10% /dev/termination-log
/dev/vda1                39.2G      3.8G     33.6G  10% /etc/resolv.conf
/dev/vda1                39.2G      3.8G     33.6G  10% /etc/hostname
/dev/vda1                39.2G      3.8G     33.6G  10% /etc/hosts
shm                      64.0M         0     64.0M   0% /dev/shm
192.168.11.64:vol_0c01bc93655b4ec989ee2372ca1dbbcf
                       1014.0M     42.8M    971.2M   4% /usr/share/nginx/html
tmpfs                     1.8G     12.0K      1.8G   0% /run/secrets/kubernetes.io/serviceaccount
tmpfs                     1.8G         0      1.8G   0% /proc/acpi
tmpfs                    64.0M         0     64.0M   0% /proc/kcore
tmpfs                    64.0M         0     64.0M   0% /proc/keys
tmpfs                    64.0M         0     64.0M   0% /proc/timer_list
tmpfs                    64.0M         0     64.0M   0% /proc/timer_stats
tmpfs                    64.0M         0     64.0M   0% /proc/sched_debug
tmpfs                     1.8G         0      1.8G   0% /proc/scsi
tmpfs                     1.8G         0      1.8G   0% /sys/firmware
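
One note on the PVC manifest above before moving on to the tests: the volume.beta.kubernetes.io/storage-class annotation is the legacy way of selecting a StorageClass; on current Kubernetes versions the equivalent is the spec.storageClassName field. A minimal sketch of test-pvc.yaml rewritten that way:

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-glusterfs
spec:
  storageClassName: glusterfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi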

IV. Test GlusterFS

1. Write from the pod and view the data on all GlusterFS nodes

  • Operations in the pod
/usr/share/nginx/html # seq 10 > index.html
  • View the data on each storage server (a volume-level check is sketched after this step)

[root@node1 ~]# ll /var/lib/heketi/mounts/vg_3632131cfe647d5d3b216bc36b883cfc/brick_6a7b9cc35798803c2ea0d7529992fdde/brick
total 4
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
[root@node2 ~]# ll /var/lib/heketi/mounts/vg_37fd5355b3bd1d51520e180600d0a2ad/brick_f03e793ffbcc6b2db4283d03df4dcc26/brick
total 4
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
[root@node3 ~]# ll /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick
total 4
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html

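Besides inspecting the brick directories one by one as above, the replica layout can also be confirmed from any GlusterFS node. A quick sketch, assuming the volume name taken from the df -h output earlier:

[root@node1 ~]# gluster volume info vol_0c01bc93655b4ec989ee2372ca1dbbcf
# For a replicate:3 volume this should report "Type: Replicate",
# "Number of Bricks: 1 x 3 = 3", and one brick on each of the three nodes.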

2. Test writes after stopping the node3 server

  • Operations in the pod after stopping the node3 server from the console (see the note after this step)
/usr/share/nginx/html # echo down > node3.txt    # the write hangs for a while before completing
  • View the data on the node1 and node2 servers (writes still succeed)
[root@node1 ~]# ll /var/lib/heketi/mounts/vg_3632131cfe647d5d3b216bc36b883cfc/brick_6a7b9cc35798803c2ea0d7529992fdde/brick
total 8
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
[root@node2 ~]# ll /var/lib/heketi/mounts/vg_37fd5355b3bd1d51520e180600d0a2ad/brick_f03e793ffbcc6b2db4283d03df4dcc26/brick
total 8
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt

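The brief hang observed above is expected behaviour: the GlusterFS client keeps waiting for the dead brick until network.ping-timeout expires (42 seconds by default), after which writes continue against the remaining replicas. A sketch of how to check or tune the timeout (volume name as before; lowering it is a trade-off, not a general recommendation):

[root@node1 ~]# gluster volume get vol_0c01bc93655b4ec989ee2372ca1dbbcf network.ping-timeout
[root@node1 ~]# gluster volume set vol_0c01bc93655b4ec989ee2372ca1dbbcf network.ping-timeout 10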

3. Create a new PVC while the node3 server is stopped

  • Create a new PVC
  • Conclusion: with the GlusterFS replica count set to 3 and only 3 storage servers, a newly created PVC cannot be provisioned while one server is down; it stays in the Pending state (see the note after this step)
[root@master ~]# cat  test1-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test1
  annotations:
    volume.beta.kubernetes.io/storage-class: "glusterfs"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi

[root@master ~]# kubectl apply -f test1-pvc.yaml 
persistentvolumeclaim/test1 created
[root@master ~]# kubectl get pvc
NAME             STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-glusterfs   Bound     pvc-8552824c-0632-4ccd-a89c-22d0bcf37146   1Gi        RWX            glusterfs      19m
test1            Pending     
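
This is expected for a replicate:3 volume: Heketi has to place one brick on each of three different storage nodes, which is impossible while only two of them are online, so dynamic provisioning cannot complete. A sketch of how to see what the claim is waiting on:

[root@master ~]# kubectl describe pvc test1
# The Events section shows the provisioner retrying and the error returned
# by Heketi while it cannot allocate all three replicas.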

4. Start the node3 server and check whether the data is synchronized

  • After starting the node3 server, view the files on the node3 server
  • Conclusion: data is synchronized after the GlusterFS data node is restarted (a self-heal check is sketched after this step)
[root@node3 ~]# ll /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick
total 8
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
[root@node3 ~]# cat  /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick/node3.txt 
down
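
The catch-up is done by the GlusterFS self-heal daemon once node3 rejoins the cluster. A quick way to watch its progress (volume name as before):

[root@node1 ~]# gluster volume heal vol_0c01bc93655b4ec989ee2372ca1dbbcf info
# Lists the entries still pending heal on each brick; "Number of entries: 0"
# on every brick means node3 has caught up with the other two replicas.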

5. Write data from the pod and view it on node3

  • Pod operation
  • Conclusion: Data can be synchronized normally
[root@master ~]# kubectl exec -it task-pv-pod  sh 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cd /usr/share/nginx/html
/usr/share/nginx/html # seq 5 > node3_test.txt
  • View on the node3 server
[root@node3 ~]# ll /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick/node3_test.txt 
-rw-r--r-- 2 root 2000 10 Jun 11 10:43 /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick/node3_test.txt

6. Check Pending PVC status after node3 is started

  • Check the PVC
  • Conclusion: the previously Pending PVC is now Bound
[root@master ~]# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-glusterfs   Bound    pvc-8552824c-0632-4ccd-a89c-22d0bcf37146   1Gi        RWX            glusterfs      27m
test1            Bound    pvc-37906249-0572-459c-bed1-36eee19d7c25   1Gi        RWX            glusterfs      8m49s

7. Create a new PVC after starting the node3 server

  • Creating a new PVC
  • Conclusion: PVC can be created and bound normally
[root@master ~]# cp test1-pvc.yaml test2-pvc.yaml
[root@master ~]# sed -i "s#test1#test2#g" test2-pvc.yaml
[root@master ~]# kubectl apply -f test2-pvc.yaml
persistentvolumeclaim/test2 created
[root@master ~]# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-glusterfs   Bound    pvc-8552824c-0632-4ccd-a89c-22d0bcf37146   1Gi        RWX            glusterfs      29m
test1            Bound    pvc-37906249-0572-459c-bed1-36eee19d7c25   1Gi        RWX            glusterfs      10m
test2            Bound    pvc-d33240a5-4688-483c-adf4-79628675229e   1Gi        RWX            glusterfs      14s

8. Stop the node2 and node3 servers and test writing

  • Operations in the pod after stopping the node2 and node3 servers
  • Conclusion: with two of the three servers stopped, the GlusterFS storage becomes unavailable (see the quorum note after this step)
[root@master ~]# kubectl exec -it task-pv-pod sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cd /usr/share/nginx/html
sh: cd: can't cd to /usr/share/nginx/html: Socket not connected    # the mount directory can no longer be accessed
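
This is the client-quorum behaviour of a replica 3 volume: with only one of the three bricks reachable, quorum is lost and the client cuts off the mount rather than risk split-brain. A sketch of how to inspect the quorum settings from the surviving node (volume name as before):

[root@node1 ~]# gluster volume get vol_0c01bc93655b4ec989ee2372ca1dbbcf all | grep quorum
# With cluster.quorum-type "auto" on a replica 3 volume, a majority
# (2 of 3) of the bricks must be up for the client to allow writes.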

9. Start the node2 and node3 servers and create a file from the pod

  • Pod operation after the node2 and node3 servers are started
[root@master ~]# kubectl exec -it task-pv-pod  sh 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cd /usr/share/nginx/html
/usr/share/nginx/html # echo   node2 node3 > up.txt

  • View data on node2 and node3 servers
[root@node2 ~]# ll /var/lib/heketi/mounts/vg_37fd5355b3bd1d51520e180600d0a2ad/brick_f03e793ffbcc6b2db4283d03df4dcc26/brick/
total 16
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000 10 Jun 11 10:43 node3_test.txt
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
-rw-r--r-- 2 root 2000 12 Jun 11 10:52 up.txt
[root@node2 ~]# cat  /var/lib/heketi/mounts/vg_37fd5355b3bd1d51520e180600d0a2ad/brick_f03e793ffbcc6b2db4283d03df4dcc26/brick/up.txt 
node2 node3

[root@node3 ~]# ll /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick
total 16
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000 10 Jun 11 10:43 node3_test.txt
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
-rw-r--r-- 2 root 2000 12 Jun 11 10:52 up.txt
[root@node3 ~]# cat  /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick/up.txt 
node2 node3

10. Create a new PVC after starting the node2 and node3 servers

  • Conclusion: after the servers are started, newly created PVCs bind normally
[root@master ~]# cp test2-pvc.yaml test4-pvc.yaml 
[root@master ~]# sed -i "s#test2#test4#g" test4-pvc.yaml 
[root@master ~]# kubectl  apply -f test4-pvc.yaml 
persistentvolumeclaim/test4 created
[root@master ~]# kubectl get pvc 
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-glusterfs   Bound    pvc-8552824c-0632-4ccd-a89c-22d0bcf37146   1Gi        RWX            glusterfs      37m
test1            Bound    pvc-37906249-0572-459c-bed1-36eee19d7c25   1Gi        RWX            glusterfs      18m
test2            Bound    pvc-d33240a5-4688-483c-adf4-79628675229e   1Gi        RWX            glusterfs      8m51s
test3            Bound    pvc-cfa37d9b-9079-43eb-a474-88d04ecc41b3   1Gi        RWX            glusterfs      4m45s
test4            Bound    pvc-1af68f5b-e00a-445b-8146-e7e8d6741653   1Gi        RWX            glusterfs      9s