Purely for the fun of tinkering 🙂

1 Environment Introduction

# This storage setup could also be called hyperconverged

OS: Proxmox Virtual Environment 6.2-4
PVE231                   Host
PVE118   172.16.1.118    VM 1
PVE119   172.16.1.119    VM 2
PVE143   172.16.1.143    VM 3

The three VMs are configured identically: memory, disks, CPU and network are the same as pve118. ==The PVE installation process is not covered here; apart from the network settings, a default installation is fine.==

2 First, make the Proxmox VE host support nested virtualization

# By default, the CPUs that PVE presents to its VMs do not support VMX, which means nested virtualization is not available, so we need to enable it manually

root@pve231:~# cat /sys/module/kvm_intel/parameters/nested
N

# Stop all VMs on the PVE host, then run the following commands:

modprobe -r kvm_intel 
modprobe kvm_intel nested=1

# Check whether nested is now enabled

root@pve231:~# cat /sys/module/kvm_intel/parameters/nested
Y

Make the nested option load automatically at boot

echo "options kvm_intel nested=1" >> /etc/modprobe.d/modprobe.conf

# At this point, nested virtualization on the PVE host is fully enabled.

3 Enable nested virtualization on the VM (either of the following methods)

3.1 Method 1: Enable it from the CLI (the setting is lost after a restart; writing it into the configuration is recommended)

Take my centos116 VM as an example. View its current configuration; it does not yet support virtualization:

root@pve231:~# qm showcmd 116
/usr/bin/kvm -id 116 -name centos116 -chardev 'socket,id=qmp,path=/var/run/qemu-server/116.qmp,server,nowait' -mon 'chardev=qmp,mode=control' -chardev 'socket,id=qmp-event,path=/var/run/qmeventd.sock,reconnect=5' -mon 'chardev=qmp-event,mode=control' -pidfile /var/run/qemu-server/116.pid -daemonize -smbios 'type=1,uuid=a820085d-d873-46b2-9d4a-88aa7ed1e1b6' -smp '1,sockets=1,cores=1,maxcpus=1' -nodefaults -boot 'menu=on,strict=on,reboot-timeout=1000,splash=/usr/share/qemu-server/bootsplash.jpg' -vnc unix:/var/run/qemu-server/116.vnc,password -cpu kvm64,enforce,+kvm_pv_eoi,+kvm_pv_unhalt,+lahf_lm,+sep -m 1024 -device 'pci-bridge,id=pci.1,chassis_nr=1,bus=pci.0,addr=0x1e' -device 'pci-bridge,id=pci.2,chassis_nr=2,bus=pci.0,addr=0x1f' -device 'vmgenid,guid=a218640f-b2f4-4641-8d21-1c09037cca4b' -device 'piix3-usb-uhci,id=uhci,bus=pci.0,addr=0x1.0x2' -device 'usb-tablet,id=tablet,bus=uhci.0,port=1' -device 'VGA,id=vga,bus=pci.0,addr=0x2' -device 'virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x3' -iscsi 'initiator-name=iqn.1993-08.org.debian:01:c4896a5ad946' -device 'virtio-scsi-pci,id=scsihw0,bus=pci.0,addr=0x5' -drive 'file=/dev/SATA3_Disk/vm-116-disk-0,if=none,id=drive-scsi0,format=raw,cache=none,aio=native,detect-zeroes=on' -device 'scsi-hd,bus=scsihw0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0,id=scsi0,rotation_rate=1,bootindex=100' -drive 'file=/dev/SATA3_Disk/vm-116-disk-1,if=none,id=drive-scsi1,format=raw,cache=none,aio=native,detect-zeroes=on' -device 'scsi-hd,bus=scsihw0.0,channel=0,scsi-id=0,lun=1,drive=drive-scsi1,id=scsi1' -netdev 'type=tap,id=net0,ifname=tap116i0,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on' -device 'virtio-net-pci,mac=A6:59:D2:72:9E:D7,netdev=net0,bus=pci.0,addr=0x12,id=net0,bootindex=300' -machine 'type=pc+pve0'

# Locate the CPU settings in the output above

-cpu kvm64,enforce,+kvm_pv_eoi,+kvm_pv_unhalt,+lahf_lm,+sep -m 1024
# Find this section of the CPU configuration and add +vmx
-cpu kvm64,enforce,+kvm_pv_eoi,+kvm_pv_unhalt,+lahf_lm,+sep,+vmx -m 1024
# Then stop VM 116, copy the full command line with +vmx added, and run it to start the VM again
root@pve231:~# qm stop 116
root@pve231:~# /usr/bin/kvm -id 116 -name centos116 -chardev 'socket,id=qmp,path=/var/run/qemu-server/116.qmp,server,nowait' -mon 'chardev=qmp,mode=control' -chardev 'socket,id=qmp-event,path=/var/run/qmeventd.sock,reconnect=5' -mon 'chardev=qmp-event,mode=control' -pidfile /var/run/qemu-server/116.pid -daemonize -smbios 'type=1,uuid=a820085d-d873-46b2-9d4a-88aa7ed1e1b6' -smp '1,sockets=1,cores=1,maxcpus=1' -nodefaults -boot 'menu=on,strict=on,reboot-timeout=1000,splash=/usr/share/qemu-server/bootsplash.jpg' -vnc unix:/var/run/qemu-server/116.vnc,password -cpu kvm64,enforce,+kvm_pv_eoi,+kvm_pv_unhalt,+lahf_lm,+sep,+vmx -m 1024 -device 'pci-bridge,id=pci.1,chassis_nr=1,bus=pci.0,addr=0x1e' -device 'pci-bridge,id=pci.2,chassis_nr=2,bus=pci.0,addr=0x1f' -device 'vmgenid,guid=a218640f-b2f4-4641-8d21-1c09037cca4b' -device 'piix3-usb-uhci,id=uhci,bus=pci.0,addr=0x1.0x2' -device 'usb-tablet,id=tablet,bus=uhci.0,port=1' -device 'VGA,id=vga,bus=pci.0,addr=0x2' -device 'virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x3' -iscsi 'initiator-name=iqn.1993-08.org.debian:01:c4896a5ad946' -device 'virtio-scsi-pci,id=scsihw0,bus=pci.0,addr=0x5' -drive 'file=/dev/SATA3_Disk/vm-116-disk-0,if=none,id=drive-scsi0,format=raw,cache=none,aio=native,detect-zeroes=on' -device 'scsi-hd,bus=scsihw0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0,id=scsi0,rotation_rate=1,bootindex=100' -drive 'file=/dev/SATA3_Disk/vm-116-disk-1,if=none,id=drive-scsi1,format=raw,cache=none,aio=native,detect-zeroes=on' -device 'scsi-hd,bus=scsihw0.0,channel=0,scsi-id=0,lun=1,drive=drive-scsi1,id=scsi1' -netdev 'type=tap,id=net0,ifname=tap116i0,script=/var/lib/qemu-server/pve-bridge,downscript=/var/lib/qemu-server/pve-bridgedown,vhost=on' -device 'virtio-net-pci,mac=A6:59:D2:72:9E:D7,netdev=net0,bus=pci.0,addr=0x12,id=net0,bootindex=300' -machine 'type=pc+pve0'

# Check that VM 116 is running

root@pve231:~# qm list |grep 116
       116 centos116            running    1024              50.00 869792

# Log in to VM 116 to check whether VMX is enabled

root@centos116:~# egrep "vmx|svm" /proc/cpuinfo
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cpuid_fault pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms xsaveopt arat umip arch_capabilities
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cpuid_fault pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms xsaveopt arat umip arch_capabilities

3.2 Method 2: Enable it in the Web UI (the configuration is not lost after a restart)

When creating the VM, select the CPU type ==host==. After the VM is created, check whether VMX is enabled inside it:

root@pve118:~# egrep "vmx|svm" /proc/cpuinfo
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cpuid_fault pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms xsaveopt arat umip arch_capabilities
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cpuid_fault pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms xsaveopt arat umip arch_capabilities
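For a VM that already exists, the CPU type can also be set to host from the PVE CLI instead of the Web UI. A minimal sketch, assuming VM ID 116 from the earlier example:

# Set the CPU type to host so the guest sees the host CPU flags, including vmx
qm set 116 --cpu host
# Stop and start the VM so the new CPU type takes effect
qm stop 116 && qm start 116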

4 Create a cluster and add nodes

4.1 Cluster Creation

# On pve143: Datacenter – Cluster – Create Cluster. Set the cluster name and select the cluster communication network link (IP 172.16.1.143), then click "Create"; "done" is displayed when it finishes. # Cluster name: ==pvvve==
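The same step can be done from the CLI on the first node. A minimal sketch, using the cluster name ==pvvve== from above:

# Create the cluster (run once, on one node only)
pvecm create pvvve
# Check the cluster state
pvecm status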

4.2 Adding a Node to a Cluster

# On pve143, find the cluster join information: Datacenter – Cluster – Join Information, then click Copy. # On the other nodes, paste the information, enter the root password of pve143 and click Join 'pvvve' to join the cluster. # Verify the result.
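Joining can also be done from the CLI on each of the other nodes. A minimal sketch, pointing at the IP of the node that created the cluster (172.16.1.143 here):

# Run on each node that should join; it prompts for the root password of the existing cluster node
pvecm add 172.16.1.143
# Verify the node list afterwards
pvecm nodes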

5 Ceph setup and mounting

A PVE Ceph cluster can be set up in two ways: through the Web UI, or directly from the PVE CLI (the CLI is recommended, because downloading from the PVE repositories through the Web UI is slow and too easily interrupted!)

5.1 Method 1: Creating the Ceph cluster from the web interface

# On pve143: Ceph – Install Ceph. After the installation completes, create the monitor and manager from the UI. # Then create the OSDs; the OSD disks are the two extra disks attached to each server, as described in the environment section above. At this point, deployment with method 1 is complete.

5.2 Method 2: Creating the Ceph cluster from the PVE CLI (Recommended)

Install ceph on each node

root@pve118:~# pveceph install --version luminous
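Once the packages are installed on a node, a quick sanity check (a hedged extra step, not part of the original procedure) is to confirm the Ceph version:

# Confirm the Ceph packages are installed
ceph --version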

# Do the same on the other two nodes. After the installation, configure the Ceph cluster storage network:

root@pve148:~# pveceph init --network 172.16.1.0/24

# Create the Ceph cluster monitors (MON) – run on each node

root@pve118:~# pveceph createmon
root@pve119:~# pveceph createmon
root@pve143:~# pveceph createmon

# Create the Ceph managers (MGR) – run on each node

root@pve118:~# pveceph createmgr
root@pve119:~# pveceph createmgr
root@pve143:~# pveceph createmgr

# Create the Ceph cluster OSDs – run on each node

root@pve118:~# pveceph createosd /dev/sdb
root@pve118:~# pveceph createosd /dev/sdc
root@pve119:~# pveceph createosd /dev/sdb
root@pve119:~# pveceph createosd /dev/sdc
root@pve143:~# pveceph createosd /dev/sdb
root@pve143:~# pveceph createosd /dev/sdc
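After the OSDs are created, a quick way to confirm that all six are up (a hedged extra check using the standard Ceph CLI):

# List the OSD tree; every OSD should be shown as "up"
ceph osd tree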

# Create a storage pool: ceph osd pool create <pool name> <pg_num> <pgp_num>

root@pve143:~# ceph osd pool create pvepool1 128 128
pool 'pvepool1' created

==The 128 here is not chosen casually; it has to be calculated, and if it is set too high Ceph reports an error==

Formula for the number of PGs/PGPs in a pool (see the official PG calculator): Total PGs = (Total_number_of_OSD * Target_PGs_per_OSD) / max_replication_count / pool_count. Target_PGs_per_OSD is usually set to 100, and the result is rounded to a power of two; the value used here is 128.
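A minimal worked sketch of that formula for this environment, assuming 6 OSDs, a target of 100 PGs per OSD, a replication count of 3 and a single pool:

# (6 * 100) / 3 / 1 = 200, then rounded to a power of two (128 is used in this article)
OSDS=6; TARGET_PGS=100; REPLICAS=3; POOLS=1
echo $(( OSDS * TARGET_PGS / REPLICAS / POOLS ))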

Enable pvepool1 as the RBD storage pool for PVE disk images and containers

root@pve143:~# ceph osd pool application enable pvepool1 rbd
enabled application 'rbd' on pool 'pvepool1'

# Finally check the cluster status

root@pve143:~# ceph -s
  cluster:
    id:     79475a09-72be-418a-921f-243241b0c5e3
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum pve143,pve118,pve119 (age 4h)
    mgr: pve143(active, since 3h), standbys: pve119, pve118
    osd: 6 osds: 6 up (since 4h), 6 in (since 4h)

  data:
    pools:   1 pools, 128 pgs
    objects: 0 objects, 0 B
    usage:   6.0 GiB used, 240 GiB / 246 GiB avail
    pgs:     128 active+clean


5.3 Mounting the Ceph RBD storage

# Datacenter – Storage – Add – RBD. # The final result looks like this:
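The same storage can also be added from the CLI with pvesm. A minimal sketch, assuming a storage ID of ceph-rbd (a name chosen here for illustration) and the pvepool1 pool created above:

# Add the Ceph pool as RBD storage for disk images and containers
pvesm add rbd ceph-rbd --pool pvepool1 --content images,rootdir
# List the configured storages to confirm
pvesm status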

6 Enable the PVE high availability (HA) function

6.1 Creating an HA Group

This group defines which nodes participate in HA and the priority used for migration when a node fails. # Once creation is complete, the HA cluster status is displayed.
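The group can also be created from the CLI with ha-manager. A minimal sketch, assuming a group named ha-group1 (chosen here for illustration) with pve118 as the preferred node:

# Create an HA group; a higher priority number means a preferred migration target
ha-manager groupadd ha-group1 --nodes "pve118:2,pve119:1,pve143:1"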

6.2 Creating Objects that Participate in HA

# Datacenter – HA – Resources – Add: add the containers or VMs that should participate in HA (==the VM disks must be stored on the Ceph RBD storage==)
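Adding a resource from the CLI works too. A minimal sketch, assuming VM 100 (the VM used in the failover test below) and the ha-group1 group from the previous step:

# Put VM 100 under HA management, bound to the HA group
ha-manager add vm:100 --group ha-group1 --state started
# Show the HA resource state
ha-manager status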

6.3 Verifying HA Failover

# VM 100 was successfully migrated to pve119 and started.
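The failover can also be confirmed from the CLI on any surviving node (a hedged check using the standard tools):

# The resource vm:100 should now be reported on pve119
ha-manager status | grep vm:100
# On pve119 itself, the VM should be listed as running
qm list | grep 100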

That's all ~