#!/usr/bin/env bash
# Runbook: deploy a 2-node RHEL 7.5 Pacemaker cluster with fencing, shared
# iSCSI storage, and a GFS2 filesystem on guests rhel7u5a/rhel7u5b.
# NOTE(review): this is an interactive runbook, not an unattended script —
# run each section on the node(s) indicated by its heading.

### Repositories and packages — run on BOTH nodes
# Base repo plus the HighAvailability and ResilientStorage add-ons served
# from the local mirror at 192.168.4.1.
cat >/etc/yum.repos.d/base.repo <<EOT
[rhel-7.5]
name=rhel-7.5
baseurl=http://192.168.4.1/repos/rhel-7.5
[rhel-7.5ha]
name=rhel-7.5ha
baseurl=http://192.168.4.1/repos/rhel-7.5/addons/HighAvailability/
[rhel-7.5res]
name=rhel-7.5res
baseurl=http://192.168.4.1/repos/rhel-7.5/addons/ResilientStorage/
EOT
yum groupinstall -y ha
yum install -y lvm2-cluster gfs2-utils
systemctl enable --now pcsd.service
# pcs authenticates as the 'hacluster' user; give it a known password.
echo -n redhat | passwd hacluster --stdin

### Cluster creation — run on ONE node only
pcs cluster auth rhel7u5a.local rhel7u5b.local
pcs cluster setup --start --name my_cluster rhel7u5a.local rhel7u5b.local
# Left disabled on purpose: GFS2 REQUIRES working fencing (stonith).
# pcs property set stonith-enabled=false
pcs cluster enable --all
pcs cluster status

### Fencing
# Set up the hypervisor side (fence_virtd + key distribution) as per
# https://access.redhat.com/solutions/55920 before creating the resource.
pcs stonith list
pcs stonith describe fence_xvm
# Sanity check — the hypervisor must answer with the guest list, e.g.:
#   [root@rhel7u5a ~]# fence_xvm -o list
#   fed        e9ba894d-ae8e-41dd-ad56-c10dcdbf0741 off
#   master     cb678f0c-8817-4ecd-9d16-c814e8749daa off
#   rhel4u8    ccc352a1-5bfb-4bf4-9473-3f5cedd65599 off
#   rhel5u11   9e720e71-b583-4ef9-86f1-c00d7f36e07f off
fence_xvm -o list
pcs stonith create xvmfence fence_xvm key_file=/etc/cluster/fence_xvm.key
pcs stonith   # show stonith resource status

### Shared disk — iSCSI exported from the hypervisor; run on BOTH nodes
# Ensure /etc/iscsi/initiatorname.iscsi differs between the nodes,
# otherwise the target sees a single initiator and sessions will clash.
md5sum /etc/iscsi/initi*
systemctl enable --now iscsi
iscsiadm --mode discovery --type sendtargets --portal 192.168.4.16
iscsiadm -m node --target iqn.2003-01.org.x:disk1 -p 192.168.4.16 -l

### GFS2 setup — run on ONE node (mount on both afterwards)
# Freeze (don't stop) GFS2 I/O when quorum is lost, as required for GFS2.
pcs property set no-quorum-policy=freeze
# Cloned DLM lock manager on every node; fence on monitor failure.
pcs resource create dlm ocf:pacemaker:controld op monitor \
  interval=30s on-fail=fence clone interleave=true ordered=true
# -j 2: one journal per node; -t <cluster>:<fsname> must match the cluster name.
# NOTE(review): /dev/sda assumes the iSCSI LUN enumerates identically on both
# nodes — verify with 'iscsiadm -m session -P3' or use a /dev/disk/by-id path.
mkfs.gfs2 -p lock_dlm -j 2 -t my_cluster:data1 /dev/sda
mount /dev/sda /mnt