Setup of a simple RHEL8 cluster with pacemaker.

# the following is to be done on both nodes
subscription-manager register
subscription-manager list --all --available >allsubs.txt
less allsubs.txt
# choose the right pool with the HA add-on
subscription-manager subscribe --pool=<poolid>

subscription-manager repos --disable='*' \
        --enable=rhel-8-for-x86_64-baseos-rpms \
        --enable=rhel-8-for-x86_64-appstream-rpms \
        --enable=rhel-8-for-x86_64-highavailability-rpms \
        --enable=rhel-8-for-x86_64-resilientstorage-rpms

# ensure /etc/hosts on both nodes has entries for both nodes
echo '192.168.4.23    node2.local node2' >>/etc/hosts
echo '192.168.4.22    node1.local node1' >>/etc/hosts

yum install -y pcs pacemaker fence-agents-all
systemctl enable --now pcsd
echo -n redhat | passwd hacluster --stdin
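
# if firewalld is active (an assumption here, skip otherwise),
# allow the cluster traffic:
firewall-cmd --permanent --add-service=high-availability
firewall-cmd --reload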

# execute only on first node
pcs host auth node1.local node2.local
pcs cluster setup my_cluster --start node1.local node2.local
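# note: pcs host auth prompts for the hacluster credentials; it can also
# be run non-interactively, using the hacluster password set above:
# pcs host auth node1.local node2.local -u hacluster -p redhat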

systemctl enable --now pcsd corosync pacemaker
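# alternatively, let pcs enable the cluster services on all nodes at once:
# pcs cluster enable --all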

### fencing
# set up the hypervisor as per
# https://access.redhat.com/solutions/917833
# https://access.redhat.com/solutions/55920
pcs stonith list
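# show the parameters the fence_xvm agent accepts:
pcs stonith describe fence_xvm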

# At this point, we should have the file /etc/cluster/fence_xvm.key
# on both nodes.
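# A minimal sketch of creating and distributing it, assuming the hypervisor
# setup from the links above: on the hypervisor run
#   mkdir -p /etc/cluster
#   dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=512 count=1
# and copy the key to /etc/cluster/ on both nodes, e.g. with scp.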

[root@node1 ~]# fence_xvm -o list
ahub                             75d52bd3-a5cb-429e-94e6-47c509c8f109 off
atower                           e5692f79-1f68-4a62-96a4-37b3bfde2561 off
fedora34c                        bd1538fa-b051-49b5-9f07-b7a57788fa8b off
node1                            87ebc6c6-3015-43a6-9c49-00044d8f3d7c on
node2                            a40a6534-4a8e-4e58-9f69-5e2b9b81615e on
rhel7.9                          4bba326d-08dc-4289-aa12-7f9f6c9f6a3e off
rhel8.4                          daf830c0-c947-4373-a10e-12e3b6c3fa84 off

# https://www.golinuxcloud.com/what-is-fencing-configure-kvm-cluster-fencing/
pcs stonith create fence-node1 fence_xvm port=node1 pcmk_host_list=node1.local
pcs stonith create fence-node2 fence_xvm port=node2 pcmk_host_list=node2.local
pcs stonith status
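
# optional check that fencing really works, this power-cycles node2:
# pcs stonith fence node2.local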

# shared disk, iSCSI from the hypervisor
# as per https://fluxcoil.net/wiki/software/iscsi/target

# ensure /etc/iscsi/initiatorname.iscsi on your nodes is different!
md5sum /etc/iscsi/initi*
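# if the names are identical (e.g. cloned VMs), regenerate one of them;
# iscsi-iname ships with iscsi-initiator-utils:
# echo "InitiatorName=$(iscsi-iname)" >/etc/iscsi/initiatorname.iscsi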
systemctl enable --now iscsi
iscsiadm --mode discovery --type sendtargets --portal 192.168.4.1
iscsiadm -m node --target iqn.2003-01.org.x:disk1 -p 192.168.4.1 -l

# I have no other volume groups on either node, so I have
# this in /etc/lvm/lvm.conf on both nodes:
auto_activation_volume_list = []
system_id_source = "uname"
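
# auto_activation_volume_list belongs into the activation{} section,
# system_id_source into the global{} section; verify with:
lvmconfig activation/auto_activation_volume_list global/system_id_source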

# at this point, both nodes should see a common /dev/sda
# create a single partition, just to follow best practices:
parted /dev/sda 'mklabel gpt'
parted /dev/sda 'mkpart 1 2048s -1'

pvcreate /dev/sda1
vgcreate httpd_vg /dev/sda1
lvcreate -L450 -n httpd_lv httpd_vg
mkfs.ext4 /dev/httpd_vg/httpd_lv

vgs --noheadings -o vg_name  # should show the VG just on one node

# then, rebuild the initramfs so the boot image does not
# activate the cluster-controlled VG, and reboot
dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
reboot

# after coming up, the cluster should have started already, otherwise:
# pcs cluster start --all
pcs status

# This should now show the node name as the system ID of the VG:
vgs -o+systemid

# create LVM cluster resource
pcs resource create my_lvm ocf:heartbeat:LVM-activate \
  vgname=httpd_vg vg_access_mode=system_id --group apachegroup
pcs resource status
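
# The group can then be completed with filesystem, IP address and webserver
# resources; a sketch with assumed names, mount point and IP, adjust to
# your environment:
pcs resource create my_fs Filesystem device=/dev/httpd_vg/httpd_lv \
  directory=/var/www fstype=ext4 --group apachegroup
pcs resource create VirtualIP IPaddr2 ip=192.168.4.30 cidr_netmask=24 \
  --group apachegroup
pcs resource create Website apache configfile=/etc/httpd/conf/httpd.conf \
  statusurl="http://127.0.0.1/server-status" --group apachegroup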

pcs status # gives complete status
pcs resource move my_lvm node1.local # moves resource to node1
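# the move creates a location constraint, clear it again when done:
pcs resource clear my_lvm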