Setup of a simple RHEL8 cluster with Pacemaker.

  * install 2 nodes with RHEL 8.4
  * https://

<code>
# following to be done on both nodes
subscription-manager register
subscription-manager list --all --available > allsubs.txt
less allsubs.txt
# choose the right pool, i.e. one that includes High Availability
subscription-manager subscribe --pool=<pool_id>
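# alternatively, filter the pool list directly; subscription-manager
# on RHEL8 supports --matches:
subscription-manager list --available --matches='*High Availability*'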
+ | |||
+ | subscription-manager repos --disable=' | ||
+ | --enable=rhel-8-for-x86_64-baseos-rpms \ | ||
+ | --enable=rhel-8-for-x86_64-appstream-rpms \ | ||
+ | --enable=rhel-8-for-x86_64-highavailability-rpms \ | ||
+ | --enable=rhel-8-for-x86_64-resilientstorage-rpms | ||
+ | |||
# ensure /etc/hosts on both nodes has entries for both nodes
echo '<ip_of_node1> node1.local' >> /etc/hosts
echo '<ip_of_node2> node2.local' >> /etc/hosts
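# quick check that both names resolve as intended
getent hosts node1.local node2.local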
+ | |||
+ | yum install -y pcs pacemaker fence-agents-all | ||
+ | systemctl enable --now pcsd | ||
+ | echo -n redhat | passwd hacluster --stdin | ||
+ | |||
# execute only on first node
pcs host auth node1.local node2.local
pcs cluster setup my_cluster --start node1.local node2.local

systemctl enable --now pcsd corosync pacemaker

### fencing
# setup hypervisor as per
# https://
# https://
pcs stonith list

# At this point, we should have the file /etc/cluster/fence_xvm.key
# on both nodes.

[root@node1 ~]# fence_xvm -o list
ahub
atower
fedora34c
node1            87ebc6c6-3015-43a6-9c49-00044d8f3d7c on
node2            a40a6534-4a8e-4e58-9f69-5e2b9b81615e on
rhel7.9
rhel8.4

# https://
pcs stonith create fence-node1 fence_xvm port=node1 pcmk_host_list=node1.local
pcs stonith create fence-node2 fence_xvm port=node2 pcmk_host_list=node2.local
pcs stonith status

# shared disk, iscsi from hypervisor
# as per https://

# ensure /etc/iscsi/initiatorname.iscsi differs between the nodes
md5sum /etc/iscsi/initiatorname.iscsi
systemctl enable --now iscsi
iscsiadm --mode discovery --type sendtargets --portal 192.168.4.1
iscsiadm -m node --target iqn.2003-01.org.x:<target_name> --login

# I have no other volume groups on both nodes, so I have
# this in /etc/lvm/lvm.conf:
auto_activation_volume_list = []
system_id_source = "uname"

# at this point, both nodes should see a common /dev/sda
# create a single partition, just to follow best practices:
parted /dev/sda 'mklabel msdos'
parted /dev/sda 'mkpart primary 1MiB 100%'

pvcreate /dev/sda1
vgcreate httpd_vg /dev/sda1
lvcreate -L450 -n httpd_lv httpd_vg
mkfs.ext4 /dev/httpd_vg/httpd_lv

vgs --noheadings -o vg_name

# then, rebuild the initrd (so early boot uses the new lvm.conf) and reboot
dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
reboot

# after coming up, the cluster should have started already;
# otherwise: pcs cluster start --all
pcs status

# This should show the nodename now in the VG:
vgs -o+systemid

# create LVM cluster resource
pcs resource create my_lvm ocf:heartbeat:LVM-activate vgname=httpd_vg \
  vg_access_mode=system_id --group apachegroup
pcs resource status

pcs status                            # gives complete status
pcs resource move my_lvm node1.local  # moves resource to node1
</code>
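
Note that on RHEL8, ''pcs resource move'' adds a location constraint that stays in place until you clear it:

<code>
pcs resource clear my_lvm
</code>

A typical next step (not shown above) is to put a filesystem resource on top of the LV, in the same group; a minimal sketch, assuming ''/var/www'' as the mount point:

<code>
# mount the shared LV through the cluster; the group makes it start
# together with (and after) my_lvm on the same node
pcs resource create my_fs Filesystem device=/dev/httpd_vg/httpd_lv \
  directory=/var/www fstype=ext4 --group apachegroup
pcs resource status
</code>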