What is it?

Testing redhat-cluster on centos, with shared discs served by a debian iscsi-target. Based on centos/rhel5.

debianish iscsi-target

  • we need no fancy stuff like scsi persistent reservations, so we will use the simple tgt target (e.g. risingtide provides a more advanced one):
apt-get install tgt
tgtadm --lld iscsi --op new --mode target --tid 1 -T iqn.2009-11.com.example:storage.disk1
tgtadm --lld iscsi --op show --mode target

# create two 256M backing files, attach them to loop devices and export them as two luns:
dd if=/dev/zero of=iscsibackfile0 bs=256M count=1
dd if=/dev/zero of=iscsibackfile1 bs=256M count=1
losetup -f iscsibackfile0
losetup -f iscsibackfile1
tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /dev/loop0
tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 2 -b /dev/loop1

# now let's accept access from everywhere (fine for testing, not for production)
tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
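
# note that neither the tgtadm setup nor the losetup mappings survive a reboot;
# assuming tgt-admin came along with the tgt package, the running target config can at least be dumped:
tgt-admin --dump >/etc/tgt/targets.conf # re-applied by the tgt init script at boot
# the losetup calls still have to be repeated, e.g. from /etc/rc.local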

centos iscsi-initiator

# import the centos gpg key and switch off services we don't need on a test node:
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5
for i in acpid apmd auditd autofs avahi-daemon avahi-dnsconfd bluetooth cups gpm \
   haldaemon hidd hplip ip6tables iptables irqbalance isdn microcode_ctl pcscd smartd xfs; do
   chkconfig $i off;
done
yum -y install iscsi-initiator-utils
service iscsi start; chkconfig iscsi on; service iptables stop

# does the target offer anything for us?
iscsiadm --mode discovery --type sendtargets --portal 10.0.22.1
# now let's log in
iscsiadm --mode node --targetname iqn.2009-11.com.example:storage.disk1 --portal 10.0.22.1:3260 --login
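
# to have the session re-established at boot (node.startup may default to manual),
# the node record can be switched to automatic; a sketch:
iscsiadm --mode node --targetname iqn.2009-11.com.example:storage.disk1 \
  --portal 10.0.22.1:3260 --op update -n node.startup -v automatic
iscsiadm --mode session # verify the session is logged in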

# now let's check that both nodes can read and write properly:
centa$ echo 'this is centas sda' >/dev/sda
centa$ echo 'this is centas sdb' >/dev/sdb
centb$ dd if=/dev/sda bs=18 count=1
this is centas sda [...]
centb$ dd if=/dev/sdb bs=18 count=1
this is centas sdb [...]
centb$ echo 'this is centbs sda' >/dev/sda
centb$ echo 'this is centbs sdb' >/dev/sdb
centa$ dd if=/dev/sda bs=18 count=1
this is centbs sda [...]
centa$ dd if=/dev/sdb bs=18 count=1
this is centbs sdb [...]
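
# the test strings now sit in the first bytes of both discs; before the discs get
# reused it does no harm to zero the first sector again (on one node only):
centa$ dd if=/dev/zero of=/dev/sda bs=512 count=1
centa$ dd if=/dev/zero of=/dev/sdb bs=512 count=1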

rh-cluster way 1 (system-config-cluster)

# 1)   install nodes
# 2)   vi /etc/hosts # make sure both nodes are in this file on both nodes
# 3)   make sure the box can use a proper yum repo to install software, e.g. here with an http-shared centos dvd
#      (the EOT is quoted so the shell does not expand $releasever into the repo file):
cat >>/etc/yum.repos.d/nexus.repo<<'EOT'
[base5u3]
name=CentOS-$releasever - Base
baseurl=http://10.0.1.1/centos
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5
EOT
# 4)   make sure ntp, routes and the duplex modes of the network cards are configured
# 5)   check multipathing; make sure the discs shared between the nodes are accessible, here it's 2 iscsi discs for testing
# 6)   let's install the cluster software
          yum -y install cluster-snmp rgmanager system-config-cluster modcluster
# 7) install fencing: make sure a fencing agent like fence_ipmilan or fence_rsa works; skipped here for testing
# 8) configure the cluster with all nodes and all fence devices, and connect the fence devices to the nodes
           system-config-cluster
# 9) deliver /etc/cluster/cluster.conf to the other nodes (a minimal example follows after this list)
#10) start cluster
      tail -f /var/log/messages & # on both nodes
      service cman start # on both nodes
      clustat # now shows all nodes online
      chkconfig cman on # on both nodes
#11) sed -i 's,2345 55 25,2345 20 25,' /etc/init.d/sshd # on all nodes, start sshd before the error-prone cman
#12) chkconfig sshd off; chkconfig sshd on # re-register sshd with the new start priority
#13) fence_node <other_nodes_name> # if fencing is configured, this should now work (fencing via the cluster)
#14) yum -y install lvm2-cluster gfs-utils # on all nodes
#15) lvmconf --enable-cluster           # on all nodes, enables cluster-lvm
#16) service clvmd start # on all nodes
     chkconfig clvmd on  # on all nodes
#17) monitoring of mirrored volumes:
     # echo "vgchange --monitor y" >> /etc/rc.local
     # better: 'service lvm2-monitor start; chkconfig lvm2-monitor on'
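
# for orientation, a minimal two-node cluster.conf sketch (node names centa/centb are the
# test nodes from above; the empty fence sections are placeholders, a real setup needs working fence devices):
<?xml version="1.0"?>
<cluster name="gfscluster00" config_version="1">
  <cman two_node="1" expected_votes="1"/>
  <clusternodes>
    <clusternode name="centa" nodeid="1"><fence/></clusternode>
    <clusternode name="centb" nodeid="2"><fence/></clusternode>
  </clusternodes>
  <fencedevices/>
</cluster>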

rh-cluster way 2 (luci/ricci)

# 1)   install nodes
# 2)   vi /etc/hosts # make sure both nodes are in this file on both nodes
# 3)   make sure the box can use a proper yum repo to install software, e.g. here with an http-shared centos dvd
#      (the EOT is quoted so the shell does not expand $releasever into the repo file):
cat >>/etc/yum.repos.d/nexus.repo<<'EOT'
[base5u3]
name=CentOS-$releasever - Base
baseurl=http://10.0.1.1/centos
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5
EOT
# 4)   make sure ntp, routes and the duplex modes of the network cards are configured
# 5)   check multipathing; make sure the discs shared between the nodes are accessible, here it's 2 iscsi discs for testing
# 6)   on node2: yum -y install ricci; service ricci start; chkconfig ricci on
       on node1: yum -y install luci ricci; service ricci start; chkconfig ricci on
                 luci_admin init; service luci restart; chkconfig luci on
       on another box: open https://node1:8084 in a web browser
                 log in as admin with the password you provided
                 add the two nodes with 'add a system'
                 click 'cluster', 'create a new cluster' and create a new cluster with your nodes
                 this installs the software and reboots the nodes
# 7) vi /etc/init.d/sshd # change to 'chkconfig: 2345 20 25' (needed to start sshd before the error-prone cman)
# 8) chkconfig sshd off; chkconfig sshd on # re-register sshd with the new start priority
# 9) on both nodes: service cman start; chkconfig cman on
#10) install fencing (with system-config-cluster or the luci webinterface),
     make sure a fencing agent like fence_ipmilan or fence_rsa works; skipped here for testing
#11) fence_node <other_nodes_name> # if fencing is configured, this should now work (fencing via the cluster)
------
#12) lvmconf --enable-cluster           # on all nodes, enables cluster-lvm
#13) service clvmd start # on all nodes
     chkconfig clvmd on  # on all nodes
#14) monitoring of mirrored volumes:
     # echo "vgchange --monitor y" >> /etc/rc.local
     # better: 'service lvm2-monitor start; chkconfig lvm2-monitor on'
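
# whichever way was used, a quick sanity check with the standard cman tools:
cman_tool status # quorum state, votes, cluster name
cman_tool nodes  # cluster membership as cman sees it
clustat          # the same from rgmanager's point of view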

setup of a simple lvm-mirrored volume

# make sure you have a small san-lun (128M) for logging. 
vi /etc/multipath.conf # create a new alias for the logging lun, distribute the file and bring it into service on all nodes (see the sketch after this section)
pvcreate /dev/mapper/san_*; vgcreate -c y vg_gfscluster00 /dev/mapper/san_* # -c y marks the vg as clustered
lvcreate -m1 -l 6425 -n lvtest vg_gfscluster00 /dev/mapper/san_location0_lun0 /dev/mapper/san_location1_lun0 \
  /dev/mapper/san_location0_lun32_logging:0 # create small mirrored volume in logger-lun slot0
lvcreate -m1 -l 6425 -n lvtest1 vg_gfscluster00 /dev/mapper/san_location0_lun1 /dev/mapper/san_location1_lun1 \
  /dev/mapper/san_location0_lun32_logging:1 # create small mirrored volume in logger-lun slot1
lvs --all -o+devices # watch initial sync
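
# a sketch of such an alias entry for /etc/multipath.conf; the wwid here is a
# made-up placeholder, take the real one from 'multipath -ll':
multipaths {
    multipath {
        wwid  360000000000000000000000000000000  # placeholder, use the logging lun's real wwid
        alias san_location0_lun32_logging
    }
}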

using more than 1 lun on each side

# order of pvs is important, alternate between the san-boxes:
lvcreate -m1 -l 12850 -n lvtest2 vg_gfscluster00 \
  /dev/mapper/san_location0_lun2 /dev/mapper/san_location1_lun2 \
  /dev/mapper/san_location0_lun3 /dev/mapper/san_location1_lun3 \
     /dev/mapper/san_location0_lun32_logging:2
# generic form:
# lvcreate -m 1 -l <number_of_extents> -n <lv_name> vg_gfscluster00 \
#   <first-lun>  <mirror-of-first-lun> \
#   <second-lun> <mirror-of-second-lun> \
#     <loglun>:<next_free_slot_on_loglun>
lvs --all -o+devices # check which dm devices sit under each mirror side

create a gfs

# wait for 'lvs --all -o+devices' to show 100% for the lv, so the mirror is fully synced!
lvcreate -m1 -l 6425 -n lv_gfscluster00 vg_gfscluster00 /dev/mapper/san_location0_lun0 /dev/mapper/san_location1_lun0 \
  /dev/mapper/san_location0_lun32_logging:0
mkfs.gfs -p lock_dlm -t gfscluster00:lv_gfscluster00 -j 4 /dev/vg_gfscluster00/lv_gfscluster00
mkdir /application
mount -t gfs /dev/vg_gfscluster00/lv_gfscluster00 /application
echo '/dev/vg_gfscluster00/lv_gfscluster00  /application gfs defaults  0 0'>>/etc/fstab
service gfs start; chkconfig gfs on
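
# should the filesystem need to grow later, extend the lv first and then run gfs_grow
# on the mounted mountpoint (a sketch; extending mirrored lvs on rhel5-era lvm can be
# picky and may require dropping the mirror with 'lvconvert -m0' and re-adding it afterwards):
lvextend -l +1000 /dev/vg_gfscluster00/lv_gfscluster00 # example: add 1000 extents
gfs_grow /application # grow the gfs to fill the extended lv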