===== What is it? =====
Testing redhat-cluster on centos, with shared disks served from a debian iscsi-target. Based on centos/rhel5.
===== debianish iscsi-target =====
* we need no fancy stuff like scsi persistent reservations, so the simple target (tgt) is enough. A more advanced one is available e.g. from RisingTide.
apt-get install tgt
tgtadm --lld iscsi --op new --mode target --tid 1 -T iqn.2009-11.com.example:storage.disk1 # create target tid 1
tgtadm --lld iscsi --op show --mode target # verify the target shows up
# create two 256M backing files and attach them to loop devices
dd if=/dev/zero of=iscsibackfile0 bs=256M count=1
dd if=/dev/zero of=iscsibackfile1 bs=256M count=1
losetup -f iscsibackfile0
losetup -f iscsibackfile1
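# losetup -f takes the first free loop device; verify the mapping before exporting:
losetup -a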
tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /dev/loop0 # export loop0 as lun 1
tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 2 -b /dev/loop1 # export loop1 as lun 2
# now let's accept access from everywhere (no ACLs, fine for a lab)
tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
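# note: tgtadm changes are runtime-only; tgt-admin (from the same package) can
# dump the live config so the debian init script can restore it after a restart:
tgt-admin --dump >/etc/tgt/targets.conf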
===== centos iscsi-initiator =====
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5
# switch off services we don't need for this test
for i in acpid apmd auditd autofs avahi-daemon avahi-dnsconfd bluetooth cups gpm \
haldaemon hidd hplip ip6tables iptables irqbalance isdn microcode_ctl pcscd smartd xfs; do
chkconfig $i off;
done
yum -y install iscsi-initiator-utils
service iscsi start; chkconfig iscsi on; service iptables stop
# does the target offer anything for us?
iscsiadm --mode discovery --type sendtargets --portal 10.0.22.1
# now let's log in
iscsiadm --mode node --targetname iqn.2009-11.com.example:storage.disk1 --portal 10.0.22.1:3260 --login
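# check that the session is established and which new block devices appeared:
iscsiadm --mode session
ls -l /dev/disk/by-path/ | grep -i iscsi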
# now let's check that both nodes can read and write properly:
centa$ echo 'this is centas sda' >/dev/sda
centa$ echo 'this is centas sdb' >/dev/sdb
centb$ dd if=/dev/sda bs=18 count=1
this is centas sda [...]
centb$ dd if=/dev/sdb bs=18 count=1
this is centas sdb [...]
centb$ echo 'this is centbs sda' >/dev/sda
centb$ echo 'this is centbs sdb' >/dev/sdb
centa$ dd if=/dev/sda bs=18 count=1
this is centbs sda [...]
centa$ dd if=/dev/sdb bs=18 count=1
this is centbs sdb [...]
===== rh-cluster =====
# 1) install nodes
# 2) vi /etc/hosts # make sure both nodes are in this file on both nodes
# 3) make sure the box can use a proper yum-repo to install software, e.g. here with an http-shared centos-dvd:
cat >>/etc/yum.repos.d/nexus.repo<<EOF
[...]
EOF
# now if fencing got configured, fencing by the cluster should also work
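# the repo file content is elided above; a hypothetical minimal version
# (the baseurl is an assumption, point it at your own webserver):
cat >/etc/yum.repos.d/nexus.repo<<EOF
[centos-dvd]
name=centos dvd shared via http
baseurl=http://10.0.22.1/centos/
enabled=1
gpgcheck=0
EOF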
#14) yum -y install lvm2-cluster gfs-utils # on all nodes (gfs also needs the kmod-gfs kernel module)
#15) lvmconf --enable-cluster # on all nodes, enables cluster-lvm
#16) service clvmd start # on all nodes
chkconfig clvmd on # on all nodes
#17) monitoring of mirrored volumes:
# echo "vgchange --monitor y" >> /etc/rc.local
# better: 'service lvm2-monitor start; chkconfig lvm2-monitor on'
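# sanity check on all nodes: cman should be quorate and clvmd running
cman_tool status
cman_tool nodes
service clvmd status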
===== rh-cluster way 2 =====
# 1) install nodes
# 2) vi /etc/hosts # make sure both nodes are in this file on both nodes
# 3) make sure the box can use a proper yum-repo to install software, e.g. here with an http-shared centos-dvd:
cat >>/etc/yum.repos.d/nexus.repo<<EOF
[...]
EOF
# now if fencing got configured, fencing by the cluster should also work
------
#12) lvmconf --enable-cluster # on all nodes, enables cluster-lvm
#13) service clvmd start # on all nodes
chkconfig clvmd on # on all nodes
#14) monitoring of mirrored volumes:
# echo "vgchange --monitor y" >> /etc/rc.local
# better: 'service lvm2-monitor start; chkconfig lvm2-monitor on'
===== setup of a simple lvm-mirrored volume =====
# make sure you have a small san-lun (128M) for the mirror log.
vi /etc/multipath.conf # create a new alias for the log-lun, distribute the file and activate it on all nodes
pvcreate /dev/mapper/san_*; vgcreate -c y vg_gfscluster00 /dev/mapper/san_* # -c y creates a clustered vg
lvcreate -m1 -l 6425 -n lvtest vg_gfscluster00 /dev/mapper/san_location0_lun0 /dev/mapper/san_location1_lun0 \
/dev/mapper/san_location0_lun32_logging:0 # create small mirrored volume in logger-lun slot0
lvcreate -m1 -l 6425 -n lvtest1 vg_gfscluster00 /dev/mapper/san_location0_lun1 /dev/mapper/san_location1_lun1 \
/dev/mapper/san_location0_lun32_logging:1 # create small mirrored volume in logger-lun slot1
lvs --all -o+devices # watch initial sync
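# the copy-percent column climbs to 100 once the mirror legs are in sync; follow it with e.g.:
watch -n 10 'lvs --all -o+devices'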
===== using more than 1 lun on each side =====
# order of pvs is important, alternate between the san-boxes:
lvcreate -m1 -l 12850 -n lvtest2 vg_gfscluster00 \
/dev/mapper/san_location0_lun2 /dev/mapper/san_location1_lun2 \
/dev/mapper/san_location0_lun3 /dev/mapper/san_location1_lun3 \
/dev/mapper/san_location0_lun32_logging:2
# in general:
#lvcreate -m number_of_mirror_copies -l number_of_extents -n name_of_lv vg_gfscluster00 \
# path-to-first-lun path-to-mirror-of-first-lun \
# path-to-second-lun path-to-mirror-of-second-lun \
# path-to-loglun:next_free_slot_on_loglun
lvs --all -o+devices # watch output on what dm-devices are under your mirrorsides
===== create a gfs =====
# wait for 'lvs --all -o+devices' to show 100% for the lv, i.e. the mirror legs are in sync!
lvcreate -m1 -l 6425 -n lv_gfscluster00 vg_gfscluster00 /dev/mapper/san_location0_lun0 /dev/mapper/san_location1_lun0 \
/dev/mapper/san_location0_lun32_logging:0
mkfs.gfs -p lock_dlm -t gfscluster00:lvtest -j 4 /dev/vg_gfscluster00/lv_gfscluster00 # -t clustername:fsname, -j number of journals (>= number of nodes)
mkdir /application
mount -t gfs /dev/vg_gfscluster00/lv_gfscluster00 /application
echo '/dev/vg_gfscluster00/lv_gfscluster00 /application gfs defaults 0 0'>>/etc/fstab
service gfs start; chkconfig gfs on
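# quick cross-node smoke test (assuming /application is mounted on both nodes):
centa$ touch /application/hello-from-centa
centb$ ls -l /application/hello-from-centa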