Install
# ALL nodes
cd /etc/yum.repos.d/
wget http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo
yum install glusterfs-server -y
service glusterd start && chkconfig glusterd on
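# Optional sanity check (a small sketch): confirm glusterd came up and which version was installed
service glusterd status
glusterfs --version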
# Ensure that TCP ports 111, 24007, 24008 and 24009-(24009 + number of bricks across all volumes) are open on all Gluster servers.
# If you will be using NFS, open additional ports 38465 to 38467.
# You can use the following chains with iptables:
iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24047 -j ACCEPT
iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 111 -j ACCEPT
iptables -A RH-Firewall-1-INPUT -m state --state NEW -m udp -p udp --dport 111 -j ACCEPT
iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 38465:38467 -j ACCEPT
service iptables save
service iptables restart
# Note: you need one open port per brick, starting at 24009. The 24007:24047 rule above covers the two management ports plus up to 39 bricks across all volumes.
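# Alternative sketch: instead of the wide 24007:24047 rule above, open just the management
# ports plus the exact brick range (BRICK_COUNT is a placeholder for the total number of
# bricks across all volumes)
BRICK_COUNT=3
iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24008 -j ACCEPT
iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24009:$((24009 + BRICK_COUNT - 1)) -j ACCEPT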
# On the first node only
gluster peer probe <second_host>
# Check peers status
gluster peer status
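# With more than two nodes, a small sketch to probe them all from the first node
# (gluster2..gluster4 are placeholder hostnames)
for peer in gluster2 gluster3 gluster4; do
    gluster peer probe "$peer"
done
gluster peer status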
Config
# Create a GPT partition table with a single partition on the brick disk
parted
select /dev/xvdf
mklabel gpt
mkpart primary 0 -1
print
quit
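# Equivalent non-interactive sketch; the percent bounds also keep the partition aligned
parted -s /dev/xvdf mklabel gpt mkpart primary 0% 100%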
# Update kernel partition table
partprobe /dev/xvdf
# Create filesystem
mkfs.ext4 /dev/xvdf1
# Mount brick
mkdir -p /export/xvdf1
echo "/dev/xvdf1 /export/xvdf1 ext4 defaults 0 0" >> /etc/fstab
mount /export/xvdf1
mkdir -p /export/xvdf1/brick
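# Quick check before creating the volume: the brick filesystem should be mounted
df -h /export/xvdf1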
# Execute on a single node only
# NB! Ensure that both hostnames are resolvable from every node (a plain IP won't do)
# Create volume
gluster volume create gv0 replica 2 <node1>:/export/xvdf1/brick <node2>:/export/xvdf1/brick
# Start volume
gluster volume start gv0
# Verify
gluster volume info
gluster volume status
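# Optional hardening sketch: restrict which clients may mount the volume
# (the 10.0.0.* network below is a placeholder for your client subnet)
gluster volume set gv0 auth.allow "10.0.0.*"
gluster volume info gv0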
OpenNode OpenVZ overlay
# Mount the volume on clients over NFS
# (Gluster's built-in NFS server supports NFSv3 only, so pin the version if the client defaults to v4)
mount -t nfs -o vers=3 <nodeX>:/gv0 /mnt
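# Alternative sketch: the native FUSE client (glusterfs-fuse package) connects to all
# bricks and fails over between replicas, unlike an NFS mount to a single node
mount -t glusterfs <nodeX>:/gv0 /mnt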
# Migrate the existing /vz data into the volume root
# (the volume is mounted over /storage/local/vz later, so no /mnt/vz subdirectory is needed)
rsync -av /storage/local/vz/ /mnt/
umount /mnt
# Mount the GlusterFS volume over /storage/local/vz
# (_netdev makes the mount wait for networking at boot; vers=3 matches the NFSv3-only Gluster NFS server)
echo "<nodeX>:/gv0 /storage/local/vz nfs defaults,vers=3,_netdev 0 0" >> /etc/fstab
mount /storage/local/vz
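# Sanity check before starting any containers: the overlay should be in place
df -h /storage/local/vz
mount | grep /storage/local/vz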