# vi: ft=ruby:et:ts=2:sts=2:sw=2

require 'yaml'

# Vagrant wants the API version as a string, not an integer.
VAGRANTFILE_API_VERSION = "2"

# Defaults for configuration data.
# These are overridden from the settings file
# and (possibly later) from command-line parameters.

# Keys recognized in a per-vm network definition:
network_opts = [ :type, :link, :flags, :hwaddr, :name, :ipv4, :ipv6 ]
libvirt_network_parms = {

    #:hostname => 'gluno1',

    #:box => 'local-fedora-rawhide-64',
    #:box => 'purpleidea-fedora-21',
    #:box => 'local-fedora-21.2',

    :container_name => 'gluno1',
    #:container_name => 'node1',

    :box => 'local-fedora-21.2',

    :internal_if => 'virbr1',

    :ipv4 => '172.20.10.30',

    #:ipv4 => '10.111.222.201',
# Load the config if it exists,
# possibly override it with command-line args
# (currently none are supported),
# and then store the config back.
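#
# The settings file is expected to provide a :vms array. A minimal
# sketch of the layout, inferred from how the keys are used below
# (values are purely illustrative; the real schema may carry more
# keys):
#
#   :vms:
#   - :hostname: gluno1
#     :provider:
#       :libvirt:
#         :box: local-fedora-21.2
#         :prefix: gluno
#     :internal_if: virbr1
#     :networks:
#     - :link: virbr1
#       :ipv4: 172.20.10.30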
projectdir = File.expand_path File.dirname(__FILE__)
f = File.join(projectdir, 'vagrant.yaml')
if File.exist?(f)
  settings = YAML::load_file f
  vms = settings[:vms] if settings[:vms].is_a?(Array)
  puts "Loaded settings from #{f}."
end

# TODO(?): ARGV processing

File.open(f, 'w') do |file|
  file.write settings.to_yaml
end
puts "Wrote settings to #{f}."
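# Merge the defaults into each vm definition, three levels deep
# (category -> subcategory -> key): values coming from the
# settings file win, only missing keys are filled in from the
# defaults defined above.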
vms.each do |vm|
  defaults.keys.each do |cat|
    next unless vm.has_key?(cat)
    defaults[cat].keys.each do |subcat|
      next unless vm[cat].has_key?(subcat)
      defaults[cat][subcat].keys.each do |key|
        unless vm[cat][subcat].has_key?(key)
          vm[cat][subcat][key] = defaults[cat][subcat][key]
        end
      end
    end
  end
  #if not vm[:provider][:libvirt].has_key?(:prefix)
  #  vm[:provider][:libvirt][:prefix] = default_libvirt_prefix
  #end

  vm[:networks].each do |net|
    net_default.keys.each do |key|
      unless net.has_key?(key)
        net[key] = net_default[key]
      end
    end
  end
end
# Compose the list of cluster-internal IPs: for each vm, pick the
# ipv4 address of the network attached to its internal interface.
cluster_internal_ips = vms.map do |vm|
  ip = nil
  vm[:networks].each do |n|
    if n[:link] == vm[:internal_if]
      ip = n[:ipv4]
    end
  end
  ip
end

#print "internal ips: "
#print cluster_internal_ips
#PROVISION_SCRIPT = <<SCRIPT
#yum -y install make samba
#SCRIPT

SELINUX_SCRIPT = <<SCRIPT
# Put SELinux into permissive mode for the running system and
# disable it in the config so the change survives reboots.
setenforce permissive

BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

FILE=/etc/selinux/config
sed -i${BACKUP_SUFFIX} -e 's/^SELINUX=.*$/SELINUX=disabled/g' ${FILE}
SCRIPT
NET_FIX_ALWAYS_SCRIPT = <<SCRIPT
# Fix the private network interface:
# eth1 is not brought up automatically
# by 'vagrant up' of an existing vm.
# Because eth1 is not up, glusterd cannot
# be started and gluster volumes cannot
# be mounted. Fix it all up here until
# we have a correctly working environment.
ifdown eth1
ifup eth1

MOUNTPTS="$@"

for MOUNTPT in $MOUNTPTS
do
  grep -q -s "${MOUNTPT}" /etc/fstab && {
    # already provisioned...
    systemctl start glusterd
    # sleep to give glusterd some time to start up
    sleep 2

    mount | grep -q -s "${MOUNTPT}" && {
      echo "${MOUNTPT} is already mounted."
    } || {
      echo "Mounting ${MOUNTPT}."
      mount ${MOUNTPT}
    }
  } || {
    # not provisioned yet
    echo "${MOUNTPT} not set up yet. Not mounting."
  }
done
SCRIPT
NET_FIX_INITIAL_SCRIPT = <<SCRIPT
# Fix dhclient running on the private network IF:
# restart NetworkManager so that it no longer feels
# responsible for the interface.
systemctl restart NetworkManager
SCRIPT
INSTALL_SCRIPT = <<SCRIPT
echo "Installing software..."

yum -y install xfsprogs
yum -y install glusterfs{,-server,-fuse,-geo-replication}
yum -y install ctdb samba{,-client,-vfs-glusterfs}
SCRIPT
XFS_SCRIPT = <<SCRIPT
DEVICE=$1
PARTDEV=${DEVICE}1

DISKDEV="/dev/${DEVICE}"
DISKPARTDEV="/dev/${PARTDEV}"

MOUNTP=/export/${PARTDEV}
BRICKD=${MOUNTP}/brick

BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

# Label, partition, and format the disk, then mount it via
# /etc/fstab -- each step is skipped if it was already done.
parted -s ${DISKDEV} print > /dev/null 2>&1 && {
  echo "Label exists on ${DISKDEV}."
} || {
  echo "Creating label on ${DISKDEV}."
  parted -s ${DISKDEV} mklabel msdos
}

parted -s ${DISKDEV} print 1 > /dev/null 2>&1 && {
  echo "Partition ${DISKPARTDEV} exists."
} || {
  echo "Creating partition ${DISKPARTDEV}."
  parted -s ${DISKDEV} mkpart primary 1 100%
}

blkid -s TYPE ${DISKPARTDEV} | grep -q -s 'TYPE="xfs"' && {
  echo "Partition ${DISKPARTDEV} contains an xfs file system."
} || {
  echo "Creating xfs file system on ${DISKPARTDEV}."
  mkfs.xfs -f ${DISKPARTDEV}
}

mkdir -p ${MOUNTP}

FILE=/etc/fstab

grep -q -s ${DISKPARTDEV} ${FILE} && {
  echo "Mount entry for ${DISKPARTDEV} is present in ${FILE}."
} || {
  echo "Creating mount entry for ${DISKPARTDEV} in ${FILE}."
  test -f ${FILE} || touch ${FILE}
  cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}
  cat <<EOF >> ${FILE}
${DISKPARTDEV} ${MOUNTP} xfs defaults 0 0
EOF
}

mount | grep ${MOUNTP} && {
  echo "${MOUNTP} is already mounted."
} || {
  echo "Mounting ${MOUNTP}."
  mount ${MOUNTP}
}

mkdir -p ${BRICKD}
SCRIPT
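# All of the provisioning scripts follow the same idempotency
# pattern: check whether a step has already been carried out and
# only perform it if not, so that repeated 'vagrant provision'
# runs are safe.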
GLUSTER_START_SCRIPT = <<SCRIPT
systemctl start glusterd.service
SCRIPT
#GLUSTER_PROBE_SCRIPT = <<SCRIPT
#PEER_IP=$1
#gluster peer probe ${PEER_IP}
#SCRIPT

GLUSTER_PROBE_SCRIPT = <<SCRIPT
PEER_IPS="$@"

for PEER_IP in ${PEER_IPS}
do
  # try for some time to reach the other node:
  for COUNT in $(seq 1 20)
  do
    gluster peer probe ${PEER_IP} 2> /dev/null && {
      break
    }
    sleep 1
  done
done
SCRIPT
GLUSTER_WAIT_PEERS_SCRIPT = <<SCRIPT
NUM_NODES="$1"

echo "Waiting for $NUM_NODES peers."

for count in $(seq 1 60)
do
  PEERS=$(gluster pool list | grep -v ^UUID | wc -l)
  [ "$PEERS" = "$NUM_NODES" ] && {
    echo "Done waiting: $NUM_NODES peers connected."
    exit 0
  }
  sleep 1
done

echo "TIMEOUT waiting for $NUM_NODES peers."
exit 1
SCRIPT
GLUSTER_CREATEVOL_SCRIPT = <<SCRIPT
# Create and start a gluster volume; tolerate re-runs and retry
# while another gluster transaction is in progress.
VOLNAME=$1
REP=$2
shift 2

while true; do
  MSG="$(gluster volume status ${VOLNAME} 2>&1 1>/dev/null)"
  RET=$?
  [ $RET -eq 0 ] && break
  [ "$MSG" = 'Another transaction is in progress. Please try again after sometime.' ] || break
  sleep 1
done

[ $RET -eq 0 ] && {
  echo "gluster volume ${VOLNAME} already exists and is active."
  exit 0
}

[ "$MSG" = "Volume ${VOLNAME} does not exist" ] && {
  echo "Creating gluster volume ${VOLNAME}."
  echo "cmd: gluster volume create $VOLNAME rep $REP transport tcp $@"
  while true; do
    MSG=$(gluster volume create $VOLNAME rep $REP transport tcp $@ 2>&1 1>/dev/null)
    RET=$?
    [ $RET -eq 0 ] && break
    [ "$MSG" = "volume create: ${VOLNAME}: failed: Volume ${VOLNAME} already exists" ] && { RET=0; break; }
    [ "$MSG" = "volume create: $VOLNAME: failed: Another transaction is in progress. Please try again after sometime." ] || break
    sleep 1
  done

  [ $RET -eq 0 ] || {
    echo "gluster volume create $VOLNAME failed ('$MSG') - trying to force."
    while true; do
      MSG=$(gluster volume create $VOLNAME rep $REP transport tcp $@ force 2>&1 1>/dev/null)
      RET=$?
      [ $RET -eq 0 ] && break
      [ "$MSG" = "volume create: ${VOLNAME}: failed: Volume ${VOLNAME} already exists" ] && { RET=0; break; }
      [ "$MSG" = "volume create: $VOLNAME: failed: Another transaction is in progress. Please try again after sometime." ] || break
      sleep 1
    done
  }

  [ $RET -eq 0 ] || {
    echo "gluster volume create $VOLNAME failed with force ('$MSG') - giving up"
    exit 1
  }
}

while true; do
  MSG="$(gluster volume status ${VOLNAME} 2>&1 1>/dev/null)"
  RET=$?
  [ $RET -eq 0 ] && break
  [ "$MSG" = 'Another transaction is in progress. Please try again after sometime.' ] || break
  sleep 1
done

[ $RET -eq 0 ] && {
  echo "gluster volume ${VOLNAME} is already started."
  exit 0
}

[ "$MSG" = "Volume ${VOLNAME} is not started" ] && {
  echo "starting gluster volume ${VOLNAME}."
  while true; do
    MSG=$(gluster volume start ${VOLNAME} 2>&1 1> /dev/null)
    RET=$?
    [ $RET -eq 0 ] && break
    [ "$MSG" = "volume start: ${VOLNAME}: failed: Volume ${VOLNAME} already started" ] && { RET=0; break; }
    [ "$MSG" = "volume start: ${VOLNAME}: failed: Another transaction is in progress. Please try again after sometime." ] || break
    sleep 1
  done
  [ $RET -eq 0 ] || echo "gluster volume start ${VOLNAME} failed ('$MSG')."
  exit $RET
}

echo "Error: 'gluster volume status ${VOLNAME}' gave '$MSG' ($RET)"
exit 1
SCRIPT
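#
# The create/start logic above deliberately parses gluster's error
# messages: when several nodes provision in parallel, "already
# exists" and "Another transaction is in progress" replies are
# expected, and the script reacts by retrying or by treating the
# step as already done.
#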
GLUSTER_MOUNT_SCRIPT = <<SCRIPT
VOLNAME=$1
MOUNTPT=$2

MOUNTDEV="127.0.0.1:/${VOLNAME}"

mkdir -p ${MOUNTPT}

#mount -t glusterfs ${MOUNTDEV} ${MOUNTPT}

BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

FILE=/etc/fstab

grep -q -s "${MOUNTPT}" ${FILE} || {
  test -f ${FILE} || touch ${FILE}
  cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}
  cat <<EOF >> ${FILE}
${MOUNTDEV} ${MOUNTPT} glusterfs defaults,selinux 0 0
EOF
}

mount | grep -q -s ${MOUNTPT} && {
  echo "${MOUNTPT} is already mounted."
} || {
  echo "Mounting ${MOUNTPT}."
  mount ${MOUNTPT}
}
SCRIPT
CTDB_STOP_SCRIPT = <<SCRIPT
systemctl stop ctdb.service
SCRIPT
CTDB_CREATE_NODES_SCRIPT = <<SCRIPT
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

NODES_IPS="$@"

FILE=/etc/ctdb/nodes
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

# Rewrite the nodes file with one internal IP per line.
echo -n > ${FILE}
for IP in ${NODES_IPS}
do
  echo "$IP" >> ${FILE}
done
SCRIPT
CTDB_CREATE_PUBADDRS_SCRIPT = <<SCRIPT
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

PUB_IPS="$@"

FILE=/etc/ctdb/public_addresses
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

echo -n > ${FILE}
for IP in ${PUB_IPS}
do
  echo ${IP} >> ${FILE}
done
SCRIPT
CTDB_CREATE_CONF_SCRIPT = <<SCRIPT
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

RECLOCKDIR=/gluster/gv0/ctdb
mkdir -p ${RECLOCKDIR}
RECLOCKFILE=${RECLOCKDIR}/reclock

PUBLIC_ADDRESSES_FILE=/etc/ctdb/public_addresses
NODES_FILE=/etc/ctdb/nodes

FILE=/etc/sysconfig/ctdb
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

echo -n > ${FILE}
cat <<EOF >> ${FILE}
CTDB_NODES=${NODES_FILE}
#CTDB_PUBLIC_ADDRESSES=${PUBLIC_ADDRESSES_FILE}
CTDB_RECOVERY_LOCK=${RECLOCKFILE}
CTDB_MANAGES_SAMBA="yes"
CTDB_SAMBA_SKIP_SHARE_CHECK="yes"
#CTDB_MANAGES_WINBIND="yes"
EOF
SCRIPT
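# Note: CTDB_RECOVERY_LOCK must point to a file on storage shared
# by all nodes, which is why the reclock lives on gluster volume
# gv0 (mounted at /gluster/gv0).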
SAMBA_CREATE_CONF_SCRIPT = <<SCRIPT
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

GLUSTER_VOL=$1
GLUSTER_VOL_MOUNT=$2

mkdir -p ${GLUSTER_VOL_MOUNT}/share1
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share1

mkdir -p ${GLUSTER_VOL_MOUNT}/share2
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share2

FILE=/etc/samba/smb.conf
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

echo -n > ${FILE}
cat <<EOF >> ${FILE}
[global]
    netbios name = sambacluster
    clustering = yes

[share1]
    path = /share1
    vfs objects = acl_xattr glusterfs
    glusterfs:volume = ${GLUSTER_VOL}
    kernel share modes = no

[share2]
    path = ${GLUSTER_VOL_MOUNT}/share2
    vfs objects = acl_xattr
EOF
SCRIPT
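# share1 is exported through vfs_glusterfs (direct libgfapi
# access, hence 'kernel share modes = no' and a path relative to
# the volume root), while share2 is exported from the fuse mount
# of the same volume.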
CTDB_START_SCRIPT = <<SCRIPT
systemctl start ctdb.service
SCRIPT
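#
# Typical usage, assuming the vagrant-libvirt plugin is installed
# (the exact commands are not prescribed by this file):
#
#   vagrant up --provider=libvirt
#   vagrant provision                # re-run the scripts above
#   vagrant ssh <hostname>
#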
# The vagrant machine definitions

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
  end

  # just let one node do the probing
  vms.each do |machine|
    config.vm.define machine[:hostname] do |node|
      node.vm.box = machine[:provider][:libvirt][:box]
      node.vm.hostname = machine[:hostname]

      node.vm.provider :libvirt do |libvirt|
        libvirt.default_prefix = machine[:provider][:libvirt][:prefix]
        libvirt.memory = 1024
        libvirt.storage :file, :size => '64M', :device => 'vdb'
        libvirt.storage :file, :size => '10G', :device => 'vdc'
      end

      machine[:networks].each do |net|
        if net[:ipv4] != ''
          node.vm.network :private_network, :ip => net[:ipv4]
        end
      end
      node.vm.provision "selinux", type: "shell" do |s|
        s.inline = SELINUX_SCRIPT
      end

      # There is some problem with the fedora base box:
      # upon first boot, ifdown eth1 fails and dhclient
      # keeps running. Simply bringing the interface down
      # and up again is not sufficient. We need to restart
      # NetworkManager in order to teach it to no longer
      # feel responsible for the interface.
      node.vm.provision "net_fix_initial", type: "shell" do |s|
        s.inline = NET_FIX_INITIAL_SCRIPT
      end

      node.vm.provision "install", type: "shell" do |s|
        s.inline = INSTALL_SCRIPT
      end

      # There is some problem with the fedora base box:
      # the private interface does not come up automatically
      # on reboot, so we have to bring it up ourselves.
      node.vm.provision "net_fix_always", type: "shell", run: "always" do |s|
        s.inline = NET_FIX_ALWAYS_SCRIPT
        s.args = [ '/gluster/gv0', '/gluster/gv1' ]
      end
      # TODO: are multiple provisioners with the same name possible?
      node.vm.provision "xfs_0", type: "shell" do |s|
        s.inline = XFS_SCRIPT
        #s.args = [ "vdb", "/export/gluster/brick1" ]
        s.args = [ "vdb" ]
      end

      node.vm.provision "xfs_1", type: "shell" do |s|
        s.inline = XFS_SCRIPT
        #s.args = [ "vdc" , "/export/gluster/brick2" ]
        s.args = [ "vdc" ]
      end

      node.vm.provision "gluster_start", type: "shell" do |s|
        s.inline = GLUSTER_START_SCRIPT
      end
      node.vm.provision "gluster_probe", type: "shell" do |s|
        s.inline = GLUSTER_PROBE_SCRIPT
        s.args = cluster_internal_ips
      end

      node.vm.provision "gluster_wait_peers", type: "shell" do |s|
        s.inline = GLUSTER_WAIT_PEERS_SCRIPT
        s.args = [ cluster_internal_ips.length ]
      end
      node.vm.provision "gluster_createvol_0", type: "shell" do |s|
        mount_points = cluster_internal_ips.map do |ip|
          "#{ip}:/export/vdb1/brick"
        end
        s.inline = GLUSTER_CREATEVOL_SCRIPT
        s.args = [ "gv0", "3" ] + mount_points
      end

      node.vm.provision "gluster_mount_0", type: "shell" do |s|
        s.inline = GLUSTER_MOUNT_SCRIPT
        s.args = [ "gv0", "/gluster/gv0" ]
      end

      node.vm.provision "gluster_createvol_1", type: "shell" do |s|
        mount_points = cluster_internal_ips.map do |ip|
          "#{ip}:/export/vdc1/brick"
        end
        s.inline = GLUSTER_CREATEVOL_SCRIPT
        s.args = [ "gv1", "3" ] + mount_points
      end

      node.vm.provision "gluster_mount_1", type: "shell" do |s|
        s.inline = GLUSTER_MOUNT_SCRIPT
        s.args = [ "gv1", "/gluster/gv1" ]
      end
      #
      # ctdb / samba config
      #

      node.vm.provision "ctdb_stop", type: "shell" do |s|
        s.inline = CTDB_STOP_SCRIPT
      end

      node.vm.provision "ctdb_create_nodes", type: "shell" do |s|
        s.inline = CTDB_CREATE_NODES_SCRIPT
        s.args = cluster_internal_ips
      end

      #node.vm.provision "ctdb_create_pubaddrs", type: "shell" do |s|
      #  s.inline = CTDB_CREATE_PUBADDRS_SCRIPT
      #end

      node.vm.provision "ctdb_create_conf", type: "shell" do |s|
        s.inline = CTDB_CREATE_CONF_SCRIPT
      end

      node.vm.provision "samba_create_conf", type: "shell" do |s|
        s.inline = SAMBA_CREATE_CONF_SCRIPT
        s.args = [ "gv1", "/gluster/gv1" ]
      end

      node.vm.provision "ctdb_start", type: "shell" do |s|
        s.inline = CTDB_START_SCRIPT
      end
    end
  end
end