# vi: ft=ruby:et:ts=2:sts=2:sw=2

require 'yaml'

VAGRANTFILE_API_VERSION = 2
# Defaults for configuration data.
# These will be overridden from the settings file
# and (possibly, later) from command-line parameters.
network_opts = [ :type, :link, :flags, :hwaddr, :name, :ipv4, :ipv6 ]

libvirt_network_parms = {
#:hostname => 'gluno1',
#:box => 'local-fedora-rawhide-64',
#:box => 'purpleidea-fedora-21',
#:box => 'local-fedora-21.2',
:container_name => 'gluno1',
#:container_name => 'node1',
:box => 'local-fedora-21.2',
:internal_if => 'virbr1',
:ipv4 => '172.20.10.30',
#:ipv4 => '10.111.222.201',
# Load the config if it exists,
# possibly override it with command-line args
# (currently none supported yet),
# and then store the config back.
projectdir = File.expand_path File.dirname(__FILE__)
f = File.join(projectdir, 'vagrant.yaml')

settings = YAML::load_file f

if settings[:vms].is_a?(Array)
  vms = settings[:vms]
end
puts "Loaded settings from #{f}."
# TODO(?): ARGV-processing
File.open(f, 'w') do |file|
  file.write settings.to_yaml
end
puts "Wrote settings to #{f}."
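
# For reference, a minimal sketch of the vagrant.yaml layout this
# loader reads and writes (hypothetical values; Ruby symbol keys
# serialize with a leading colon):
#
#   :vms:
#   - :hostname: gluno1
#     :networks:
#     - :link: virbr1
#       :ipv4: 172.20.10.30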

# Merge default settings into each VM's configuration,
# two levels deep (category / subcategory / key),
# and fill in missing per-network settings:
vms.each do |vm|
  defaults.keys.each do |cat|
    next if not vm.has_key?(cat)
    defaults[cat].keys.each do |subcat|
      next if not vm[cat].has_key?(subcat)
      defaults[cat][subcat].keys.each do |key|
        if not vm[cat][subcat].has_key?(key)
          vm[cat][subcat][key] = defaults[cat][subcat][key]
        end
      end
    end
  end

  #if not vm[:provider][:libvirt].has_key?(:prefix)
  #  vm[:provider][:libvirt][:prefix] = default_libvirt_prefix
  #end

  vm[:networks].each do |net|
    net_default.keys.each do |key|
      if not net.has_key?(key)
        net[key] = net_default[key]
      end
    end
  end
end

# Compose the list of cluster-internal IPs: for each VM,
# pick the ipv4 of the network attached to its internal interface.
cluster_internal_ips = vms.map do |vm|
  ip = nil
  vm[:networks].each do |n|
    if n[:link] == vm[:internal_if]
      ip = n[:ipv4]
    end
  end
  ip
end

#print "internal ips: "
#print cluster_internal_ips
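
# cluster_internal_ips ends up holding one internal IP per VM,
# e.g. [ '172.20.10.30', '172.20.10.31', '172.20.10.32' ]
# (addresses beyond the default above are hypothetical).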

#PROVISION_SCRIPT = <<SCRIPT
#yum -y install make samba

NET_FIX_ALWAYS_SCRIPT = <<SCRIPT

# eth1 is not brought up automatically
# by 'vagrant up' of an existing vm.
# Because eth1 is not up, glusterd cannot
# be started and gluster volumes cannot
# be mounted. Fix it all up here until
# we have a correctly working environment.
ifup eth1   # (assumed: bring the interface up, as described above)

MOUNTPTS=$@

for MOUNTPT in $MOUNTPTS
do
  grep -q -s "${MOUNTPT}" /etc/fstab && {
    # already provisioned...
    systemctl start glusterd
    # sleep to give glusterd some time to start up
    sleep 2   # (assumed duration)
    mount | grep -q -s "${MOUNTPT}" && {
      echo "${MOUNTPT} is already mounted."
    } || {
      echo "Mounting ${MOUNTPT}."
      mount ${MOUNTPT}
    }
  } || {
    # not provisioned yet
    echo "${MOUNTPT} not set up yet. Not mounting."
  }
done
SCRIPT

NET_FIX_INITIAL_SCRIPT = <<SCRIPT
# Stop dhclient from running on the private network IF:
# restart NetworkManager so it no longer manages the interface.
systemctl restart NetworkManager
SCRIPT

GLUSTER_WAIT_PEERS_SCRIPT = <<SCRIPT
# Wait until the expected number of peers ($1) shows up
# in the gluster pool, or give up after $2 seconds.
NUM_NODES=$1
TIMEOUT=$2

echo "Waiting for $NUM_NODES peers."

for count in $(seq 1 ${TIMEOUT})
do
  PEERS=$(gluster pool list | grep -v ^UUID | wc -l)
  [ "$PEERS" = "$NUM_NODES" ] && {
    echo "Done waiting: $NUM_NODES peers connected."
    exit 0
  }
  sleep 1
done

echo "TIMEOUT waiting for $NUM_NODES peers."
exit 1
SCRIPT
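
# For reference, `gluster pool list` prints a UUID/Hostname/State
# table that includes the local node, e.g. (hypothetical UUIDs):
#
#   UUID        Hostname        State
#   8b2e3f...   172.20.10.31    Connected
#   4c1a9d...   localhost       Connected
#
# so dropping the header and counting lines yields the pool size,
# including the local node.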

GLUSTER_CREATEVOL_SCRIPT = <<SCRIPT
# Create and start gluster volume $1 (replica count $2, brick
# list in the remaining args), retrying as long as gluster
# reports "Another transaction is in progress".
VOLNAME=$1
REP=$2
shift 2

MSG="$(gluster volume status ${VOLNAME} 2>&1 1>/dev/null)"
RET=$?
[ $RET -eq 0 ] && break
[ "${MSG}" != "${MSG#Another transaction is in progress}" ] || break

echo "gluster volume ${VOLNAME} already exists and is active."

[ "$MSG" = "Volume ${VOLNAME} does not exist" ] && {
  echo "Creating gluster volume ${VOLNAME}."
  echo "cmd: gluster volume create $VOLNAME rep $REP transport tcp $@"

  MSG=$(gluster volume create $VOLNAME rep $REP transport tcp $@ 2>&1 1>/dev/null)
  RET=$?
  [ $RET -eq 0 ] && break
  [ "$MSG" = "volume create: ${VOLNAME}: failed: Volume ${VOLNAME} already exists" ] && {
  [ "${MSG}" != "${MSG#Another transaction is in progress}" ] || break

  echo "gluster volume create $VOLNAME failed ('$MSG') - trying to force."

  MSG=$(gluster volume create $VOLNAME rep $REP transport tcp $@ force 2>&1 1>/dev/null)
  RET=$?
  [ $RET -eq 0 ] && break
  [ "$MSG" = "volume create: ${VOLNAME}: failed: Volume ${VOLNAME} already exists" ] && {
  [ "${MSG}" != "${MSG#Another transaction is in progress}" ] || break

  echo "gluster volume create $VOLNAME failed with force ('$MSG') - giving up."

MSG="$(gluster volume status ${VOLNAME} 2>&1 1>/dev/null)"
RET=$?
[ $RET -eq 0 ] && break
[ "${MSG}" != "${MSG#Another transaction is in progress}" ] || break

echo "gluster volume ${VOLNAME} is already started."

[ "$MSG" = "Volume ${VOLNAME} is not started" ] && {
  echo "starting gluster volume ${VOLNAME}."

  MSG=$(gluster volume start ${VOLNAME} 2>&1 1>/dev/null)
  RET=$?
  [ $RET -eq 0 ] && break
  [ "$MSG" = "volume start: ${VOLNAME}: failed: Volume ${VOLNAME} already started" ] && {
  [ "${MSG}" != "${MSG#Another transaction is in progress}" ] || break

echo "gluster volume start ${VOLNAME} failed ('$MSG')."

echo "Error: 'gluster volume status ${VOLNAME}' gave '$MSG' ($RET)"
SCRIPT
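
# For illustration, with three nodes the resulting create call
# would look like (addresses beyond the default are hypothetical):
#
#   gluster volume create gv0 rep 3 transport tcp \
#     172.20.10.30:/export/vdb1/brick \
#     172.20.10.31:/export/vdb1/brick \
#     172.20.10.32:/export/vdb1/brick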

GLUSTER_MOUNT_SCRIPT = <<SCRIPT
# Add an fstab entry for gluster volume $1 at mountpoint $2
# (if missing) and mount it.
VOLNAME=$1
MOUNTPT=$2

MOUNTDEV="127.0.0.1:/${VOLNAME}"

BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

FILE=/etc/fstab

grep -q -s "${MOUNTPT}" ${FILE} || {
  test -f ${FILE} || touch ${FILE}
  cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

  cat <<EOF >> ${FILE}
${MOUNTDEV} ${MOUNTPT} glusterfs defaults,selinux 0 0
EOF
}

mount | grep -q -s ${MOUNTPT} && {
  echo "${MOUNTPT} is already mounted."
} || {
  echo "Mounting ${MOUNTPT}."
  mkdir -p ${MOUNTPT}   # (assumed: make sure the mountpoint exists)
  mount ${MOUNTPT}
}
SCRIPT

CTDB_CREATE_NODES_SCRIPT = <<SCRIPT
# Write the cluster-internal IPs (passed as arguments)
# to the ctdb nodes file, one per line.
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

NODES_IPS=$@

FILE=/etc/ctdb/nodes
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

echo -n "" > ${FILE}   # (assumed: start from an empty file)
for IP in ${NODES_IPS}
do
  echo "$IP" >> ${FILE}
done
SCRIPT
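
# The resulting /etc/ctdb/nodes is simply one cluster-internal
# IP per line, e.g. (hypothetical addresses):
#
#   172.20.10.30
#   172.20.10.31
#   172.20.10.32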

CTDB_CREATE_PUBADDRS_SCRIPT = <<SCRIPT
# Write the public addresses passed as arguments to the
# ctdb public_addresses file.
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

FILE=/etc/ctdb/public_addresses
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

echo -n "" > ${FILE}   # (assumed: start from an empty file)
for IP in $@
do
  echo ${IP} >> ${FILE}
done
SCRIPT
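
# ctdb expects one address per line in CIDR notation, optionally
# followed by the interface to bring it up on, e.g. (hypothetical):
#
#   172.20.10.100/24 eth1
#   172.20.10.101/24 eth1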

CTDB_CREATE_CONF_SCRIPT = <<SCRIPT
# Generate the ctdb sysconfig file; the recovery lock must
# live on shared storage, hence on the gluster volume.
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

RECLOCKDIR=/gluster/gv0/ctdb
mkdir -p ${RECLOCKDIR}
RECLOCKFILE=${RECLOCKDIR}/reclock

PUBLIC_ADDRESSES_FILE=/etc/ctdb/public_addresses
NODES_FILE=/etc/ctdb/nodes

FILE=/etc/sysconfig/ctdb
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

cat <<EOF > ${FILE}
CTDB_NODES=${NODES_FILE}
#CTDB_PUBLIC_ADDRESSES=${PUBLIC_ADDRESSES_FILE}
CTDB_RECOVERY_LOCK=${RECLOCKFILE}
CTDB_MANAGES_SAMBA="yes"
CTDB_SAMBA_SKIP_SHARE_CHECK="yes"
#CTDB_MANAGES_WINBIND="yes"
EOF
SCRIPT

SAMBA_CREATE_CONF_SCRIPT = <<SCRIPT
# Generate a clustered smb.conf with two shares: share1 served
# through the glusterfs vfs module, share2 from the fuse mount.
# (The [share1]/[share2] section names and the clustering
# setting are assumed.)
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

GLUSTER_VOL=$1
GLUSTER_VOL_MOUNT=$2

mkdir -p ${GLUSTER_VOL_MOUNT}/share1
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share1

mkdir -p ${GLUSTER_VOL_MOUNT}/share2
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share2

FILE=/etc/samba/smb.conf
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

cat <<EOF > ${FILE}
[global]
    netbios name = sambacluster
    clustering = yes

[share1]
    vfs objects = acl_xattr glusterfs
    glusterfs:volume = ${GLUSTER_VOL}
    kernel share modes = no

[share2]
    path = ${GLUSTER_VOL_MOUNT}/share2
    vfs objects = acl_xattr
EOF
SCRIPT
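
# Once ctdb has started samba, the shares can be checked from any
# node with, e.g.:
#
#   smbclient -N -L localhost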

# The vagrant machine definitions

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  config.vm.synced_folder ".", "/vagrant", disabled: true

  #if Vagrant.has_plugin?("vagrant-cachier")
  #  config.cache.scope = :machine
  #  #config.cache.scope = :box
  #
  #  config.cache.synced_folder_opts = {
  #    # The nolock option can be useful for an NFSv3 client that wants to avoid the
  #    # NLM sideband protocol. Without this option, apt-get might hang if it tries
  #    # to lock files needed for /var/cache/* operations. All of this can be avoided
  #    # by using NFSv4 everywhere. Please note that the tcp option is not the default.
  #    #mount_options: ['rw', 'vers=3', 'tcp', 'nolock']
  #  }
  #end

  # just let one node do the probing

  vms.each do |machine|
    config.vm.define machine[:hostname] do |node|
      node.vm.box = machine[:provider][:libvirt][:box]
      node.vm.hostname = machine[:hostname]

      node.vm.provider :libvirt do |libvirt|
        libvirt.default_prefix = machine[:provider][:libvirt][:prefix]
        libvirt.memory = 1024
        libvirt.storage :file, :size => '64M', :device => 'vdb'
        libvirt.storage :file, :size => '10G', :device => 'vdc'
      end

      machine[:networks].each do |net|
        if not net[:ipv4] == ''
          node.vm.network :private_network, :ip => net[:ipv4]
        end
      end
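
      # With the defaults above this evaluates to, e.g.:
      #   node.vm.network :private_network, :ip => '172.20.10.30'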

      node.vm.provision "selinux", type: "shell" do |s|
        s.path = "provision/shell/sys/selinux-off.sh"
      end

      # There is some problem with the fedora base box:
      # Upon first boot, ifdown eth1 fails and dhclient
      # keeps running. Simply bringing the interface down
      # and up again is not sufficient. We need to restart
      # NetworkManager so that it no longer feels responsible
      # for the interface.
      ###node.vm.provision "net_fix_initial", type: "shell" do |s|
      ###  s.inline = NET_FIX_INITIAL_SCRIPT
      ###end

      node.vm.provision "install", type: "shell" do |s|
        s.path = "provision/shell/sys/install-yum.sh"
        s.args = [ "xfsprogs",
                   "glusterfs-geo-replication",
                   "samba-vfs-glusterfs" ]
      end

      # There is some problem with the fedora base box:
      # we need to bring the interface up on reboots,
      # since it does not come up automatically.
      ###node.vm.provision "net_fix_always", type: "shell", run: "always" do |s|
      ###  s.inline = NET_FIX_ALWAYS_SCRIPT
      ###  s.args = [ '/gluster/gv0', '/gluster/gv1' ]
      ###end

      # TODO: are multiple provisioners with the same name possible?
      node.vm.provision "xfs_0", type: "shell" do |s|
        s.path = "provision/shell/gluster/create-brick.sh"
        s.args = [ "vdb", "/export" ]
      end

      node.vm.provision "xfs_1", type: "shell" do |s|
        s.path = "provision/shell/gluster/create-brick.sh"
        s.args = [ "vdc", "/export" ]
      end

      node.vm.provision "gluster_start", type: "shell" do |s|
        s.path = "provision/shell/gluster/gluster-start.sh"
      end

      node.vm.provision "gluster_probe", type: "shell" do |s|
        s.path = "provision/shell/gluster/gluster-probe.sh"
        s.args = cluster_internal_ips
      end

      node.vm.provision "gluster_wait_peers", type: "shell" do |s|
        s.inline = GLUSTER_WAIT_PEERS_SCRIPT
        s.args = [ cluster_internal_ips.length, 300 ]
      end

      node.vm.provision "gluster_createvol_0", type: "shell" do |s|
        mount_points = cluster_internal_ips.map do |ip|
          "#{ip}:/export/vdb1/brick"
        end
        s.inline = GLUSTER_CREATEVOL_SCRIPT
        s.args = [ "gv0", "3" ] + mount_points
      end

      node.vm.provision "gluster_mount_0", type: "shell" do |s|
        s.inline = GLUSTER_MOUNT_SCRIPT
        s.args = [ "gv0", "/gluster/gv0" ]
      end

      node.vm.provision "gluster_createvol_1", type: "shell" do |s|
        mount_points = cluster_internal_ips.map do |ip|
          "#{ip}:/export/vdc1/brick"
        end
        s.inline = GLUSTER_CREATEVOL_SCRIPT
        s.args = [ "gv1", "3" ] + mount_points
      end

      node.vm.provision "gluster_mount_1", type: "shell" do |s|
        s.inline = GLUSTER_MOUNT_SCRIPT
        s.args = [ "gv1", "/gluster/gv1" ]
      end

      #
      # ctdb / samba config
      #

      node.vm.provision "ctdb_stop", type: "shell" do |s|
        s.path = "provision/shell/ctdb/ctdb-stop.sh"
      end

      node.vm.provision "ctdb_create_nodes", type: "shell" do |s|
        s.inline = CTDB_CREATE_NODES_SCRIPT
        s.args = cluster_internal_ips
      end

      #node.vm.provision "ctdb_create_pubaddrs", type: "shell" do |s|
      #  s.inline = CTDB_CREATE_PUBADDRS_SCRIPT
      #end

      node.vm.provision "ctdb_create_conf", type: "shell" do |s|
        s.inline = CTDB_CREATE_CONF_SCRIPT
      end

      node.vm.provision "samba_create_conf", type: "shell" do |s|
        s.inline = SAMBA_CREATE_CONF_SCRIPT
        s.args = [ "gv1", "/gluster/gv1" ]
      end

      node.vm.provision "ctdb_start", type: "shell" do |s|
        s.path = "provision/shell/ctdb/ctdb-start.sh"
      end
    end
  end
end
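
# Typical workflow from the project directory (machine name taken
# from the defaults above):
#
#   vagrant up
#   vagrant ssh gluno1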