# vi: ft=ruby:et:ts=2:sts=2:sw=2

VAGRANTFILE_API_VERSION = 2

# Defaults for the configuration data.
# Will be overridden from the settings file
# and (possibly later) from command line parameters.
network_opts = [ :type, :link, :flags, :hwaddr, :name, :ipv4, :ipv6 ]

libvirt_network_parms = {

  :box => 'fedora/23-cloud-base',

  :box => 'fedora/23-cloud-base',

  #:hostname => 'gluno1',
  #:box => 'local-fedora-rawhide-64',
  #:box => 'purpleidea-fedora-21',
  #:box => 'local-fedora-21.2',

  :container_name => 'gluno1',
  #:container_name => 'node1',

  :internal_if => 'virbr1',

  :ipv4 => '172.20.10.30',
  # #:ipv4 => '10.111.222.201',

# Load the config, if it exists,
# possibly override with command line args
# (currently none supported yet),
# and then store the config.
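# For illustration only: the settings file is plain YAML with symbol keys,
# roughly mirroring the defaults above (the values here are made up), e.g.:
#
#   :vms:
#   - :hostname: node1
#     :provider:
#       :libvirt:
#         :box: fedora/23-cloud-base
#     :networks:
#     - :link: virbr1
#       :ipv4: 172.20.10.30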
projectdir = File.expand_path File.dirname(__FILE__)
f = File.join(projectdir, 'vagrant.yaml')

settings = YAML::load_file f

if settings[:vms].is_a?(Array)

puts "Loaded settings from #{f}."

# TODO(?): ARGV-processing

File.open(f, 'w') do |file|
  file.write settings.to_yaml

puts "Wrote settings to #{f}."

defaults.keys.each do |cat|
  next if not vm.has_key?(cat)
  defaults[cat].keys.each do |subcat|
    next if not vm[cat].has_key?(subcat)
    defaults[cat][subcat].keys.each do |key|
      if not vm[cat][subcat].has_key?(key)
        vm[cat][subcat][key] = defaults[cat][subcat][key]

#if not vm[:provider][:libvirt].has_key?(:prefix)
#  vm[:provider][:libvirt][:prefix] = default_libvirt_prefix

vm[:networks].each do |net|
  net_default.keys.each do |key|
    if not net.has_key?(key)
      net[key] = net_default[key]

# compose the list of cluster internal ips
cluster_internal_ips = vms.map do |vm|
  vm[:networks].each do |n|
    if n[:link] == vm[:internal_if]

#print "internal ips: "
#print cluster_internal_ips
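# With the defaults above this ends up as something like ["172.20.10.30"]:
# one address per VM, taken from the network whose :link matches the VM's
# :internal_if.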
NET_FIX_ALWAYS_SCRIPT = <<SCRIPT

# eth1 is not brought up automatically
# by 'vagrant up' of the existing vm.
# Because eth1 is not up, glusterd cannot
# be started and gluster volumes cannot
# be mounted. Fix it all up here until
# we have a correctly working environment.

for MOUNTPT in $MOUNTPTS

grep -q -s "${MOUNTPT}" /etc/fstab && {
# already provisioned...
systemctl start glusterd
# sleep to give glusterd some time to start up

mount | grep -q -s "${MOUNTPT}" && {
echo "${MOUNTPT} is already mounted."

echo "Mounting ${MOUNTPT}."

# not provisioned yet
echo "${MOUNTPT} not set up yet. Not mounting."

NET_FIX_INITIAL_SCRIPT = <<SCRIPT

# Fix dhclient running on the private network interface
systemctl restart NetworkManager
SAMBA_CREATE_CONF_SCRIPT = <<SCRIPT

BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

mkdir -p ${GLUSTER_VOL_MOUNT}/share1
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share1

mkdir -p ${GLUSTER_VOL_MOUNT}/share2
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share2

FILE=/etc/samba/smb.conf
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

netbios name = sambacluster

vfs objects = acl_xattr glusterfs
glusterfs:volume = ${GLUSTER_VOL}
kernel share modes = no

path = ${GLUSTER_VOL_MOUNT}/share2
vfs objects = acl_xattr

# disks: hard-coded for all nodes for now:
# TODO: make (some of) these configurable ...
driveletters = ('b'..'z').to_a

#brick_mount_prefix = "/export"
brick_mount_prefix = "/bricks"
brick_path_suffix = "brick"
gluster_volume_prefix = "gv"
gluster_mount_prefix = "/gluster"

disks.each_with_index do |disk, disk_num|
  disk[:number] = disk_num
  disk[:volume_name] = "#{gluster_volume_prefix}#{disk[:number]}"
  disk[:volume_mount_point] = "#{gluster_mount_prefix}/#{disk[:volume_name]}"
    :libvirt => "vd#{driveletters[disk[:number]]}",
    :virtualbox => "sd#{driveletters[disk[:number]]}",
  disk[:dev_name] = "sd#{driveletters[disk[:number]]}"
  disk[:brick_name] = "brick0"
  disk[:label] = "#{disk[:volume_name]}-#{disk[:brick_name]}"
  disk[:brick_mount_point] = "#{brick_mount_prefix}/#{disk[:label]}"
  disk[:brick_path] = "#{disk[:brick_mount_point]}/#{brick_path_suffix}"

# /dev/{sv}db --> xfs filesystem (on /dev/{sv}db1)
# --> mounted under /bricks/gv0
# --> dir /bricks/gv0/brick --> dir for gluster createvol gv0
# --> gluster/fuse mount /gluster/gv0
  :provider => :libvirt,

# The vagrant machine definitions

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  config.vm.synced_folder ".", "/vagrant", disabled: true
  #config.vm.synced_folder './', '/vagrant', type: '9p', disabled: false, accessmode: "squash", owner: "vagrant"

  #if Vagrant.has_plugin?("vagrant-cachier")
  #  config.cache.scope = :machine
  #  #config.cache.scope = :box

  #  config.cache.synced_folder_opts = {

  #    # The nolock option can be useful for an NFSv3 client that wants to avoid the
  #    # NLM sideband protocol. Without this option, apt-get might hang if it tries
  #    # to lock files needed for /var/cache/* operations. All of this can be avoided
  #    # by using NFSv4 everywhere. Please note that the tcp option is not the default.
  #    #mount_options: ['rw', 'vers=3', 'tcp', 'nolock']

  # just let one node do the probing

  vms.each_with_index do |machine, machine_num|
    config.vm.define machine[:hostname] do |node|
      node.vm.box = machine[:provider][:libvirt][:box]
      node.vm.hostname = machine[:hostname]

      print "machine #{machine_num}: #{machine[:hostname]}\n"

      node.vm.provider :libvirt do |lv|
        lv.default_prefix = machine[:provider][:libvirt][:prefix]

      node.vm.provider :virtualbox do |vb|

      node.vm.provider :libvirt do |lv|
        print " [libvirt] attaching disk ##{disk[:number]}: #{disk[:dev_name]}\n"
        lv.storage :file, :size => "#{disk[:size]}G", :device => "#{disk[:dev_names][:libvirt]}"
        #lv.storage :file, :size => "#{disk[:size]}G", :bus => "sata", :device => "#{disk[:dev_name]}"

      node.vm.provider :virtualbox do |vb|
        disk_size = disk[:size] * 1024
        #disk_file = "disk-#{machine_num}-#{disk[:dev_names][:virtualbox]}.vdi"
        #print " [virtualbox] disk ##{disk[:number]}: #{disk[:dev_names][:virtualbox]}\n"
        disk_file = "disk-#{machine_num}-#{disk[:dev_name]}.vdi"
        print " [virtualbox] attaching disk ##{disk[:number]}: #{disk[:dev_name]}\n"
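        # createhd creates the VDI file for this data disk; storageattach then
        # hooks it up to the "SATA Controller", starting at port 3 (the lower
        # ports are presumably left for the base box's own disks).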
        vb.customize [ "createhd", "--filename", disk_file, "--size", disk_size ]
        vb.customize [ "storageattach", :id, "--storagectl", "SATA Controller", "--port", 3 + disk[:number], "--device", 0, "--type", "hdd", "--medium", disk_file ]

      machine[:networks].each do |net|
        if not net[:ipv4] == ''
          node.vm.network :private_network, :ip => net[:ipv4]

      node.vm.provision "selinux", type: "shell" do |s|
        s.path = "provision/shell/sys/selinux-off.sh"

      # There is some problem with the fedora base box:
      # Upon first boot, ifdown eth1 fails and the dhclient
      # keeps being active. Simply bringing the interface
      # down and up again is not sufficient. We need to restart
      # NetworkManager in order to teach it to not feel
      # responsible for the interface any more.
      ###node.vm.provision "net_fix_initial", type: "shell" do |s|
      ###  s.inline = NET_FIX_INITIAL_SCRIPT

      node.vm.provision "install", type: "shell" do |s|
        s.path = "provision/shell/sys/install-yum.sh"
        s.args = [ "xfsprogs",
                   "glusterfs-geo-replication",
                   "samba-vfs-glusterfs" ]

      # There is some problem with the fedora base box:
      # We need to bring the interface up on reboots;
      # it does not come up automatically.
      ###node.vm.provision "net_fix_always", type: "shell", run: "always" do |s|
      ###  s.inline = NET_FIX_ALWAYS_SCRIPT
      ###  s.args = [ '/gluster/gv0', '/gluster/gv1' ]

      print " create_brick: size #{disk[:size]}G, label #{disk[:label]} under #{disk[:brick_mount_point]}\n"
      node.vm.provision "create_brick_#{disk[:number]}", type: "shell" do |s|
        s.path = "provision/shell/gluster/create-brick.v2.sh"
        s.args = [ "#{disk[:size]}G", disk[:label], disk[:brick_mount_point], brick_path_suffix ]

      node.vm.provision "gluster_start", type: "shell" do |s|
        s.path = "provision/shell/gluster/gluster-start.sh"

      node.vm.provision "gluster_probe", type: "shell" do |s|
        s.path = "provision/shell/gluster/gluster-probe.sh"
        s.args = cluster_internal_ips

      node.vm.provision "gluster_wait_peers", type: "shell" do |s|
        s.path = "provision/shell/gluster/gluster-wait-peers.sh"
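        # args: the expected number of peers and (presumably) a timeout in seconds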
        s.args = [ cluster_internal_ips.length, 300 ]

      brick_mount_points = cluster_internal_ips.map do |ip|
        "#{ip}:#{disk[:brick_path]}"

      print " brick directories: #{brick_mount_points}\n"

      node.vm.provision "gluster_createvol_#{disk[:number]}", type: "shell" do |s|
        s.path = "provision/shell/gluster/gluster-create-volume.sh"
        s.args = [ disk[:volume_name], "3" ] + brick_mount_points

      node.vm.provision "gluster_mount_#{disk[:number]}", type: "shell" do |s|
        s.path = "provision/shell/gluster/gluster-mount-volume.sh"
        s.args = [ disk[:volume_name], disk[:volume_mount_point] ]

      # ctdb / samba config

      node.vm.provision "ctdb_stop", type: "shell" do |s|
        s.path = "provision/shell/ctdb/ctdb-stop.sh"

      node.vm.provision "ctdb_create_nodes", type: "shell" do |s|
        s.path = "provision/shell/ctdb/ctdb-create-nodes.sh"
        s.args = cluster_internal_ips

      #node.vm.provision "ctdb_create_pubaddrs", type: "shell" do |s|
      #  s.path = "provision/shell/ctdb/ctdb-create-pubaddrs.sh"

      node.vm.provision "ctdb_create_conf", type: "shell" do |s|
        s.path = "provision/shell/ctdb/ctdb-create-conf.sh"
        s.args = [ "/gluster/gv0/ctdb" ]
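        # "/gluster/gv0/ctdb" is a directory on the shared gv0 volume, presumably
        # used by ctdb-create-conf.sh for CTDB's cluster-wide state (e.g. the reclock file)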
      node.vm.provision "samba_create_conf", type: "shell" do |s|
        s.inline = SAMBA_CREATE_CONF_SCRIPT
        s.args = [ "gv1", "/gluster/gv1" ]
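        # "gv1" and "/gluster/gv1" are presumably consumed by the inline script as
        # GLUSTER_VOL and GLUSTER_VOL_MOUNT (see SAMBA_CREATE_CONF_SCRIPT above)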
      node.vm.provision "ctdb_start", type: "shell" do |s|
        s.path = "provision/shell/ctdb/ctdb-start.sh"