# vi: ft=ruby:et:ts=2:sts=2:sw=2

VAGRANTFILE_API_VERSION = 2

# needed for the YAML settings handling below
require 'yaml'
# Defaults for configuration data.
# Will be overridden from the settings file
# and (possibly later) from command-line parameters.

network_opts = [ :type, :link, :flags, :hwaddr, :name, :ipv4, :ipv6 ]

libvirt_network_parms = {

    #:hostname => 'gluno1',

    #:box => 'local-fedora-rawhide-64',
    #:box => 'purpleidea-fedora-21',
    #:box => 'local-fedora-21.2',

    :container_name => 'gluno1',
    #:container_name => 'node1',

    :box => 'local-fedora-21.2',

    :internal_if => 'virbr1',

    :ipv4 => '172.20.10.30',

    # #:ipv4 => '10.111.222.201',

# Load the config, if it exists,
# possibly override it with command-line arguments
# (currently none are supported yet),
# and then store the config.
projectdir = File.expand_path File.dirname(__FILE__)
f = File.join(projectdir, 'vagrant.yaml')

settings = YAML::load_file f
if settings[:vms].is_a?(Array)
  vms = settings[:vms]
end
puts "Loaded settings from #{f}."

# TODO(?): ARGV processing

File.open(f, 'w') do |file|
  file.write settings.to_yaml
end
puts "Wrote settings to #{f}."

# merge the global defaults into each vm definition
# (vm is one entry of the vms array):
defaults.keys.each do |cat|
  next unless vm.has_key?(cat)
  defaults[cat].keys.each do |subcat|
    next unless vm[cat].has_key?(subcat)
    defaults[cat][subcat].keys.each do |key|
      next if vm[cat][subcat].has_key?(key)
      vm[cat][subcat][key] = defaults[cat][subcat][key]
    end
  end
end

#if not vm[:provider][:libvirt].has_key?(:prefix)
#  vm[:provider][:libvirt][:prefix] = default_libvirt_prefix

vm[:networks].each do |net|
  net_default.keys.each do |key|
    net[key] = net_default[key] unless net.has_key?(key)
  end
end

# compose the list of cluster-internal IPs: for each vm, the :ipv4 of
# the network whose :link matches the vm's :internal_if
cluster_internal_ips = vms.map do |vm|
  internal_net = vm[:networks].find { |n| n[:link] == vm[:internal_if] }
  internal_net[:ipv4] if internal_net
end

#print "internal ips: "
#print cluster_internal_ips
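
# With the single default vm above (assuming its network's :link is the
# internal_if 'virbr1'), this yields [ '172.20.10.30' ]; every further vm
# in vagrant.yaml contributes its own internal address.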

NET_FIX_ALWAYS_SCRIPT = <<SCRIPT

# eth1 is not brought up automatically
# by 'vagrant up' of the existing vm.
# because eth1 is not up, glusterd cannot
# be started and gluster volumes cannot
# be mounted. fix it all up here until
# we have a correctly working environment.

for MOUNTPT in $MOUNTPTS

  grep -q -s "${MOUNTPT}" /etc/fstab && {
    # already provisioned...
    systemctl start glusterd
    # sleep to give glusterd some time to start up

    mount | grep -q -s "${MOUNTPT}" && {
      echo "${MOUNTPT} is already mounted."

      echo "Mounting ${MOUNTPT}."

    # not provisioned yet
    echo "${MOUNTPT} not set up yet. Not mounting."

NET_FIX_INITIAL_SCRIPT = <<SCRIPT

# fix dhclient running on the private network interface

systemctl restart NetworkManager

SAMBA_CREATE_CONF_SCRIPT = <<SCRIPT

BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"

mkdir -p ${GLUSTER_VOL_MOUNT}/share1
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share1

mkdir -p ${GLUSTER_VOL_MOUNT}/share2
chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share2

FILE=/etc/samba/smb.conf
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}

netbios name = sambacluster

vfs objects = acl_xattr glusterfs
glusterfs:volume = ${GLUSTER_VOL}
kernel share modes = no

path = ${GLUSTER_VOL_MOUNT}/share2
vfs objects = acl_xattr

# The vagrant machine definitions

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  config.vm.synced_folder ".", "/vagrant", disabled: true

  #if Vagrant.has_plugin?("vagrant-cachier")
  #  config.cache.scope = :machine
  #  #config.cache.scope = :box
  #
  #  config.cache.synced_folder_opts = {
  #    # The nolock option can be useful for an NFSv3 client that wants to avoid the
  #    # NLM sideband protocol. Without this option, apt-get might hang if it tries
  #    # to lock files needed for /var/cache/* operations. All of this can be avoided
  #    # by using NFSv4 everywhere. Please note that the tcp option is not the default.
  #    #mount_options: ['rw', 'vers=3', 'tcp', 'nolock']

  # just let one node do the probing

  vms.each do |machine|
    config.vm.define machine[:hostname] do |node|
      node.vm.box = machine[:provider][:libvirt][:box]
      node.vm.hostname = machine[:hostname]

      node.vm.provider :libvirt do |libvirt|
        libvirt.default_prefix = machine[:provider][:libvirt][:prefix]
        libvirt.memory = 1024
        libvirt.storage :file, :size => '64M', :device => 'vdb'
        libvirt.storage :file, :size => '10G', :device => 'vdc'
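        # vdb and vdc are turned into xfs bricks under /export by the
        # xfs_0 / xfs_1 provisioners below (create-brick.sh)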

      machine[:networks].each do |net|
        if net[:ipv4] != ''
          node.vm.network :private_network, :ip => net[:ipv4]
        end
      end
289 node.vm.provision "selinux", type: "shell" do |s|
290 s.path = "provision/shell/sys/selinux-off.sh"
293 # There is some problem with the fedora base box:
294 # Upon first boot, ifdown eth1 fails and the dhclient
295 # keep being active. Simply bringing down and up again
296 # the interface is not sufficient. We need to restart
297 # NetworkManager in order to teach it to not feel
298 # responsible for the interface any more.
299 ###node.vm.provision "net_fix_initial", type: "shell" do |s|
300 ### s.inline = NET_FIX_INITIAL_SCRIPT
303 node.vm.provision "install", type: "shell" do |s|
304 s.path = "provision/shell/sys/install-yum.sh"
305 s.args = [ "xfsprogs",
309 "glusterfs-geo-replication",
313 "samba-vfs-glusterfs" ]

      # There is some problem with the fedora base box:
      # we need to bring the interface up on reboots;
      # it does not come up automatically.
      ###node.vm.provision "net_fix_always", type: "shell", run: "always" do |s|
      ###  s.inline = NET_FIX_ALWAYS_SCRIPT
      ###  s.args = [ '/gluster/gv0', '/gluster/gv1' ]

      # multiple provisioners with the same name possible?
      node.vm.provision "xfs_0", type: "shell" do |s|
        s.path = "provision/shell/gluster/create-brick.sh"
        s.args = [ "vdb", "/export" ]
      end

      node.vm.provision "xfs_1", type: "shell" do |s|
        s.path = "provision/shell/gluster/create-brick.sh"
        s.args = [ "vdc", "/export" ]
      end
335 node.vm.provision "gluster_start", type: "shell" do |s|
336 s.path = "provision/shell/gluster/gluster-start.sh"
341 node.vm.provision "gluster_probe", type: "shell" do |s|
342 s.path = "provision/shell/gluster/gluster-probe.sh"
343 s.args = cluster_internal_ips
348 node.vm.provision "gluster_wait_peers", type: "shell" do |s|
349 s.path = "provision/shell/gluster/gluster-wait-peers.sh"
350 s.args = [ cluster_internal_ips.length, 300]
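        # presumably: the number of peers to wait for and a timeout in seconds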
353 node.vm.provision "gluster_createvol_0", type: "shell" do |s|
354 mount_points = cluster_internal_ips.map do |ip|
355 "#{ip}:/export/vdb1/brick"
357 s.path = "provision/shell/gluster/gluster-create-volume.sh"
358 s.args = [ "gv0", "3" ] + mount_points
361 node.vm.provision "gluster_mount_0", type: "shell" do |s|
362 s.path = "provision/shell/gluster/gluster-mount-volume.sh"
363 s.args = [ "gv0", "/gluster/gv0" ]
366 node.vm.provision "gluster_createvol_1", type: "shell" do |s|
367 mount_points = cluster_internal_ips.map do |ip|
368 "#{ip}:/export/vdc1/brick"
370 s.path = "provision/shell/gluster/gluster-create-volume.sh"
371 s.args = [ "gv1", "3" ] + mount_points
374 node.vm.provision "gluster_mount_1", type: "shell" do |s|
375 s.path = "provision/shell/gluster/gluster-mount-volume.sh"
376 s.args = [ "gv1", "/gluster/gv1" ]
      end

      # ctdb / samba config

      node.vm.provision "ctdb_stop", type: "shell" do |s|
        s.path = "provision/shell/ctdb/ctdb-stop.sh"
      end

      node.vm.provision "ctdb_create_nodes", type: "shell" do |s|
        s.path = "provision/shell/ctdb/ctdb-create-nodes.sh"
        s.args = cluster_internal_ips
392 #node.vm.provision "ctdb_create_pubaddrs", type: "shell" do |s|
393 # s.path = "provision/shell/ctdb/ctdb-create-pubaddrs.sh"
397 node.vm.provision "ctdb_create_conf", type: "shell" do |s|
398 s.path = "provision/shell/ctdb/ctdb-create-conf.sh"
399 s.args = [ "/gluster/gv0/ctdb" ]
402 node.vm.provision "samba_create_conf", type: "shell" do |s|
403 s.inline = SAMBA_CREATE_CONF_SCRIPT
404 s.args = [ "gv1", "/gluster/gv1" ]
407 node.vm.provision "ctdb_start", type: "shell" do |s|
408 s.path = "provision/shell/ctdb/ctdb-start.sh"