:provider => {
:libvirt => {
:prefix => 'vagrant',
+ :box => 'fedora/23-cloud-base',
+ },
+ :virtualbox => {
+ :prefix => 'vagrant',
+ :box => 'fedora/23-cloud-base',
},
},
}
#:container_name => 'node1',
},
:libvirt => {
- :box => 'local-fedora-21.2',
:prefix => 'gluster',
},
},
},
]
+
#
# Load the config, if it exists,
# possibly override with commandline args,
#print cluster_internal_ips
#print "\n"
-#PROVISION_SCRIPT = <<SCRIPT
-#yum -y install make samba
-#SCRIPT
NET_FIX_ALWAYS_SCRIPT = <<SCRIPT
set -e
+
# eth1 is not brought up automatically
-# by 'vagrant up' of the existing vm
+# by 'vagrant up' of the existing vm.
+# Because eth1 is not up, glusterd cannot
+# be started and gluster volumes cannot
+# be mounted. Fix it all up here until
+# we have a correctly working environment.
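+# Bounce the interface: ifdown first so a
+# half-configured eth1 is reset before ifup.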
+ifdown eth1
ifup eth1
+
+MOUNTPTS="$@"
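+# e.g. MOUNTPTS="/gluster/gv0 /gluster/gv1"
+# (cf. the net_fix_always provisioner arguments below)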
+
+for MOUNTPT in $MOUNTPTS
+do
+ grep -q -s "${MOUNTPT}" /etc/fstab && {
+ # already provisioned...
+ systemctl start glusterd
+ # sleep to give glusterd some time to start up
+ sleep 2
+
+ mount | grep -q -s "${MOUNTPT}" && {
+ echo "${MOUNTPT} is already mounted."
+ } || {
+ echo "Mounting ${MOUNTPT}."
+ mount ${MOUNTPT}
+ }
+
+ systemctl start ctdb
+ } || {
+ # not provisioned yet
+ echo "${MOUNTPT} not set up yet. Not mounting."
+ }
+done
+
SCRIPT
NET_FIX_INITIAL_SCRIPT = <<SCRIPT
# Fix dhclient running on the private network interface
ifdown eth1
systemctl restart NetworkManager
+ifdown eth1
ifup eth1
SCRIPT
-INSTALL_SCRIPT = <<SCRIPT
-set -e
-yum -y install xfsprogs
-yum -y install glusterfs{,-server,-fuse,-geo-replication}
-yum -y install ctdb samba
-SCRIPT
-XFS_SCRIPT = <<SCRIPT
+SAMBA_CREATE_CONF_SCRIPT = <<SCRIPT
set -e
-DEVICE=$1
-PARTDEV=${DEVICE}1
-DISKDEV="/dev/${DEVICE}"
-DISKPARTDEV="/dev/${PARTDEV}"
-##MOUNTP=$2
-MOUNTP=/export/${PARTDEV}
-BRICKD=${MOUNTP}/brick
-
BACKUP_SUFFIX=".orig.$(date +%Y%m%d-%H%M%S)"
-parted -s ${DISKDEV} mklabel msdos
-parted -s ${DISKDEV} mkpart primary 1 100%
-mkfs.xfs -f ${DISKPARTDEV}
+GLUSTER_VOL=$1
+
+GLUSTER_VOL_MOUNT=$2
-mkdir -p ${MOUNTP}
+mkdir -p ${GLUSTER_VOL_MOUNT}/share1
+chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share1
-FILE=/etc/fstab
+mkdir -p ${GLUSTER_VOL_MOUNT}/share2
+chmod -R 0777 ${GLUSTER_VOL_MOUNT}/share2
+
+FILE=/etc/samba/smb.conf
test -f ${FILE} || touch ${FILE}
cp -f -a ${FILE} ${FILE}${BACKUP_SUFFIX}
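+# start from an empty config (the original was backed up above)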
+echo -n > ${FILE}
cat <<EOF >> ${FILE}
-${DISKPARTDEV} ${MOUNTP} xfs defaults 0 0
+[global]
+ netbios name = sambacluster
+ workgroup = vagrant
+ security = user
+
+ clustering = yes
+ #include = registry
+
+[share1]
+ path = /share1
+ vfs objects = acl_xattr glusterfs
+ glusterfs:volume = ${GLUSTER_VOL}
+ kernel share modes = no
+ read only = no
+
+[share2]
+ path = ${GLUSTER_VOL_MOUNT}/share2
+ vfs objects = acl_xattr
+ read only = no
EOF
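+
+# Optional sanity check of the generated config
+# (assuming testparm is available):
+#testparm -s ${FILE}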
-
-mount ${MOUNTP}
-
-mkdir ${BRICKD}
SCRIPT
-GLUSTER_START_SCRIPT = <<SCRIPT
-set -e
-systemctl start glusterd.service
-SCRIPT
-#GLUSTER_PROBE_SCRIPT = <<SCRIPT
-#set -e
#
-#PEER_IP=$1
+# disks: hard-coded for all nodes for now:
+# TODO: make (some of) these configurable ...
#
-#gluster peer probe ${PEER_IP}
-#SCRIPT
-
-GLUSTER_PROBE_SCRIPT = <<SCRIPT
-set -e
-
-PEER_IPS="$@"
+disks = [
+ {
+ :size => 1, # in GB
+ #:volname => "gv0",
+ },
+ {
+ :size => 10,
+ #:volname => "gv1",
+ },
+]
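+# NOTE: the volume names are derived from the disk index below
+# (gv0, gv1, ...); the commented-out :volname entries are unused.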
-for PEER_IP in ${PEER_IPS}
-do
- gluster peer probe ${PEER_IP}
-done
-SCRIPT
+driveletters = ('b'..'z').to_a
+
+#brick_mount_prefix = "/export"
+brick_mount_prefix = "/bricks"
+brick_path_suffix = "brick"
+gluster_volume_prefix = "gv"
+gluster_mount_prefix = "/gluster"
+
+disks.each_with_index do |disk,disk_num|
+ disk[:number] = disk_num
+ disk[:volume_name] = "#{gluster_volume_prefix}#{disk[:number]}"
+ disk[:volume_mount_point] = "#{gluster_mount_prefix}/#{disk[:volume_name]}"
+ disk[:dev_names] = {
+ :libvirt => "vd#{driveletters[disk[:number]]}",
+ :virtualbox => "sd#{driveletters[disk[:number]]}",
+ }
+ disk[:dev_name] = "sd#{driveletters[disk[:number]]}"
+ disk[:brick_name] = "brick0"
+ disk[:label] = "#{disk[:volume_name]}-#{disk[:brick_name]}"
+ disk[:brick_mount_point] = "#{brick_mount_prefix}/#{disk[:label]}"
+ disk[:brick_path] = "#{disk[:brick_mount_point]}/#{brick_path_suffix}"
+end
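+
+# Example of the values derived above for disk 0:
+#   volume_name        "gv0"
+#   volume_mount_point "/gluster/gv0"
+#   brick_mount_point  "/bricks/gv0-brick0"
+#   brick_path         "/bricks/gv0-brick0/brick"
+#   dev_names          vdb (libvirt) / sdb (virtualbox)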
-GLUSTER_CREATEVOL_SCRIPT = <<SCRIPT
-set -e
-VOLNAME=$1
-shift
-REP=$1
-shift
+# /dev/{sv}db --> xfs filesystem (on /dev/{sv}db1)
+# --> mounted under /bricks/gv0
+# --> dir /bricks/gv0/brick --> brick dir for gluster createvol gv0
+# --> gluster/fuse mount /gluster/gv0
-echo "gluster volume create $VOLNAME rep $REP transport tcp $@"
-gluster volume create $VOLNAME rep $REP transport tcp $@
-gluster volume start $VOLNAME
-SCRIPT
+my_config = {
+ :provider => :libvirt,
+}
#
# The vagrant machine definitions
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- if Vagrant.has_plugin?("vagrant-cachier")
- config.cache.scope = :box
- end
+ config.vm.synced_folder ".", "/vagrant", disabled: true
+ #config.vm.synced_folder './', '/vagrant', type: '9p', disabled: false, accessmode: "squash", owner: "vagrant"
+
+ #if Vagrant.has_plugin?("vagrant-cachier")
+ # config.cache.scope = :machine
+ # #config.cache.scope = :box
+
+ # config.cache.synced_folder_opts = {
+ # type: :nfs,
+ # # The nolock option can be useful for an NFSv3 client that wants to avoid the
+ # # NLM sideband protocol. Without this option, apt-get might hang if it tries
+ # # to lock files needed for /var/cache/* operations. All of this can be avoided
+ # # by using NFSv4 everywhere. Please note that the tcp option is not the default.
+ # #mount_options: ['rw', 'vers=3', 'tcp', 'nolock']
+ # }
+ #end
- vms.each do |machine|
+ # just let one node do the probing
+ probing = false
+
+ vms.each_with_index do |machine,machine_num|
config.vm.define machine[:hostname] do |node|
node.vm.box = machine[:provider][:libvirt][:box]
node.vm.hostname = machine[:hostname]
- node.vm.provider :libvirt do |libvirt|
- libvirt.default_prefix = machine[:provider][:libvirt][:prefix]
- libvirt.memory = 1024
- libvirt.storage :file, :size => '64M', :device => 'vdb'
- libvirt.storage :file, :size => '10G', :device => 'vdc'
+ print "machine #{machine_num}: #{machine[:hostname]}\n"
+
+ node.vm.provider :libvirt do |lv|
+ lv.default_prefix = machine[:provider][:libvirt][:prefix]
+ lv.memory = 1024
+ end
+
+ node.vm.provider :virtualbox do |vb|
+ vb.memory = 1024
+ end
- machine[:networks].each do |net|
- if not net[:ipv4] == ''
- node.vm.network :private_network, :ip => net[:ipv4]
- end
+ disks.each do |disk|
+ node.vm.provider :libvirt do |lv|
+ print " [libvirt] attaching disk ##{disk[:number]}: #{disk[:dev_name]}\n"
+ lv.storage :file, :size => "#{disk[:size]}G", :device => "#{disk[:dev_names][:libvirt]}"
+ #lv.storage :file, :size => "#{disk[:size]}G", :bus => "sata" , :device => "#{disk[:dev_name]}"
+ end
+ node.vm.provider :virtualbox do |vb|
+ disk_size = disk[:size]*1024
+ #disk_file = "disk-#{machine_num}-#{disk[:dev_names][:virtualbox]}.vdi"
+ #print " [virtualbox] disk ##{disk[:number]}: #{disk[:dev_names][:virtualbox]}\n"
+ disk_file = "disk-#{machine_num}-#{disk[:dev_name]}.vdi"
+ print " [virtualbox] attaching disk ##{disk[:number]}: #{disk[:dev_name]}\n"
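+ # create the VDI once, then attach it to the base box's
+ # "SATA Controller"; ports 0-2 are assumed to be taken by
+ # the box's own disks, hence the offset of 3.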
+ vb.customize [ "createhd", "--filename", disk_file, "--size", disk_size ]
+ vb.customize [ "storageattach", :id, "--storagectl", "SATA Controller", "--port", 3+disk[:number], "--device", 0, "--type", "hdd", "--medium", disk_file ]
end
end
- # There is some problem with the fedora base box:
- # We need to up the interface on reboots.
- # It does not come up automatically.
- node.vm.provision "net_fix_always", type: "shell", run: "always" do |s|
- s.inline = NET_FIX_ALWAYS_SCRIPT
+ machine[:networks].each do |net|
+ if not net[:ipv4] == ''
+ node.vm.network :private_network, :ip => net[:ipv4]
+ end
+ end
+
+ node.vm.provision "selinux", type: "shell" do |s|
+ s.path = "provision/shell/sys/selinux-off.sh"
end
# There is some problem with the fedora base box:
# bringing the interface up is not sufficient. We need to
# restart NetworkManager in order to teach it to not feel
# responsible for the interface any more.
- node.vm.provision "net_fix_initial", type: "shell" do |s|
- s.inline = NET_FIX_INITIAL_SCRIPT
+ ###node.vm.provision "net_fix_initial", type: "shell" do |s|
+ ### s.inline = NET_FIX_INITIAL_SCRIPT
+ ###end
+
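+ # Note: samba-vfs-glusterfs provides the vfs_glusterfs module
+ # referenced in the smb.conf generated by SAMBA_CREATE_CONF_SCRIPT.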
+ node.vm.provision "install", type: "shell" do |s|
+ s.path = "provision/shell/sys/install-yum.sh"
+ s.args = [ "xfsprogs",
+ "glusterfs",
+ "glusterfs-server",
+ "glusterfs-fuse",
+ "glusterfs-geo-replication",
+ "ctdb",
+ "samba",
+ "samba-client",
+ "samba-vfs-glusterfs" ]
+ end
+
+ # There is some problem with the fedora base box:
+ # We need to up the interface on reboots.
+ # It does not come up automatically.
+ ###node.vm.provision "net_fix_always", type: "shell", run: "always" do |s|
+ ### s.inline = NET_FIX_ALWAYS_SCRIPT
+ ### s.args = [ '/gluster/gv0', '/gluster/gv1' ]
+ ###end
+
+ disks.each do |disk|
+ print " create_brick: size #{disk[:size]}G, label #{disk[:label]} under #{disk[:brick_mount_point]}\n"
+ node.vm.provision "create_brick_#{disk[:number]}", type: "shell" do |s|
+ s.path = "provision/shell/gluster/create-brick.v2.sh"
+ s.args = [ "#{disk[:size]}G", disk[:label], disk[:brick_mount_point], brick_path_suffix ]
+ end
end
+
- node.vm.provision :shell do |s|
- s.inline = INSTALL_SCRIPT
+ node.vm.provision "gluster_start", type: "shell" do |s|
+ s.path = "provision/shell/gluster/gluster-start.sh"
end
- # multiple privisioners with same name possible?
- node.vm.provision "xfs", type: "shell" do |s|
- s.inline = XFS_SCRIPT
- #s.args = [ "vdb", "/export/gluster/brick1" ]
- s.args = [ "vdb" ]
+ if !probing
+ probing = true
+ node.vm.provision "gluster_probe", type: "shell" do |s|
+ s.path = "provision/shell/gluster/gluster-probe.sh"
+ s.args = cluster_internal_ips
+ end
end
- node.vm.provision "xfs", type: "shell" do |s|
- s.inline = XFS_SCRIPT
- #s.args = [ "vdc" , "/export/gluster/brick2" ]
- s.args = [ "vdc" ]
+ node.vm.provision "gluster_wait_peers", type: "shell" do |s|
+ s.path = "provision/shell/gluster/gluster-wait-peers.sh"
+ s.args = [ cluster_internal_ips.length, 300 ]
end
- node.vm.provision "gluster_start", type: "shell" do |s|
- s.inline = GLUSTER_START_SCRIPT
+
+ disks.each do |disk|
+ brick_mount_points = cluster_internal_ips.map do |ip|
+ "#{ip}:#{disk[:brick_path]}"
+ end
+
+ print " brick directories: #{brick_mount_points}\n"
+
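+ # "3" is the replica count for the volume (one brick per node)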
+ node.vm.provision "gluster_createvol_#{disk[:number]}", type: "shell" do |s|
+ s.path = "provision/shell/gluster/gluster-create-volume.sh"
+ s.args = [ disk[:volume_name], "3" ] + brick_mount_points
+ end
+
+ node.vm.provision "gluster_mount_#{disk[:number]}", type: "shell" do |s|
+ s.path = "provision/shell/gluster/gluster-mount-volume.sh"
+ s.args = [ disk[:volume_name], disk[:volume_mount_point] ]
+ end
end
- node.vm.provision "gluster_probe", type: "shell" do |s|
- s.inline = GLUSTER_PROBE_SCRIPT
+ #
+ # ctdb / samba config
+ #
+
+ node.vm.provision "ctdb_stop", type: "shell" do |s|
+ s.path = "provision/shell/ctdb/ctdb-stop.sh"
+ end
+
+ node.vm.provision "ctdb_create_nodes", type: "shell" do |s|
+ s.path = "provision/shell/ctdb/ctdb-create-nodes.sh"
s.args = cluster_internal_ips
end
- node.vm.provision "gluster_createvol", type: "shell" do |s|
- mount_points = cluster_internal_ips.map { |ip| "#{ip}:/export/vdb1/brick" }
- s.inline = GLUSTER_CREATEVOL_SCRIPT
- s.args = [ "gv0", "3" ] + mount_points
+ #node.vm.provision "ctdb_create_pubaddrs", type: "shell" do |s|
+ # s.path = "provision/shell/ctdb/ctdb-create-pubaddrs.sh"
+ # s.args =
+ #end
+
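+ # shared directory for ctdb on the gv0 cluster volume
+ # (presumably holding the recovery lock)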
+ node.vm.provision "ctdb_create_conf", type: "shell" do |s|
+ s.path = "provision/shell/ctdb/ctdb-create-conf.sh"
+ s.args = [ "/gluster/gv0/ctdb" ]
end
+
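+ # gv1 is exported twice: share1 directly via vfs_glusterfs,
+ # share2 through the fuse mount (see SAMBA_CREATE_CONF_SCRIPT)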
+ node.vm.provision "samba_create_conf", type: "shell" do |s|
+ s.inline = SAMBA_CREATE_CONF_SCRIPT
+ s.args = [ "gv1", "/gluster/gv1" ]
+ end
+
+ node.vm.provision "ctdb_start", type: "shell" do |s|
+ s.path = "provision/shell/ctdb/ctdb-start.sh"
+ end
+
end
end