scripts: Add autocluster-test-ctdb script master autocluster-2.0
authorMartin Schwenke <martin@meltin.net>
Fri, 26 Jun 2020 11:48:40 +0000 (21:48 +1000)
committerMartin Schwenke <martin@meltin.net>
Thu, 13 Aug 2020 00:55:08 +0000 (10:55 +1000)
samba_version.py by Amitay Isaacs <amitay@ozlabs.org>

Signed-off-by: Martin Schwenke <martin@meltin.net>
Makefile
autocluster.spec.in
test-ctdb/README.md [new file with mode: 0644]
test-ctdb/autocluster-test-ctdb.sh [new file with mode: 0755]
test-ctdb/build.yml [new file with mode: 0644]
test-ctdb/ipv6ify [new file with mode: 0755]
test-ctdb/samba_version.py [new file with mode: 0755]
test-ctdb/test.yml [new file with mode: 0644]

index 8b520d11c62045206bec670a5dd0e00351ea5ce3..c816d34e158f8f1a03896016cc2fedcb34e15f46 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ prefix  = /usr/local
 datadir        = ${prefix}/share/autocluster
 bindir = ${prefix}/bin
 
-datas  = COPYING defaults.yml ansible vagrant
+datas  = COPYING defaults.yml ansible vagrant test-ctdb
 hacks  = autocluster.hack
 genpkg  = debian/changelog autocluster.spec ChangeLog
 
@@ -22,6 +22,9 @@ install: all
        cp -a $(datas) $(DESTDIR)$(datadir)/
        mkdir -p $(DESTDIR)$(bindir)
        install -m 755 autocluster.hack $(DESTDIR)$(bindir)/autocluster
+       ln -s \
+               $(datadir)/test-ctdb/autocluster-test-ctdb.sh \
+               $(DESTDIR)$(bindir)/autocluster-test-ctdb
 
 debian/changelog: debian/changelog.in always
        sed -e "s/@@DATE@@/$$(date '+%a, %e %b %Y %T %z')/" -e "s/@@VERSION@@/$(version)/" $< > $@
index a7dc7cacec10e14e24b34adcf4b9c1e0991f1dab..c1036964c941c1b8b446e1622a4b5b5a814cc32c 100644 (file)
@@ -49,3 +49,4 @@ rm -rf $RPM_BUILD_ROOT
 %dir %{_prefix}/share/autocluster
 %{_prefix}/share/autocluster/*
 %{_bindir}/autocluster
+%{_bindir}/autocluster-test-ctdb
diff --git a/test-ctdb/README.md b/test-ctdb/README.md
new file mode 100644 (file)
index 0000000..9a4909a
--- /dev/null
@@ -0,0 +1,44 @@
+What is this?
+-------------
+
+This is a wrapper script around autocluster, plus some configuration
+fragments, for building and testing CTDB from specified Samba git
+branches.
+
+How do I use this?
+------------------
+
+1. Create `test-repos.yml`
+
+   This should contain an autocluster repositories list for any
+   non-standard packages that need to be installed when building
+   clusters.  This file can be empty.
+
+2. Create a `git/` subdirectory (or symlink)
+
+   Populate this with any necessary remotes for the branches being
+   tested.
+
+3. Create a BRANCHES file
+
+   This contains branch names, one per line; comment lines start
+   with '#'.
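+
+   A minimal example (branch names are illustrative):
+
+   ```
+   # branches under test
+   origin/master
+   origin/v4-12-test
+   ```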
+
+4. Run `autocluster-test-ctdb`.
+
+Branches can also be specified on the command-line.  For example:
+
+```
+autocluster-test-ctdb origin/master origin/v4-12-test
+```
+
+Dependencies
+------------
+
+There should be very few dependencies, since the host machine only
+needs to build a source tarball.  The most likely dependencies will
+relate to DocBook.
+
+On CentOS 7 the following were needed:
+
+* docbook-dtds
+* docbook-style-xsl
diff --git a/test-ctdb/autocluster-test-ctdb.sh b/test-ctdb/autocluster-test-ctdb.sh
new file mode 100755 (executable)
index 0000000..aaee950
--- /dev/null
@@ -0,0 +1,421 @@
+#!/bin/sh
+
+# Test CTDB in specified Samba git branches
+
+# Copyright (C) Martin Schwenke  2020
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+
+# 1. Build a tarball for each branch
+#
+# 2. Batch builds binary tarballs for each branch (if more than 1)
+#
+# 3. For each branch:
+#
+#    a. Build test cluster for each branch with 2 test nodes
+#    b. Run CTDB local tests on 1 test node
+#    c. Run CTDB cluster tests from other test node
+#
+#    (b) and (c) are run in parallel
+
+set -e
+
+scriptdir=$(dirname "$(realpath "$0")")
+basedir="$PWD"
+
+gitdir="${basedir}/git"
+testsdir="${basedir}/tests"
+logdir="${basedir}/log"
+
+mkdir -pv "$testsdir" "$logdir"
+
+build_cluster="build"
+
+test_cluster="test"
+ltest_node="${test_cluster}test1"
+ctest_node="${test_cluster}test2"
+
+test_ctdb_state=".test_ctdb"
+mkdir -p "$test_ctdb_state"
+
+####################
+
+# Update git repository in $PWD to build branch $_branch
+#
+# The repository directory must have remotes set up for desired
+# branches
+update_repo ()
+{
+       _branch="$1"
+
+       git checkout master
+       git branch -D build || true
+       git remote update -p "${_branch%%/*}" || return 1
+       git checkout -b build "$_branch" || return 1
+       git clean -fx
+}
+
+# Build tarball for given $_branch, write info to stdout
+#
+# A versioned subdirectory is created under $testsdir/.builds, with a
+# symlink named after the branch pointing to it
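+#
+# The info line written to stdout has the form
+#   <branch> <build-directory> <tarball-prefix> <tarball-path>
+# and is read back later from the tarball-info file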
+build_tarball ()
+{
+       _branch="$1"
+
+       cd "$gitdir"
+
+       _flat=$(echo "$_branch" | tr '/' '-')
+       _log="${logdir}/${_flat}-update-repo.log"
+       if ! update_repo "$_branch" >"$_log" 2>&1 ; then
+               echo "ERROR: Failed to update git repo for \"${_branch}\":" >&2
+               cat "$_log" >&2
+               rm "$_log"
+               exit 1
+       fi
+       rm "$_log"
+
+       _version=$("${scriptdir}/samba_version.py")
+       _prefix="samba-${_version}"
+       _subdir=".builds/${_prefix}"
+       _dir="${testsdir}/${_subdir}"
+       mkdir -p "$_dir"
+
+       _tarball="${_prefix}.tar.gz"
+       _dest="${_dir}/${_tarball}"
+
+       # Early return if destination tarball already exists
+       if [ -r "$_dest" ] ; then
+               # Print tarball info
+               echo "${_branch} ${_dir} ${_prefix} ${_dest}"
+               return
+       fi
+
+       _log="${logdir}/${_flat}-make-dist.log"
+       if ! make dist >"$_log" 2>&1 ; then
+               echo "ERROR: Failed to create tarball for \"${_branch}\":" >&2
+               cat "$_log" >&2
+               rm "$_log"
+               exit 1
+       fi
+
+       if [ ! -r "$_tarball" ] ; then
+               echo "ERROR: Unable to read tarball \"${_tarball}\":" >&2
+               cat "$_log" >&2
+               rm "$_log"
+               exit 1
+       fi
+       rm "$_log"
+
+       mv "$_tarball" "$_dest"
+
+       # Create link for branch name
+       _t="${testsdir}/${_flat}"
+       rm -f "$_t"
+       ln -s "${_subdir}" "${_t}"
+
+       # Print tarball info
+       echo "${_branch} ${_dir} ${_prefix} ${_dest}"
+}
+
+# Create tarballs for multiple branches, printing summary
+# information to stdout
+build_tarballs ()
+{
+       # Create tarballs
+       for _branch ; do
+               (build_tarball "$_branch") || exit $?
+       done
+}
+
+####################
+
+binary_tarball_status ()
+{
+       _tarball_info="$1"
+
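+       # Print one status line per branch, e.g. (version hypothetical):
+       #   OK         origin/master             samba-4.13.0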
+       # shellcheck disable=SC2094
+       # Nothing is writing to $_tarball_info here
+       while read -r _branch _dir _prefix _tarball ; do
+               _binary="${_dir}/${_prefix}.build/binary.tar.gz"
+               # shellcheck disable=SC2039
+               # -nt is non-portable but useful here ;-)
+               if [ ! -r "$_binary" ] ; then
+                       _str="MISSING"
+               elif [ "$_binary" -nt "$_tarball_info" ] ; then
+                       _str="OK"
+               else
+                       _str="SKIPPED"
+               fi
+               printf '  %-10s %-25s %s\n' "$_str" "$_branch" "$_prefix"
+       done <"$_tarball_info"
+}
+
+build_binaries_batch ()
+{
+       _tarball_info="$1"
+
+       # Create build-tarballs.yml
+       {
+               echo "tarballs:"
+               while read -r _branch _dir _prefix _tarball ; do
+                       echo "- ${_tarball}"
+               done <"$_tarball_info"
+       } >"${test_ctdb_state}/build-tarballs.yml"
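+       # The generated file lists the source tarballs to build, e.g.:
+       #   tarballs:
+       #   - <testsdir>/.builds/samba-<version>/samba-<version>.tar.gz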
+
+       # Build binary tarballs
+       _log="${logdir}/build.log"
+       _begin=$(date '+%s')
+       autocluster cluster "$build_cluster" build >"$_log" 2>&1
+       _status=$?
+       _end=$(date '+%s')
+       #autocluster cluster "$build_cluster" destroy >>"$_log" 2>&1
+       _seconds=$((_end - _begin))
+       _time_taken=$(TZ=UTC date --date="@${_seconds}" '+%T')
+
+       if [ $_status -ne 0 ] ; then
+               echo "BUILD FAILED after ${_time_taken}, for details see ${_log}"
+               exit 1
+       fi
+
+       # Summarise builds
+       binary_tarball_status "$_tarball_info"
+       echo
+       echo "Batch build took ${_time_taken}"
+}
+
+
+####################
+
+cluster_hacks ()
+{
+    # A hack to switch CTDB's NFS call-out to use systemd services
+    # Needed for versions <= 4.10
+    for n in "${test_cluster}n1" "${test_cluster}n2" \
+            "${test_cluster}n3" "${test_cluster}n4" ; do
+       ssh "$n" tee -a /etc/sysconfig/ctdb <<EOF
+CTDB_NFS_DISTRO_STYLE="systemd-redhat"
+export CTDB_NFS_DISTRO_STYLE
+EOF
+    done
+
+    if [ "$AUTOBUILD_USE_IPV6" = "yes" ] ; then
+           "${scriptdir}/ipv6ify" "$test_cluster"
+    fi
+}
+
+create_test_cluster ()
+{
+       _tarball="$1"
+
+       echo "tarball: ${_tarball}" >"${test_ctdb_state}/test-tarball.yml"
+
+       autocluster cluster "$test_cluster" build && \
+               cluster_hacks
+}
+
+test_failures ()
+{
+       _log="$1"
+
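+       # Extract result lines of the form
+       #   *FAILED* (or *ERROR*, *TIMEDOUT*) <path>/ctdb/tests/<test>
+       # and print the marker plus the test name relative to ctdb/tests/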
+       _failpat='^(\*FAILED\*|\*ERROR\*|\*TIMEDOUT\*)'
+       sed -r -n -e "s@${_failpat} .*/ctdb/tests/(.*)@      \\1 \\2@p" "$_log"
+}
+
+run_ctdb_tests ()
+{
+       _branch="$1"
+       _prefix="$2"
+       _log_dir="$3"
+       _time_start="$4"
+
+       _llog_pass="${_log_dir}/local.pass"
+       _llog_time="${_llog_pass%.pass}.timeout"
+       _llog_fail="${_llog_pass%.pass}.fail"
+       (
+               # Add -L for local daemons tests if it exists
+               _extra_opts=""
+               if ssh -n "$ltest_node" "ctdb_run_tests -h" | grep -Fq -- '  -L'
+               then
+                       _extra_opts='-L'
+               fi
+               timeout 90m ssh -n "$ltest_node" \
+                       "ctdb_run_tests -C ${_extra_opts}" >"$_llog_pass" 2>&1
+               _status=$?
+               if [ $_status -eq 124 ]; then
+                       mv "$_llog_pass" "$_llog_time"
+               elif [ $_status -ne 0 ]; then
+                       mv "$_llog_pass" "$_llog_fail"
+               fi
+       ) &
+       _lpid=$!
+
+       _clog_pass="${_log_dir}/cluster.pass"
+       _clog_time="${_clog_pass%.pass}.timeout"
+       _clog_fail="${_clog_pass%.pass}.fail"
+       (
+               timeout 180m ssh -n "$ctest_node" \
+                       "ctdb_run_cluster_tests -C" >"$_clog_pass" 2>&1
+               _status=$?
+               if [ $_status -eq 124 ]; then
+                       mv "$_clog_pass" "$_clog_time"
+               elif [ $_status -ne 0 ]; then
+                       mv "$_clog_pass" "$_clog_fail"
+               fi
+       ) &
+       _cpid=$!
+
+       wait $_lpid $_cpid
+
+       _time_end=$(date '+%s')
+       _seconds=$((_time_end - _time_start))
+       _time_taken=$(TZ=UTC date --date="@${_seconds}" '+%T')
+
+       if [ ! -r "$_llog_pass" ] || [ ! -r "$_clog_pass" ] ; then
+               _status=1
+               _status_str="FAILED"
+       else
+               _status=0
+               _status_str="PASSED"
+       fi
+       printf '  %-10s %-25s %-35s [%s]\n' \
+              "$_status_str" "$_branch" "$_prefix" "$_time_taken"
+
+       # List test failures
+       if [ -r "$_llog_time" ] ; then
+               echo "    local TIMED OUT:"
+               test_failures "$_llog_time"
+       elif [ -r "$_llog_fail" ] ; then
+               echo "    local FAILED:"
+               test_failures "$_llog_fail"
+       fi
+       if [ -r "$_clog_time" ] ; then
+               echo "    cluster TIMED OUT:"
+               test_failures "$_clog_time"
+       elif [ -r "$_clog_fail" ] ; then
+               echo "    cluster FAILED:"
+               test_failures "$_clog_fail"
+       fi
+
+       for _n in $(ssh -n "$ctest_node" onnode -q all hostname) ; do
+               scp -q "${_n}:/var/log/messages" "${_log_dir}/${_n}.messages"
+       done
+
+       return $_status
+}
+
+###################################
+
+test_branches ()
+{
+       echo "Starting at $(date)"
+
+       if [ $# -eq 0 ] ; then
+               _branches=$(grep -Ev '^$|^#' BRANCHES)
+       else
+               _branches="$*"
+       fi
+
+       _tarball_info="${test_ctdb_state}/tarball-info"
+
+       # shellcheck disable=SC2086
+       # Intentionally split $_branches on whitespace
+       build_tarballs $_branches >"$_tarball_info"
+
+       cat <<EOF
+BUILD
+=====
+
+EOF
+       # Count the number of missing binary builds to see if a batch
+       # build is necessary.  If there is only 1 build then this can
+       # be done during creation of the test cluster with less
+       # overhead than building a standalone build node.
+       _num=0
+       while read -r _branch _dir _prefix _tarball ; do
+               _binary="${_dir}/${_prefix}.build/binary.tar.gz"
+               if [ ! -r "$_binary" ] ; then
+                       _num=$((_num + 1))
+               fi
+       done <"$_tarball_info"
+       if [ "$_num" -gt 1 ] ; then
+               build_binaries_batch "$_tarball_info"
+       else
+               binary_tarball_status "$_tarball_info"
+               echo
+               if [ "$_num" -eq 0 ] ; then
+                       echo "Skipping batch build (0 builds needed)"
+               else
+                       echo "Skipping batch build (only 1 build needed)"
+               fi
+       fi
+
+       cat <<EOF
+
+TEST
+====
+
+EOF
+
+       _status=0
+
+       while read -r _branch _dir _prefix _tarball ; do
+               _time_start=$(date '+%s')
+               _datetime=$(date --date="@${_time_start}" '+%Y%m%d-%H%M%S')
+               _log_dir="${_dir}/${_prefix}.test/${_datetime}"
+               mkdir -p "$_log_dir"
+
+               _log="${_log_dir}/create_test_cluster.log"
+               if ! create_test_cluster "$_tarball" >"$_log" 2>&1 ; then
+                       echo "Unable to create test cluster for \"${_branch}\""
+                       echo "See \"${_log}\" for details"
+                       continue
+               fi
+
+               run_ctdb_tests \
+                       "$_branch" "$_prefix" "$_log_dir" "$_time_start" || \
+                       _status=$?
+       done <"$_tarball_info"
+
+       return $_status
+}
+
+if ! type python3 >/dev/null 2>&1 ; then
+    export PYTHON=python
+fi
+
+# Usage:
+# autocluster-test-ctdb.sh                # Run on branches in BRANCHES file
+# autocluster-test-ctdb.sh <branch> ...   # Run on <branch> ...
+
+(
+       if ! flock -n 9 ; then
+               echo "$0: FAILED - another instance is running"
+               exit 1
+       fi
+
+       # First time setup.  Ensure there is a local copy of cluster
+       # configuration files - these must be in the current
+       # directory.
+       build_config="${basedir}/build.yml"
+       if [ ! -f "$build_config" ] ; then
+               cp -v "${scriptdir}/build.yml" "$build_config"
+       fi
+       test_config="${basedir}/test.yml"
+       if [ ! -f "$test_config" ] ; then
+               cp -v "${scriptdir}/test.yml" "$test_config"
+       fi
+
+       test_branches "$@"
+) 9>"${test_ctdb_state}/lock" || exit $?
diff --git a/test-ctdb/build.yml b/test-ctdb/build.yml
new file mode 100644 (file)
index 0000000..01ee673
--- /dev/null
@@ -0,0 +1,9 @@
+# https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-Vagrant-8.2.2004-20200611.2.x86_64.vagrant-libvirt.box
+vagrant_box: CentOS-8.2
+
+node_list: [tbuild]
+firstip: 70
+
+cpus: 8
+
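+# build-tarballs.yml is generated by autocluster-test-ctdb.sh and lists
+# the source tarballs to build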
+include: .test_ctdb/build-tarballs.yml
diff --git a/test-ctdb/ipv6ify b/test-ctdb/ipv6ify
new file mode 100755 (executable)
index 0000000..085cf05
--- /dev/null
@@ -0,0 +1,252 @@
+#!/bin/sh
+
+# Update a virtual cluster built by autocluster, with IPv6 addresses
+
+# Copyright (C) Martin Schwenke  2019, 2020
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+
+# Add IPv6 addresses on interfaces eth1, eth2, eth3.  The IPv6
+# addresses are calculated from the IPv4 addresses.  The CTDB nodes
+# and public_addresses files are updated to contain only the IPv6
+# addresses.
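+#
+# For example, with a hypothetical eth1 IPv4 address of 10.0.1.101 the
+# generated eth1 IPv6 address is fd00:a:0:1::65/64 (each octet in hex);
+# the eth2 and eth3 addresses bump the third group by 1 and 2, giving
+# fd00:a:0:2::65/64 and fd00:a:0:3::65/64.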
+
+# This nasty hack is required because vagrant-libvirt does not properly
+# support IPv6 addresses.
+
+set -eu
+
+if [ $# -ne 1 ] ; then
+       echo "usage: ${0} <cluster>"
+       exit 1
+fi
+
+cluster="$1"
+
+hosts=".autocluster/${cluster}/hosts"
+if [ ! -r "$hosts" ] ; then
+       echo "error: no file \"${hosts}\""
+       exit 1
+fi
+
+nodes=".autocluster/${cluster}/nodes"
+: >"$nodes"
+
+public_addresses=".autocluster/${cluster}/public_addresses"
+: >"$public_addresses"
+
+hosts_ipv6=".autocluster/${cluster}/hosts.ipv6"
+echo '# autocluster m1' >"$hosts_ipv6"
+
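+# Add address $2 on interface $3 of host $1, unless some interface on
+# the host already has it; print '+' when added, '.' when already there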
+maybe_add_ip ()
+{
+       _host="$1"
+       _addr="$2"
+       _iface="$3"
+
+       printf '  %s@%s: ' "$_addr" "$_iface"
+       _t=$(ssh -n "$_host" ip -o addr show to "${_addr%/*}" | \
+                   awk '{print $2}')
+       if [ -z "$_t" ] ; then
+               ssh -n "$_host" ip addr add "$_addr" dev "$_iface"
+               echo '+'
+       else
+               echo '.'
+       fi
+}
+
+echo "Confirming IPv6 addresses:"
+sort -n "$hosts" |
+while read -r ipv4 fqdn host ; do
+       case "$ipv4" in
+       \#*) continue ;;
+       esac
+
+       ipv4_rest="$ipv4"
+       a="${ipv4_rest%%.*}"
+       ipv4_rest="${ipv4_rest#*.}"
+       b="${ipv4_rest%%.*}"
+       ipv4_rest="${ipv4_rest#*.}"
+       c="${ipv4_rest%%.*}"
+       ipv4_rest="${ipv4_rest#*.}"
+       d="${ipv4_rest%%.*}"
+
+       echo "${host}:"
+
+       ssh -n "$host" sysctl -q net.ipv6.conf.all.disable_ipv6=0
+
+       #
+       # eth1
+       #
+
+       eth1_ipv6=$(printf 'fd00:%x:%x:%x::%x/64' "$a" "$b" "$c" "$d")
+       case "$host" in
+       ${cluster}n[0-9]*)
+               echo "${eth1_ipv6%/*}" >>"$nodes"
+               ;;
+       esac
+
+       echo "${eth1_ipv6%/*} ${fqdn} ${host}" >>"$hosts_ipv6"
+
+       maybe_add_ip "$host" "$eth1_ipv6" "eth1"
+
+       #
+       # eth2
+       #
+
+       eth2_c=$((c + 1))
+       eth2_ipv6=$(printf 'fd00:%x:%x:%x::%x/64' "$a" "$b" "$eth2_c" "$d")
+       case "$host" in
+       ${cluster}n[0-9]*)
+               p_d=$((d + 100))
+               p=$(printf 'fd00:%x:%x:%x::%x/64' "$a" "$b" "$eth2_c" "$p_d")
+               printf '%s\t%s\n' "$p" "eth2" >>"$public_addresses"
+               ;;
+       esac
+
+       maybe_add_ip "$host" "$eth2_ipv6" "eth2"
+
+       #
+       # eth3
+       #
+
+       eth3_c=$((c + 2))
+       eth3_ipv6=$(printf 'fd00:%x:%x:%x::%x/64' "$a" "$b" "$eth3_c" "$d")
+       case "$host" in
+       ${cluster}n[0-9]*)
+               p_d=$((d + 100))
+               p=$(printf 'fd00:%x:%x:%x::%x/64' "$a" "$b" "$eth3_c" "$p_d")
+               printf '%s\t%s\n' "$p" "eth3" >>"$public_addresses"
+               ;;
+       esac
+
+       maybe_add_ip "$host" "$eth3_ipv6" "eth3"
+done
+
+echo
+echo "${nodes}:"
+cat "$nodes"
+
+echo
+echo "${public_addresses}:"
+cat "$public_addresses"
+
+echo
+echo "${hosts_ipv6}:"
+cat "$hosts_ipv6"
+
+echo
+echo "Stopping ctdb:"
+sort -n "$hosts" |
+while read -r ipv4 _ host ; do
+       case "$ipv4" in
+       \#*) continue ;;
+       esac
+
+       case "$host" in
+       ${cluster}n[0-9]*)
+               echo "${host}"
+               ssh -n "$host" systemctl stop ctdb
+               ;;
+       esac
+done
+
+echo
+echo "Determining CTDB configuration directory"
+conf=".autocluster/${cluster}/config.yml"
+if grep -Fxq 'tarball: null' "$conf" ; then
+       # Default
+       ctdb_config_dir="/etc/ctdb"
+else
+       prefix=$(sed -n -e 's|^tarball_install_prefix: *||p' "$conf")
+       ctdb_config_dir="${prefix}/etc/ctdb"
+fi
+echo "$ctdb_config_dir"
+
+echo
+echo "Copying configuration files:"
+sort -n "$hosts" |
+while read -r ipv4 _ host ; do
+       case "$ipv4" in
+       \#*) continue ;;
+       esac
+
+       case "$host" in
+       ${cluster}n[0-9]*|${cluster}test[0-9]*)
+               dst="${host}:${ctdb_config_dir}/nodes"
+               echo "$dst"
+               scp -q "$nodes" "$dst"
+               ;;
+       esac
+
+       case "$host" in
+       ${cluster}n[0-9]*)
+               dst="${host}:${ctdb_config_dir}/public_addresses"
+               echo "$dst"
+               scp -q "$public_addresses" "$dst"
+               ;;
+       esac
+done
+
+echo
+echo "Starting ctdb:"
+sort -n "$hosts" |
+while read -r ipv4 _ host ; do
+       case "$ipv4" in
+       \#*) continue ;;
+       esac
+
+       case "$host" in
+       ${cluster}n[0-9]*)
+               echo "${host}"
+               ssh -n "$host" systemctl start ctdb
+               ;;
+       esac
+done
+
+echo
+echo "Waiting until healthy:"
+sort -n "$hosts" |
+while read -r ipv4 _ host ; do
+       case "$ipv4" in
+       \#*) continue ;;
+       esac
+
+       if [ "$host" != "${cluster}n1" ] ; then
+               continue
+       fi
+
+       timeout=120
+       printf '|<%d|' $timeout
+       for n in $(seq 1 $timeout) ; do
+               if ssh -n "$host" ctdb nodestatus all >/dev/null ; then
+                       break
+               fi
+
+               printf '.'
+               sleep 1
+       done
+
+       out=$(ssh -n "$host" ctdb nodestatus all 2>&1)
+       # shellcheck disable=SC2181
+       if [ $? -eq 0 ] ; then
+               printf '|%d|\n' "$n"
+       else
+               printf 'TIMEOUT!\n'
+       fi
+
+       echo
+       echo "$out"
+done
diff --git a/test-ctdb/samba_version.py b/test-ctdb/samba_version.py
new file mode 100755 (executable)
index 0000000..6b38c4a
--- /dev/null
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+
+'''Print Samba version when run from top level of Samba git repository'''
+
+import sys
+sys.path.insert(0, './buildtools/wafsamba')
+sys.path.insert(0, './third_party/waf')
+import samba_version
+import samba_waf18
+
+
+class ENV:
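+    '''Minimal stand-in for the waf build environment consulted by
+    samba_version_file(): it exposes GIT via get_flat(), a
+    GIT_LOCAL_CHANGES attribute and iteration over its keys.'''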
+    def __init__(self):
+        self.env = {}
+        self.env['GIT'] = '/usr/bin/git'
+        self.GIT_LOCAL_CHANGES = None
+
+    def get_flat(self, var):
+        return self.env[var]
+
+    def __iter__(self):
+        return iter(self.env)
+
+
+env = ENV()
+v = samba_version.samba_version_file('VERSION', '.', env)
+print(v.STRING)
diff --git a/test-ctdb/test.yml b/test-ctdb/test.yml
new file mode 100644 (file)
index 0000000..d04fd75
--- /dev/null
@@ -0,0 +1,16 @@
+# https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-Vagrant-8.2.2004-20200611.2.x86_64.vagrant-libvirt.box
+vagrant_box: CentOS-8.2
+
+node_list: [tbuild, ad, nas, nas, nas, nas, test, test]
+firstip: 71
+
+resolv_conf:
+  domain: sambadom.example.local
+samba:
+  workgroup: SAMBADOM
+
+include:
+  # Location of the source tarball
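+  # (test-tarball.yml is generated by autocluster-test-ctdb.sh)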
+  - .test_ctdb/test-tarball.yml
+  # Any additional repositories - e.g. for cluster filesystem
+  - test-repos.yml