+#!/bin/sh
+
+# Test CTDB in specified Samba git branches
+
+# Copyright (C) Martin Schwenke 2020
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+
+# 1. Build a source tarball for each branch
+#
+# 2. Batch-build binary tarballs for the branches (if more than 1
+#    build is needed)
+#
+# 3. For each branch:
+#
+# a. Build a test cluster with 2 test nodes
+# b. Run CTDB local tests on 1 test node
+# c. Run CTDB cluster tests from the other test node
+#
+# (b) and (c) are run in parallel
+
+set -e
+
+scriptdir=$(dirname "$(realpath "$0")")
+basedir="$PWD"
+
+gitdir="${basedir}/git"
+testsdir="${basedir}/tests"
+logdir="${basedir}/log"
+
+mkdir -pv "$testsdir" "$logdir"
+
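+# Name of the autocluster cluster used for batch binary builds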
+build_cluster="build"
+
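+# Test cluster and its 2 test nodes: local daemon tests run on the
+# first node, cluster tests are run from the second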
+test_cluster="test"
+ltest_node="${test_cluster}test1"
+ctest_node="${test_cluster}test2"
+
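+# State directory for the lock file and generated autocluster input
+# files, kept under the current directory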
+test_ctdb_state=".test_ctdb"
+mkdir -p "$test_ctdb_state"
+
+####################
+
+# Update git repository in $PWD to build branch $_branch
+#
+# The repository directory must have remotes set up for desired
+# branches
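+#
+# $_branch has the form <remote>/<branch> (e.g. "origin/master"), so
+# the remote name can be extracted with ${_branch%%/*} below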
+update_repo ()
+{
+ _branch="$1"
+
+ git checkout master
+ git branch -D build || true
+ git remote update -p "${_branch%%/*}" || return 1
+ git checkout -b build "$_branch" || return 1
+ git clean -fx
+}
+
+# Build tarball for given $_branch, write info to stdout
+#
+# A versioned subdirectory is created under $testsdir/.builds, with a
+# symlink named after the branch pointing to it
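+#
+# For example, resulting layout (version string is illustrative):
+#   tests/.builds/samba-4.13.0pre1-GIT-1234567/samba-4.13.0pre1-GIT-1234567.tar.gz
+#   tests/origin-master -> .builds/samba-4.13.0pre1-GIT-1234567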
+build_tarball ()
+{
+ _branch="$1"
+
+ cd "$gitdir"
+
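+	# Flatten the branch name (e.g. "origin/master" ->
+	# "origin-master") for use in log file and symlink names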
+ _flat=$(echo "$_branch" | tr '/' '-')
+ _log="${logdir}/${_flat}-update-repo.log"
+ if ! update_repo "$_branch" >"$_log" 2>&1 ; then
+ echo "ERROR: Failed to update git repo for \"${_branch}\":" >&2
+ cat "$_log" >&2
+ rm "$_log"
+ exit 1
+ fi
+ rm "$_log"
+
+ _version=$("${scriptdir}/samba_version.py")
+ _prefix="samba-${_version}"
+ _subdir=".builds/${_prefix}"
+ _dir="${testsdir}/${_subdir}"
+ mkdir -p "$_dir"
+
+ _tarball="${_prefix}.tar.gz"
+ _dest="${_dir}/${_tarball}"
+
+ # Early return if destination tarball already exists
+ if [ -r "$_dest" ] ; then
+ # Print tarball info
+ echo "${_branch} ${_dir} ${_prefix} ${_dest}"
+ return
+ fi
+
+ _log="${logdir}/${_flat}-make-dist.log"
+ if ! make dist >"$_log" 2>&1 ; then
+ echo "ERROR: Failed to create tarball for \"${_branch}\":" >&2
+ cat "$_log" >&2
+ rm "$_log"
+ exit 1
+ fi
+
+ if [ ! -r "$_tarball" ] ; then
+ echo "ERROR: Unable to read tarball \"${_tarball}\":" >&2
+ cat "$_log" >&2
+ rm "$_log"
+ exit 1
+ fi
+ rm "$_log"
+
+ mv "$_tarball" "$_dest"
+
+ # Create link for branch name
+ _t="${testsdir}/${_flat}"
+ rm -f "$_t"
+ ln -s "${_subdir}" "${_t}"
+
+ # Print tarball info
+ echo "${_branch} ${_dir} ${_prefix} ${_dest}"
+}
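+
+# Each line of tarball info has the form:
+#   <branch> <build-dir> <tarball-prefix> <tarball-path>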
+
+# Create tarballs for multiple branches, printing summary information
+# to stdout
+build_tarballs ()
+{
+ # Create tarballs
+ for _branch ; do
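+		# build_tarball changes directory, so run it in a
+		# subshell to preserve the caller's working directory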
+ (build_tarball "$_branch") || exit $?
+ done
+}
+
+####################
+
+binary_tarball_status ()
+{
+ _tarball_info="$1"
+
+ # shellcheck disable=SC2094
+ # Nothing is writing to $_tarball_info here
+ while read -r _branch _dir _prefix _tarball ; do
+ _binary="${_dir}/${_prefix}.build/binary.tar.gz"
+ # shellcheck disable=SC2039
+ # -nt is non-portable but useful here ;-)
+ if [ ! -r "$_binary" ] ; then
+ _str="MISSING"
+ elif [ "$_binary" -nt "$_tarball_info" ] ; then
+ _str="OK"
+ else
+ _str="SKIPPED"
+ fi
+ printf ' %-10s %-25s %s\n' "$_str" "$_branch" "$_prefix"
+ done <"$_tarball_info"
+}
+
+build_binaries_batch ()
+{
+ _tarball_info="$1"
+
+ # Create build-tarballs.yml
+ {
+ echo "tarballs:"
+ while read -r _branch _dir _prefix _tarball ; do
+ echo "- ${_tarball}"
+ done <"$_tarball_info"
+ } >"${test_ctdb_state}/build-tarballs.yml"
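+
+	# The generated file looks like (path is illustrative):
+	#
+	#   tarballs:
+	#   - /home/user/tests/.builds/samba-X.Y.Z/samba-X.Y.Z.tar.gz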
+
+ # Build binary tarballs
+ _log="${logdir}/build.log"
+ _begin=$(date '+%s')
+	# Capture the exit status without tripping "set -e"
+	_status=0
+	autocluster cluster "$build_cluster" build >"$_log" 2>&1 || _status=$?
+ _end=$(date '+%s')
+ #autocluster cluster "$build_cluster" destroy >>"$_log" 2>&1
+ _seconds=$((_end - _begin))
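+	# Format elapsed seconds as HH:MM:SS (GNU date; assumes the
+	# build takes less than 24 hours)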
+ _time_taken=$(TZ=UTC date --date="@${_seconds}" '+%T')
+
+ if [ $_status -ne 0 ] ; then
+ echo "BUILD FAILED after ${_time_taken}, for details see ${_log}"
+ exit 1
+ fi
+
+ # Summarise builds
+ binary_tarball_status "$_tarball_info"
+ echo
+ echo "Batch build took ${_time_taken}"
+}
+
+
+####################
+
+cluster_hacks ()
+{
+ # A hack to switch CTDB's NFS call-out to use systemd services
+ # Needed for versions <= 4.10
+	for _n in "${test_cluster}n1" "${test_cluster}n2" \
+		  "${test_cluster}n3" "${test_cluster}n4" ; do
+		ssh "$_n" tee -a /etc/sysconfig/ctdb <<EOF
+CTDB_NFS_DISTRO_STYLE="systemd-redhat"
+export CTDB_NFS_DISTRO_STYLE
+EOF
+	done
+
+ if [ "$AUTOBUILD_USE_IPV6" = "yes" ] ; then
+ "${scriptdir}/ipv6ify" "$test_cluster"
+ fi
+}
+
+create_test_cluster ()
+{
+ _tarball="$1"
+
+ echo "tarball: ${_tarball}" >"${test_ctdb_state}/test-tarball.yml"
+
+ autocluster cluster "$test_cluster" build && \
+ cluster_hacks
+}
+
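+# Extract failing tests from a test run log
+#
+# Matches lines beginning with *FAILED*, *ERROR* or *TIMEDOUT* that
+# name a test under ctdb/tests, e.g. (path is illustrative):
+#   *FAILED* /usr/local/samba/share/ctdb/tests/simple/00_ctdb_init.sh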
+test_failures ()
+{
+ _log="$1"
+
+ _failpat='^(\*FAILED\*|\*ERROR\*|\*TIMEDOUT\*)'
+ sed -r -n -e "s@${_failpat} .*/ctdb/tests/(.*)@ \\1 \\2@p" "$_log"
+}
+
+run_ctdb_tests ()
+{
+ _branch="$1"
+ _prefix="$2"
+ _log_dir="$3"
+ _time_start="$4"
+
+ _llog_pass="${_log_dir}/local.pass"
+ _llog_time="${_llog_pass%.pass}.timeout"
+ _llog_fail="${_llog_pass%.pass}.fail"
+ (
+ # Add -L for local daemons tests if it exists
+ _extra_opts=""
+ if ssh -n "$ltest_node" "ctdb_run_tests -h" | grep -Fq -- ' -L'
+ then
+ _extra_opts='-L'
+ fi
+		# Capture the exit status without tripping "set -e"
+		_status=0
+		timeout 90m ssh -n "$ltest_node" \
+			"ctdb_run_tests -C ${_extra_opts}" \
+			>"$_llog_pass" 2>&1 || _status=$?
+ if [ $_status -eq 124 ]; then
+ mv "$_llog_pass" "$_llog_time"
+ elif [ $_status -ne 0 ]; then
+ mv "$_llog_pass" "$_llog_fail"
+ fi
+ ) &
+ _lpid=$!
+
+ _clog_pass="${_log_dir}/cluster.pass"
+ _clog_time="${_clog_pass%.pass}.timeout"
+ _clog_fail="${_clog_pass%.pass}.fail"
+ (
+		# Capture the exit status without tripping "set -e"
+		_status=0
+		timeout 180m ssh -n "$ctest_node" \
+			"ctdb_run_cluster_tests -C" \
+			>"$_clog_pass" 2>&1 || _status=$?
+ if [ $_status -eq 124 ]; then
+ mv "$_clog_pass" "$_clog_time"
+ elif [ $_status -ne 0 ]; then
+ mv "$_clog_pass" "$_clog_fail"
+ fi
+ ) &
+ _cpid=$!
+
+ wait $_lpid $_cpid
+
+ _time_end=$(date '+%s')
+ _seconds=$((_time_end - _time_start))
+ _time_taken=$(TZ=UTC date --date="@${_seconds}" '+%T')
+
+	if [ ! -r "$_llog_pass" ] || [ ! -r "$_clog_pass" ] ; then
+		_status=1
+		_status_str="FAILED"
+	else
+		_status=0
+		_status_str="PASSED"
+	fi
+ printf ' %-10s %-25s %-35s [%s]\n' \
+ "$_status_str" "$_branch" "$_prefix" "$_time_taken"
+
+ # List test failures
+ if [ -r "$_llog_time" ] ; then
+ echo " local TIMED OUT:"
+ test_failures "$_llog_time"
+ elif [ -r "$_llog_fail" ] ; then
+ echo " local FAILED:"
+ test_failures "$_llog_fail"
+ fi
+ if [ -r "$_clog_time" ] ; then
+ echo " cluster TIMED OUT:"
+ test_failures "$_clog_time"
+ elif [ -r "$_clog_fail" ] ; then
+ echo " cluster FAILED:"
+ test_failures "$_clog_fail"
+ fi
+
+ for _n in $(ssh -n "$ctest_node" onnode -q all hostname) ; do
+ scp -q "${_n}:/var/log/messages" "${_log_dir}/${_n}.messages"
+ done
+
+ return $_status
+}
+
+###################################
+
+test_branches ()
+{
+ echo "Starting at $(date)"
+
+ if [ $# -eq 0 ] ; then
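+		# The BRANCHES file lists one <remote>/<branch> per
+		# line; blank lines and "#" comments are ignored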
+ _branches=$(grep -Ev '^$|^#' BRANCHES)
+ else
+ _branches="$*"
+ fi
+
+ _tarball_info="${test_ctdb_state}/tarball-info"
+
+ # shellcheck disable=SC2086
+ # Intentionally split $_branches on whitespace
+ build_tarballs $_branches >"$_tarball_info"
+
+ cat <<EOF
+BUILD
+=====
+
+EOF
+	# Count the number of missing binary builds to see if a batch
+	# build is necessary.  If only 1 build is needed then it can be
+	# done during creation of the test cluster, with less overhead
+	# than standing up a standalone build node.
+ _num=0
+ while read -r _branch _dir _prefix _tarball ; do
+ _binary="${_dir}/${_prefix}.build/binary.tar.gz"
+ if [ ! -r "$_binary" ] ; then
+ _num=$((_num + 1))
+ fi
+ done <"$_tarball_info"
+ if [ "$_num" -gt 1 ] ; then
+ build_binaries_batch "$_tarball_info"
+ else
+ binary_tarball_status "$_tarball_info"
+ echo
+ if [ "$_num" -eq 0 ] ; then
+ echo "Skipping batch build (0 builds needed)"
+ else
+ echo "Skipping batch build (only 1 build needed)"
+ fi
+ fi
+
+ cat <<EOF
+
+TEST
+====
+
+EOF
+
+ _status=0
+
+ while read -r _branch _dir _prefix _tarball ; do
+ _time_start=$(date '+%s')
+ _datetime=$(date --date="@${_time_start}" '+%Y%m%d-%H%M%S')
+ _log_dir="${_dir}/${_prefix}.test/${_datetime}"
+ mkdir -p "$_log_dir"
+
+ _log="${_log_dir}/create_test_cluster.log"
+		if ! create_test_cluster "$_tarball" >"$_log" 2>&1 ; then
+			echo "Unable to create test cluster for \"${_branch}\""
+			echo "See \"${_log}\" for details"
+			_status=1
+			continue
+		fi
+
+ run_ctdb_tests \
+ "$_branch" "$_prefix" "$_log_dir" "$_time_start" || \
+ _status=$?
+ done <"$_tarball_info"
+
+ return $_status
+}
+
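+# If python3 is unavailable, tell the build scripts to use plain
+# "python" instead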
+if ! type python3 >/dev/null 2>&1 ; then
+ export PYTHON=python
+fi
+
+# Usage:
+# autocluster-test-ctdb.sh # Run on branches in BRANCHES file
+# autocluster-test-ctdb.sh <branch> ... # Run on <branch> ...
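+#
+# Branches are given as <remote>/<branch>, e.g. "origin/master"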
+
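+# Hold an exclusive lock on fd 9 for the duration of the main
+# subshell so that only one instance can run at a time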
+(
+ if ! flock -n 9 ; then
+		echo "$0: FAILED - another instance is running" >&2
+ exit 1
+ fi
+
+	# First-time setup.  Ensure there are local copies of the
+	# cluster configuration files - these must be in the current
+	# directory.
+ build_config="${basedir}/build.yml"
+ if [ ! -f "$build_config" ] ; then
+ cp -v "${scriptdir}/build.yml" "$build_config"
+ fi
+ test_config="${basedir}/test.yml"
+ if [ ! -f "$test_config" ] ; then
+ cp -v "${scriptdir}/test.yml" "$test_config"
+ fi
+
+ test_branches "$@"
+) 9>"${test_ctdb_state}/lock" || exit $?