1 # Hey Emacs, this is a -*- shell-script -*- !!! :-)
9 ######################################################################
# NOTE(review): fragment view — the enclosing test-begin function's
# header and closing lines are not visible here.
# Record the test start time (epoch seconds); used later to compute
# the test duration.
15 teststarttime=$(date '+%s')
# Banner announcing the test name and wall-clock start time.
18 echo "--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"
19 echo "Running test $name ($(date '+%T'))"
20 echo "--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"
# NOTE(review): fragment view — the function header and several
# interior lines (presumably the PASSED/FAILED branches of the two
# "if" statements below) are not visible.
25 local name="$1" ; shift
26 local status="$1" ; shift
27 # "$@" is command-line
# Defaults when no numeric status was supplied: the test was skipped
# and "$@" carries the reason.
29 local interp="SKIPPED"
30 local statstr=" (reason $*)"
31 if [ -n "$status" ] ; then
32 if [ $status -eq 0 ] ; then
# Non-zero status: record it and bump the global failure counter.
38 statstr=" (status $status)"
39 testfailures=$(($testfailures+1))
# Duration in whole seconds since $teststarttime (set at test begin).
43 testduration=$(($(date +%s)-$teststarttime))
45 echo "=========================================================================="
46 echo "TEST ${interp}: ${name}${statstr} (duration: ${testduration}s)"
47 echo "=========================================================================="
# Exit with the accumulated failure count (0 == all tests passed).
53 exit $(($testfailures+0))
# NOTE(review): fragment view — this appears to be the interior of the
# exit handler installed later via 'trap "ctdb_test_exit" 0'; its
# header is not visible.
# If no failures were recorded but the script is exiting non-zero,
# adopt the exit status as the failure indicator.
62 [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
63 status=$(($testfailures+0))
65 # Avoid making a test fail from this point onwards. The test is
69 echo "*** TEST COMPLETE (RC=$status), CLEANING UP..."
# Run any registered exit hooks; "|| true" keeps cleanup best-effort.
71 eval "$ctdb_test_exit_hook" || true
72 unset ctdb_test_exit_hook
# Restart the cluster if a restart was scheduled mid-test or the
# cluster is no longer healthy.
74 if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then
78 # This could be made unconditional but then we might get
79 # duplication from the recovery in restart_ctdb. We want to
80 # leave the recovery in restart_ctdb so that future tests that
81 # might do a manual restart mid-test will benefit.
82 echo "Forcing a recovery..."
# ctdb_test_exit_hook_add: register a cleanup command to run when the
# test exits.  Hooks accumulate in $ctdb_test_exit_hook, chained with
# " ; " so they run in registration order (the exit handler evals the
# whole chain).  The ${var:+...} expansion only inserts the separator
# when a hook is already present.
# Fix: the presented fragment lacked the function's brace delimiters
# and carried listing-number artifacts; restored as valid shell.
ctdb_test_exit_hook_add ()
{
    ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}
# NOTE(review): fragment view — interior of the test-run wrapper; the
# function header and the lines that actually execute the command and
# capture $status are not visible.
96 local name="$1" ; shift
# If no explicit command was given, run the command named by $name.
98 [ -n "$1" ] || set -- "$name"
100 ctdb_test_begin "$name"
105 ctdb_test_end "$name" "$status" "$*"
118 -h, --help show this screen.
119 -v, --version show test case version.
120 --category show the test category (ACL, CTDB, Samba ...).
121 -d, --description show test case description.
122 --summary show short test case summary.
123 -x trace test using set -x
# NOTE(review): the lines above look like the body of a usage here-doc
# (runtime output) — left byte-identical.  Below: version reporting;
# requires CTDB_DIR, then describes the git tree.
131 [ -n "$CTDB_DIR" ] || fail "Can not determine version."
133 (cd "$CTDB_DIR" && git describe)
# ctdb_test_cmd_options: parse the test script's command-line options.
# NOTE(review): fragment view — the case statement's opening/closing
# lines and the default arm's surroundings are not visible.
136 ctdb_test_cmd_options()
# Nothing to parse when no arguments were given.
138 [ -n "$1" ] || return 0
141 -h|--help) ctdb_test_usage 0 ;;
142 -v|--version) ctdb_test_version ;;
143 --category) echo "CTDB" ;;
144 -d|--description) test_info ;;
# -x: enable shell tracing for the remainder of the test.
145 -x) set -x ; return 0 ;;
147 echo "Error: Unknown parameter = $1"
# Top-level test-script initialisation: record the script's basename,
# clear the scheduled-restart flag, parse command-line options and
# arrange for the exit handler to run when the script terminates.
scriptname=$(basename "$0")

ctdb_test_restart_scheduled=false

# Fix: quote "$@" so options containing whitespace are passed to the
# option parser as single words instead of being word-split.
ctdb_test_cmd_options "$@"

trap "ctdb_test_exit" 0
# ctdb_test_check_real_cluster: succeed only when running against a
# real/virtual cluster (CTDB_TEST_REAL_CLUSTER set), otherwise print
# an error.
# NOTE(review): fragment view — the error-path return is not visible.
167 ctdb_test_check_real_cluster ()
169 [ -n "$CTDB_TEST_REAL_CLUSTER" ] && return 0
171 echo "ERROR: This test must be run on a real/virtual cluster, not local daemons."
175 ########################################
# try_command_on_node: run a command on the given node(s) via onnode,
# capturing combined stdout/stderr in $out.  Leading "-*" arguments
# are treated as onnode options (-v also enables verbose reporting).
# NOTE(review): fragment view — assembly of $cmd and the failure-path
# lines between the visible ones are not shown.
178 try_command_on_node ()
180 local nodespec="$1" ; shift
# Consume leading option-like arguments destined for onnode.
185 while [ "${nodespec#-}" != "$nodespec" ] ; do
186 if [ "$nodespec" = "-v" ] ; then
189 onnode_opts="$nodespec"
191 nodespec="$1" ; shift
# Run quietly; on failure, report which command/node(s) failed.
196 out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
198 echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
204 echo "Output of \"$cmd\":"
# sanity_check_output: verify that $out(put) has at least the required
# number of lines and that every line matches the given regexp.
# NOTE(review): fragment view — the function's opening brace, the
# $min_lines local and the final return are not visible.
209 sanity_check_output ()
212 local regexp="$2" # Should be anchored as necessary.
217 local num_lines=$(echo "$output" | wc -l)
218 echo "There are $num_lines lines of output"
219 if [ $num_lines -lt $min_lines ] ; then
220 echo "BAD: that's less than the required number (${min_lines})"
225 local unexpected # local doesn't pass through status of command on RHS.
# egrep -v selects lines that do NOT match, so success here is bad.
226 unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?
228 # Note that this is reversed.
229 if [ $status -eq 0 ] ; then
230 echo "BAD: unexpected lines in output:"
# cat -A exposes non-printing characters in the offending lines.
231 echo "$unexpected" | cat -A
234 echo "Output lines look OK"
# NOTE(review): interior of an IP-layout sanity check — parses
# "ctdb ip -n all" output and fails if any IP is unassigned (-1) or
# (presumably, via the unseen branch) all IPs sit on a single node.
242 local ips="$1" # Output of "ctdb ip -n all"
244 echo "Sanity checking IPs..."
# Break (=> failure below) on an unassigned IP; compare successive
# node numbers to detect IPs spread over more than one node.
248 while read x ipp ; do
249 [ "$ipp" = "-1" ] && break
250 if [ -n "$prev" -a "$ipp" != "$prev" ] ; then
257 echo "BAD: a node was -1 or IPs are only assigned to one node"
258 echo "Are you running an old version of CTDB?"
# select_test_node_and_ips: pick a node with public IPs and export
# $test_node, $test_node_ips (space-separated) and $test_ip (globals).
262 select_test_node_and_ips ()
# Fetch the IP layout, dropping the header line, into $out.
264 try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
266 # When selecting test_node we just want a node that has public
267 # IPs. This will work and is economically semi-random. :-)
269 read x test_node <<<"$out"
# Collect every IP currently hosted by the selected node.
273 while read ip pnn ; do
274 if [ "$pnn" = "$test_node" ] ; then
275 test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
277 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
279 echo "Selected node ${test_node} with IPs: ${test_node_ips}."
# The first collected IP becomes the primary test IP.
280 test_ip="${test_node_ips%% *}"
283 #######################################
285 # Wait until either timeout expires or command succeeds. The command
286 # will be tried once per second.
# NOTE(review): fragment view — the function header, the per-second
# retry/sleep and the timeout-failure return are not visible.
289 local timeout="$1" ; shift # "$@" is the command...
# Progress markers: "<timeout|" up front, then "|elapsed|" on success.
291 echo -n "<${timeout}|"
293 while [ $t -gt 0 ] ; do
295 echo "|$(($timeout - $t))|"
# NOTE(review): single visible line of a sleep/progress helper — loops
# $1 times (presumably sleeping 1s per iteration; body not visible).
312 for i in $(seq 1 $1) ; do
# _cluster_is_healthy: return 0 iff "ctdb -Y status" is readable and
# no node line carries a 1 in its 4th ':'-separated status field.
# NOTE(review): fragment view — the read loop consuming $out is only
# partially visible (its "while read line" head is not shown).
319 _cluster_is_healthy ()
321 local out x count line
323 out=$(ctdb -Y status 2>&1) || return 1
329 count=$(($count + 1))
# A line matching :*:*:*1: marks an unhealthy node => fail fast.
330 [ "${line##:*:*:*1:}" != "$line" ] && return 1
# Healthy only if at least one node line was actually seen.
332 [ $count -gt 0 ] && return $?
333 } <<<"$out" # Yay bash!
# cluster_is_healthy: run the health check on node 0 and print a
# verdict; on UNHEALTHY (unless a restart is already scheduled) dump
# debug output from the commands listed in the for-loop below.
336 cluster_is_healthy ()
338 if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
339 echo "Cluster is HEALTHY"
342 echo "Cluster is UNHEALTHY"
343 if ! ${ctdb_test_restart_scheduled:-false} ; then
346 for i in "onnode -q 0 ctdb status" "onnode -q 0 onnode all ctdb scriptstatus" ; do
# wait_until_healthy: poll until the cluster reports healthy or the
# timeout expires.  $1 optionally overrides the default 120s timeout.
# Bug fix: the timeout was captured in $timeout but then ignored — the
# wait_until call hard-coded 120, making the argument dead.  Pass
# $timeout through instead.
wait_until_healthy ()
{
    local timeout="${1:-120}"

    echo "Waiting for cluster to become healthy..."

    wait_until $timeout _cluster_is_healthy
}
364 # This function is becoming nicely overloaded. Soon it will collapse! :-)
# NOTE(review): interior of a node_has_status-style helper — maps a
# symbolic status name to either a ':'-separated bit pattern ($bits)
# checked against "ctdb -Y status", a "ctdb statistics" regexp
# ($fpat), or a "ctdb getmonmode" regexp ($mpat).  The case statement
# delimiters and the function header are not visible.
372 (unhealthy) bits="?:?:?:1:*" ;;
373 (healthy) bits="?:?:?:0:*" ;;
374 (disconnected) bits="1:*" ;;
375 (connected) bits="0:*" ;;
376 (banned) bits="?:1:*" ;;
377 (unbanned) bits="?:0:*" ;;
378 (disabled) bits="?:?:1:*" ;;
379 (enabled) bits="?:?:0:*" ;;
380 (stopped) bits="?:?:?:?:1:*" ;;
381 (notstopped) bits="?:?:?:?:0:*" ;;
382 (frozen) fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
383 (unfrozen) fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
384 (monon) mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
385 (monoff) mpat='^Monitoring mode:DISABLED \(1\)$' ;;
387 echo "node_has_status: unknown status \"$status\""
# Bit-pattern statuses: scan "ctdb -Y status" for this node's line.
391 if [ -n "$bits" ] ; then
394 out=$(ctdb -Y status 2>&1) || return 1
399 # This needs to be done in 2 steps to avoid false matches.
# Strip the ":<pnn>:<addr>:" prefix; unchanged => other node's line.
400 local line_bits="${line#:${pnn}:*:}"
401 [ "$line_bits" = "$line" ] && continue
402 [ "${line_bits#${bits}}" != "$line_bits" ] && return 0
405 } <<<"$out" # Yay bash!
406 elif [ -n "$fpat" ] ; then
407 ctdb statistics -n "$pnn" | egrep -q "$fpat"
408 elif [ -n "$mpat" ] ; then
409 ctdb getmonmode -n "$pnn" | egrep -q "$mpat"
411 echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
# wait_until_node_has_status: poll (via wait_until on any node) until
# node $pnn reports $status; $3 optionally overrides the 30s timeout.
# On timeout, dump debug output from the commands listed below.
# NOTE(review): fragment view — the $pnn/$status locals are not
# visible here.
416 wait_until_node_has_status ()
420 local timeout="${3:-30}"
422 echo "Waiting until node $pnn has status \"$status\"..."
424 if ! onnode any $CTDB_TEST_WRAPPER wait_until $timeout node_has_status "$pnn" "$status" ; then
425 for i in "onnode -q any ctdb status" "onnode -q any onnode all ctdb scriptstatus" ; do
435 # Useful for superficially testing IP failover.
436 # IPs must be on nodes matching nodeglob.
437 ips_are_on_nodeglob ()
439 local nodeglob="$1" ; shift
# NOTE(review): fragment view — $ips is presumably initialised from
# the remaining arguments on a line not visible here, and the
# per-line nodeglob match is also unseen.
# Fetch the current IP layout from node 1 into $out.
444 try_command_on_node 1 ctdb ip -n all
446 while read ip pnn ; do
447 for check in $ips ; do
448 if [ "$check" = "$ip" ] ; then
# Remove the matched IP from the outstanding list.
453 ips="${ips/${ip}}" # Remove from list
456 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
# Success iff nothing but whitespace remains in the list.
458 ips="${ips// }" # Remove any spaces.
# wait_until_ips_are_on_nodeglob: wait (up to 60 seconds) for the
# given IPs to be hosted by nodes matching the given nodeglob.
# All arguments are passed straight through to ips_are_on_nodeglob.
# Fix: the presented fragment lacked the function's brace delimiters
# and carried listing-number artifacts; restored as valid shell.
wait_until_ips_are_on_nodeglob ()
{
    echo "Waiting for IPs to fail over..."

    wait_until 60 ips_are_on_nodeglob "$@"
}
# NOTE(review): interior of a get-source-socket helper — finds, via
# netstat, the ESTABLISHED connection from ${pid}/${prog} to
# ${dst_socket} for protocol ${proto}.  The header and most locals
# are not visible.
472 local dst_socket="$2"
# Anchored regexp over "netstat -tanp" output; dots in the socket
# address are escaped so they match literally.
476 local pat="^${proto}[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[^[:space:]]+[[:space:]]+${dst_socket//./\\.}[[:space:]]+ESTABLISHED[[:space:]]+${pid}/${prog}[[:space:]]*\$"
477 out=$(netstat -tanp |
# wait_until_get_src_socket: retry get_src_socket for up to 5 seconds
# while ${prog} establishes its connection to ${dst_socket}.
# NOTE(review): fragment view — the other locals ($proto, $pid,
# $prog) are presumably assigned on lines not visible here.
484 wait_until_get_src_socket ()
487 local dst_socket="$2"
491 echo "Waiting for ${prog} to establish connection to ${dst_socket}..."
493 wait_until 5 get_src_socket "$@"
496 #######################################
498 # filename will be in $tcpdump_filename, pid in $tcpdump_pid
# NOTE(review): interior of tcpdump_start; the function header is not
# visible.  Starts a background capture for the caller's filter and
# confirms it is live before returning.
501 tcpdump_filter="$1" # global
503 echo "Running tcpdump..."
504 tcpdump_filename=$(mktemp)
505 ctdb_test_exit_hook_add "rm -f $tcpdump_filename"
507 # The only way of being sure that tcpdump is listening is to send
508 # some packets that it will see. So we use dummy pings - the -U
509 # option to tcpdump ensures that packets are flushed to the file
510 # as they are captured.
511 local dummy_addr="127.3.2.1"
512 local dummy="icmp and dst host ${dummy_addr} and icmp[icmptype] == icmp-echo"
# Capture both the real filter and the dummy pings; kill the capture
# (and the ping below) on test exit.
513 tcpdump -n -p -s 0 -e -U -w $tcpdump_filename -i any "($tcpdump_filter) or ($dummy)" &
514 ctdb_test_exit_hook_add "kill $! >/dev/null 2>&1"
516 echo "Waiting for tcpdump output file to be ready..."
517 ping -q "$dummy_addr" >/dev/null 2>&1 &
518 ctdb_test_exit_hook_add "kill $! >/dev/null 2>&1"
# Nested helper: succeeds once the capture file holds a dummy ping.
520 tcpdump_listen_for_dummy ()
522 tcpdump -n -r $tcpdump_filename -c 1 "$dummy" >/dev/null 2>&1
525 wait_until 10 tcpdump_listen_for_dummy
528 # By default, wait for 1 matching packet.
# NOTE(review): interior of tcpdump_wait; the function header and the
# nested tcpdump_check definition line are not visible.
531 local count="${1:-1}"
532 local filter="${2:-${tcpdump_filter}}"
# Nested check: true once at least $count packets match $filter.
536 local found=$(tcpdump -n -r $tcpdump_filename "$filter" 2>/dev/null | wc -l)
537 [ $found -ge $count ]
540 echo "Waiting for tcpdump to capture some packets..."
# On timeout, dump debug output from the commands listed below.
541 if ! wait_until 30 tcpdump_check ; then
544 for i in "onnode -q 0 ctdb status" "netstat -tanp" "tcpdump -n -e -r $tcpdump_filename" ; do
# NOTE(review): interior of a tcpdump-show helper — prints captured
# packets matching $filter (defaulting to the filter the capture was
# started with).  Header not visible.
554 local filter="${1:-${tcpdump_filter}}"
556 tcpdump -n -r $tcpdump_filename "$filter" 2>/dev/null
# tcptickle_sniff_start: start a capture that will see CTDB TCP
# "tickle" ACKs (window == 1234), the peer's answering ACK and the
# resulting RST for the src/dst socket pair.
# NOTE(review): fragment view — the $src/$dst locals are assigned on
# lines not visible here.
559 tcptickle_sniff_start ()
# "in" = dst->src direction; "out" = src->dst direction.
564 local in="src host ${dst%:*} and tcp src port ${dst##*:} and dst host ${src%:*} and tcp dst port ${src##*:}"
565 local out="src host ${src%:*} and tcp src port ${src##*:} and dst host ${dst%:*} and tcp dst port ${dst##*:}"
# tcp[14]/tcp[15] are the window-size bytes: 4*256 + 210 == 1234.
566 local tickle_ack="${in} and (tcp[tcpflags] & tcp-ack != 0) and (tcp[14] == 4) and (tcp[15] == 210)" # win == 1234
567 local ack_ack="${out} and (tcp[tcpflags] & tcp-ack != 0)"
# Global: reused by tcptickle_sniff_wait_show when waiting for RSTs.
568 tcptickle_reset="${in} and tcp[tcpflags] & tcp-rst != 0"
569 local filter="(${tickle_ack}) or (${ack_ack}) or (${tcptickle_reset})"
571 tcpdump_start "$filter"
# tcptickle_sniff_wait_show: wait for at least one tickle-induced RST
# (filter set by tcptickle_sniff_start) then display the capture.
574 tcptickle_sniff_wait_show ()
576 tcpdump_wait 1 "$tcptickle_reset"
578 echo "GOOD: here are some TCP tickle packets:"
# gratarp_sniff_start: start a tcpdump capture for (gratuitous) ARP
# traffic involving the currently selected test IP ($test_ip, set by
# select_test_node_and_ips).
# Fix: the presented fragment lacked the function's brace delimiters
# and carried listing-number artifacts; restored as valid shell.
gratarp_sniff_start ()
{
    tcpdump_start "arp host ${test_ip}"
}
# gratarp_sniff_wait_show: wait for captured gratuitous ARPs (the
# wait call itself is on a line not visible here) and display them.
587 gratarp_sniff_wait_show ()
591 echo "GOOD: this should be the some gratuitous ARPs:"
596 #######################################
# NOTE(review): interior of a daemons-stop helper for local-daemon
# test runs; the function header is not visible.  Shuts daemons down
# politely, then escalates, then removes the test databases.
600 echo "Attempting to politely shutdown daemons..."
601 onnode 1 ctdb shutdown -n all || true
603 echo "Sleeping for a while..."
# Escalate: TERM any survivors, then KILL as a last resort.
606 if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
607 echo "Killing remaining daemons..."
608 pkill -f $CTDB_DIR/bin/ctdbd
610 if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
611 echo "Once more with feeling.."
612 pkill -9 $CTDB_DIR/bin/ctdbd
# Remove the per-test database directory.
616 local var_dir=$CTDB_DIR/tests/var
617 rm -rf $var_dir/test.db
# NOTE(review): interior of a daemons-setup helper that writes the
# nodes/public-address files for a local-daemon test cluster; the
# function header and loop delimiters are not visible.
622 local num_nodes="${CTDB_TEST_NUM_DAEMONS:-2}" # default is 2 nodes
624 local var_dir=$CTDB_DIR/tests/var
626 mkdir -p $var_dir/test.db/persistent
628 local nodes=$var_dir/nodes.txt
629 local public_addresses=$var_dir/public_addresses.txt
630 local no_public_addresses=$var_dir/no_public_addresses.txt
631 rm -f $nodes $public_addresses $no_public_addresses
633 # If there are (strictly) greater than 2 nodes then we'll randomly
634 # choose a node to have no public addresses.
635 local no_public_ips=-1
636 [ $num_nodes -gt 2 ] && no_public_ips=$(($RANDOM % $num_nodes))
# Persist the choice so the start-one-daemon helper can read it later.
637 echo "$no_public_ips" >$no_public_addresses
640 for i in $(seq 1 $num_nodes) ; do
641 if [ "${CTDB_USE_IPV6}x" != "x" ]; then
# IPv6 mode: give each node its own loopback address.
643 ip addr add ::$i/128 dev lo
645 echo 127.0.0.$i >> $nodes
646 # 2 public addresses on most nodes, just to make things interesting.
647 if [ $(($i - 1)) -ne $no_public_ips ] ; then
648 echo "192.0.2.$i/24 lo" >> $public_addresses
649 echo "192.0.2.$(($i + $num_nodes))/24 lo" >> $public_addresses
# NOTE(review): interior of a start-one-daemon helper; $pnn is
# presumably taken from $1 on an unseen line, and the remaining
# arguments are passed through to ctdbd.
658 shift # "$@" gets passed to ctdbd
660 local var_dir=$CTDB_DIR/tests/var
662 local nodes=$var_dir/nodes.txt
663 local public_addresses=$var_dir/public_addresses.txt
664 local no_public_addresses=$var_dir/no_public_addresses.txt
# Which node (if any) was chosen at setup time to have no public IPs.
666 local no_public_ips=-1
667 [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses
669 if [ "$no_public_ips" = $pnn ] ; then
670 echo "Node $no_public_ips will have no public IPs."
673 local ctdb_options="--reclock=$var_dir/rec.lock --nlist $nodes --nopublicipcheck --event-script-dir=$CTDB_DIR/tests/events.d --logfile=$var_dir/daemons.log -d 0 --dbdir=$var_dir/test.db --dbdir-persistent=$var_dir/test.db/persistent"
# Only root can manage interface addresses, so only then bind lo.
675 if [ $(id -u) -eq 0 ]; then
676 ctdb_options="$ctdb_options --public-interface=lo"
679 if [ $pnn -eq $no_public_ips ] ; then
680 ctdb_options="$ctdb_options --public-addresses=/dev/null"
682 ctdb_options="$ctdb_options --public-addresses=$public_addresses"
685 # Need full path so we can use "pkill -f" to kill the daemons.
686 $VALGRIND $CTDB_DIR/bin/ctdbd --socket=$var_dir/sock.$pnn $ctdb_options "$@" ||return 1
# NOTE(review): interior of a start-all-daemons helper; starts each
# local daemon in turn and points the default client socket at node 0.
691 # "$@" gets passed to ctdbd
693 local num_nodes="${CTDB_TEST_NUM_DAEMONS:-2}" # default is 2 nodes
695 echo "Starting $num_nodes ctdb daemons..."
697 for i in $(seq 0 $(($num_nodes - 1))) ; do
698 daemons_start_1 $i "$@"
701 local var_dir=$CTDB_DIR/tests/var
# Replace the default socket only when it is a symlink or missing —
# never clobber a real socket belonging to a system ctdbd.
703 if [ -L /tmp/ctdb.socket -o ! -S /tmp/ctdb.socket ] ; then
704 ln -sf $var_dir/sock.0 /tmp/ctdb.socket || return 1
708 #######################################
# _ctdb_hack_options: translate selected ctdbd command-line options
# into the CTDB_* environment variables understood by the init script.
# NOTE(review): fragment view — the function's braces and the case
# statement's closing lines are not visible.
710 _ctdb_hack_options ()
712 local ctdb_options="$*"
714 # We really just want to pass CTDB_OPTIONS but on RH
715 # /etc/sysconfig/ctdb can, and frequently does, set that variable.
716 # So instead, we hack badly. We'll add these as we use them.
717 # Note that these may still be overridden by the above file... but
718 # we tend to use the exotic options here... so that is unlikely.
720 case "$ctdb_options" in
721 *--start-as-stopped*)
722 export CTDB_START_AS_STOPPED="yes"
# NOTE(review): interior of a restart helper — converts options to
# environment variables then restarts ctdb via the init script (the
# Red Hat branch is shown; any other branch is not visible).
728 _ctdb_hack_options "$@"
730 if [ -e /etc/redhat-release ] ; then
733 /etc/init.d/ctdb restart
# NOTE(review): interior of a start helper — converts options to
# environment variables then starts ctdb via the init script.
739 _ctdb_hack_options "$@"
741 /etc/init.d/ctdb start
# NOTE(review): fragment view — the line below appears to belong to a
# setup helper that chooses local-daemon mode when CTDB_NODES_SOCKETS
# is set; its surroundings are not visible.
746 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
751 # Common things to do after starting one or more nodes.
# Wait for health, then shorten RerecoveryTimeout so the forced
# recoveries below settle quickly.
754 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1
756 echo "Setting RerecoveryTimeout to 1"
757 onnode -pq all "ctdb setvar RerecoveryTimeout 1"
759 # In recent versions of CTDB, forcing a recovery like this blocks
760 # until the recovery is complete. Hopefully this will help the
761 # cluster to stabilise before a subsequent test.
762 echo "Forcing a recovery..."
763 onnode -q 0 ctdb recover
765 echo "Forcing a recovery..."
766 onnode -q 0 ctdb recover
771 # This assumes that ctdbd is not running on the given node.
# NOTE(review): interior of a start-one-node helper; $pnn is
# presumably taken from $1 on an unseen line.
775 shift # "$@" is passed to ctdbd start.
777 echo -n "Starting CTDB on node ${pnn}..."
# Local-daemon mode vs real cluster (started via onnode).
779 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
780 daemons_start_1 $pnn "$@"
782 onnode $pnn $CTDB_TEST_WRAPPER _ctdb_start "$@"
785 # If we're starting only 1 node then we're doing something weird.
# Schedule a full restart so later tests see a consistent cluster.
786 ctdb_restart_when_done
# NOTE(review): interior of a restart-cluster helper — restarts all
# nodes, retrying (up to 5 attempts) until the cluster becomes
# healthy, then stabilises it with forced recoveries.  The function
# header and the retry loop's delimiters are not visible.
791 # "$@" is passed to ctdbd start.
793 echo -n "Restarting CTDB"
794 if $ctdb_test_restart_scheduled ; then
795 echo -n " (scheduled)"
# Local-daemon mode vs real cluster (restarted via onnode).
801 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
805 onnode -p all $CTDB_TEST_WRAPPER _restart_ctdb "$@"
# Give up after 5 attempts.
809 [ $i -lt 5 ] || break
811 echo "That didn't seem to work - sleeping for a while..."
815 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1
817 echo "Setting RerecoveryTimeout to 1"
818 onnode -pq all "ctdb setvar RerecoveryTimeout 1"
820 # In recent versions of CTDB, forcing a recovery like this blocks
821 # until the recovery is complete. Hopefully this will help the
822 # cluster to stabilise before a subsequent test.
823 echo "Forcing a recovery..."
824 onnode -q 0 ctdb recover
826 echo "Forcing a recovery..."
827 onnode -q 0 ctdb recover
# ctdb_restart_when_done: schedule a full cluster restart to happen
# in the exit handler, after the current test completes.  The flag is
# consumed by the exit/health-check paths.
# Fix: the presented fragment lacked the function's brace delimiters
# and carried listing-number artifacts; restored as valid shell.
ctdb_restart_when_done ()
{
    ctdb_test_restart_scheduled=true
}
837 #######################################
# install_eventscript: install an eventscript (name $1, contents $2)
# either on every cluster node via onnode, or locally into the test
# tree's events.d directory for local-daemon runs.
# NOTE(review): fragment view — the function braces, the chmod on the
# local file and the if/else delimiters are not visible.
839 install_eventscript ()
841 local script_name="$1"
842 local script_contents="$2"
844 if [ -n "$CTDB_TEST_REAL_CLUSTER" ] ; then
845 # The quoting here is *very* fragile. However, we do
846 # experience the joy of installing a short script using
847 # onnode, and without needing to know the IP addresses of the
849 onnode all "f=\"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\" ; echo \"Installing \$f\" ; echo '${script_contents}' > \"\$f\" ; chmod 755 \"\$f\""
851 f="${CTDB_DIR}/tests/events.d/${script_name}"
852 echo "$script_contents" >"$f"
# uninstall_eventscript: remove a previously installed eventscript,
# cluster-wide (via onnode) or from the local test tree.
# NOTE(review): fragment view — braces and if/else delimiters are not
# visible.
857 uninstall_eventscript ()
859 local script_name="$1"
861 if [ -n "$CTDB_TEST_REAL_CLUSTER" ] ; then
862 onnode all "rm -vf \"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\""
864 rm -vf "${CTDB_DIR}/tests/events.d/${script_name}"