6 Verify that the server end of an NFS connection is correctly reset
10 * An active CTDB cluster with at least 2 nodes with public addresses.
12 * Test must be run on a real or virtual cluster rather than against
15 * Test must not be run from a cluster node.
17 * Cluster nodes must be listening on the NFS TCP port (2049).
21 * CTDB should correctly record the connection and the releasing node
22 should reset the server end of the connection.
# Pull in the CTDB integration-test helper library; it provides the
# try_command_on_node / wait_until / select_test_node_and_ips helpers
# used throughout this test.
# NOTE(review): this chunk is a sampled excerpt (the leading numbers are
# original-file line numbers), so some intervening lines are not visible.
26 . "${TEST_SCRIPTS_DIR}/integration.bash"

# Abort unless we are pointed at a real or virtual cluster — a stated
# prerequisite of this test (it must not run against local daemons).
32 ctdb_test_check_real_cluster

36 # We need this for later, so we know how long to sleep.
37 try_command_on_node 0 $CTDB getvar MonitorInterval
# try_command_on_node leaves the command's stdout in $out.  The output
# has the form "MonitorInterval = N", so strip everything up to "= ".
38 monitor_interval="${out#*= }"

# Picks a node/public-IP pair for the test, exporting $test_node and
# $test_ip for the steps below — presumably $test_port (2049, per the
# header prerequisites) is set nearby as well; TODO confirm, its
# assignment is not visible in this excerpt.
40 select_test_node_and_ips
44 echo "Set NoIPTakeover=1 on all nodes"
# Prevent IP failover for the duration of the test, so that when the
# test node is disabled later the address is released (and the server
# end reset) rather than taken over by another node.
45 try_command_on_node all $CTDB setvar NoIPTakeover 1

47 echo "Give the recovery daemon some time to reload tunables"
# NOTE(review): the corresponding sleep is not visible in this excerpt —
# confirm it exists between this message and the connection below.

50 echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with nc..."

# Hold a TCP connection to the NFS port open in the background: sleep
# keeps nc's stdin open for 4 monitor intervals, which is long enough
# for CTDB to notice and track the connection before it closes.
52 sleep $((monitor_interval * 4)) | nc $test_ip $test_port &
# Ensure the background nc is killed whenever the test exits; $nc_pid
# is expanded now, at registration time, which is what we want.
# NOTE(review): the nc_pid assignment (presumably nc_pid=$! right after
# the backgrounded nc above) is not visible in this excerpt — confirm
# it is set before this line.
54 ctdb_test_exit_hook_add "kill $nc_pid >/dev/null 2>&1"

# Block until the nc process owns an established TCP socket to the test
# address; this helper sets $src_socket (read below).
56 wait_until_get_src_socket "tcp" "${test_ip}:${test_port}" $nc_pid "nc"

58 echo "Source socket is $src_socket"

60 echo "Wait until NFS connection is tracked by CTDB on test node ..."
# Poll for up to 2 monitor intervals until CTDB's tickle list on the
# test node records this exact connection (IP, port, source socket).
61 wait_until $((monitor_interval * 2)) \
62 check_tickles $test_node $test_ip $test_port $src_socket
65 # It would be nice if ss consistently used local/peer instead of src/dst
# ss filter for the server-side view of our connection: the local ("src")
# address is the public IP and NFS port, the peer ("dst") is nc's
# source socket.
66 ss_filter="src ${test_ip}:${test_port} dst ${src_socket}"

# Confirm the kernel on the test node still has the connection in
# ESTABLISHED state.  "tail -n +2" drops ss's header line, so $out is
# empty if and only if no socket matched the filter.
68 try_command_on_node $test_node \
69 "ss -tn state established '${ss_filter}' | tail -n +2"
70 if [ -z "$out" ] ; then
71 echo "BAD: ss did not list the socket"
# NOTE(review): the failure exit and the closing "fi" of this test are
# not visible in this excerpt.
74 echo "GOOD: ss lists the socket:"
77 echo "Disabling node $test_node"
# Disable the test node via node 1.  With NoIPTakeover=1 no other node
# takes over the address, so the releasing node is expected to reset
# the server end of the tracked connection (the behaviour under test).
78 try_command_on_node 1 $CTDB disable -n $test_node
79 wait_until_node_has_status $test_node disabled

# Re-run the same ss query: after the disable/release, the connection
# must no longer be in ESTABLISHED state on the test node.
81 try_command_on_node $test_node \
82 "ss -tn state established '${ss_filter}' | tail -n +2"
83 if [ -n "$out" ] ; then
84 echo "BAD: ss listed the socket after failover"
# NOTE(review): the failure exit and closing "fi" are not visible in
# this excerpt.
87 echo "GOOD: ss no longer lists the socket"