mode);
}
+static bool validate_recovery_lock(const char *key,
+ const char *old_reclock,
+ const char *new_reclock,
+ enum conf_update_mode mode)
+{
+ bool status;
+
+ if (new_reclock != NULL) {
+ D_WARNING("Configuration option [%s] -> %s is deprecated\n",
+ CLUSTER_CONF_SECTION,
+ key);
+ }
+
+ status = check_static_string_change(key, old_reclock, new_reclock, mode);
+
+ return status;
+}
+
void cluster_conf_init(struct conf_context *conf)
{
conf_define_section(conf, CLUSTER_CONF_SECTION, NULL);
validate_node_address);
conf_define_string(conf,
CLUSTER_CONF_SECTION,
- CLUSTER_CONF_RECOVERY_LOCK,
+ CLUSTER_CONF_CLUSTER_LOCK,
NULL,
check_static_string_change);
+ conf_define_string(conf,
+ CLUSTER_CONF_SECTION,
+ CLUSTER_CONF_RECOVERY_LOCK,
+ NULL,
+ validate_recovery_lock);
}
#define CLUSTER_CONF_TRANSPORT "transport"
#define CLUSTER_CONF_NODE_ADDRESS "node address"
+#define CLUSTER_CONF_CLUSTER_LOCK "cluster lock"
#define CLUSTER_CONF_RECOVERY_LOCK "recovery lock"
void cluster_conf_init(struct conf_context *conf);
# log level = NOTICE
[cluster]
- # Shared recovery lock file to avoid split brain. Daemon
- # default is no recovery lock. Do NOT run CTDB without a
- # recovery lock file unless you know exactly what you are
+ # Shared cluster lock file to avoid split brain. Daemon
+ # default is no cluster lock. Do NOT run CTDB without a
+ # cluster lock file unless you know exactly what you are
# doing.
#
- # Please see the RECOVERY LOCK section in ctdb(7) for more
+ # Please see the CLUSTER LOCK section in ctdb(7) for more
# details.
#
- # recovery lock = !/bin/false RECOVERY LOCK NOT CONFIGURED
+ # cluster lock = !/bin/false CLUSTER LOCK NOT CONFIGURED
which could occur if the cluster becomes partitioned due to network
failure or similar.
-CTDB uses a cluster-wide mutex for its "recovery lock", which is used
+CTDB uses a cluster-wide mutex for its "cluster lock", which is used
to ensure that only one database recovery can happen at a time. For
-an overview of recovery lock configuration see the RECOVERY LOCK
+an overview of cluster lock configuration see the CLUSTER LOCK
section in ctdb(7). CTDB tries to ensure correct operation of the
-recovery lock by attempting to take the recovery lock when CTDB knows
+cluster lock by attempting to take the cluster lock when CTDB knows
that it should already be held.
By default, CTDB uses a supplied mutex helper that uses a fcntl(2)
<para>
ctdb_etcd_lock is intended to be run as a mutex helper for CTDB. It
will try to connect to an existing etcd cluster and grab a lock in that
- cluster to function as CTDB's recovery lock. Please see
+ cluster to function as CTDB's cluster lock. Please see
<emphasis>ctdb/doc/cluster_mutex_helper.txt</emphasis> for details on
the mutex helper API. To use this, include the following line in
the <literal>[cluster]</literal> section of
<manvolnum>5</manvolnum></citerefentry>:
</para>
<screen format="linespecific">
-recovery lock = !/usr/local/usr/libexec/ctdb/ctdb_etcd_lock
+cluster lock = !/usr/local/usr/libexec/ctdb/ctdb_etcd_lock
</screen>
<para>
You can also pass "-v", "-vv", or "-vvv" to include verbose output in
<variablelist>
<varlistentry>
- <term>recovery lock = <parameter>LOCK</parameter></term>
+ <term>cluster lock = <parameter>LOCK</parameter></term>
<listitem>
<para>
LOCK specifies the cluster-wide mutex used to detect and
prevent a partitioned cluster (or "split brain").
</para>
<para>
- For information about the recovery lock please see the
- <citetitle>RECOVERY LOCK</citetitle> section in
+ For information about the cluster lock please see the
+ <citetitle>CLUSTER LOCK</citetitle> section in
<citerefentry><refentrytitle>ctdb</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
</para>
<para>
- Default: NONE. However, uses of a recovery lock is
+ Default: NONE. However, use of a cluster lock is
<emphasis>strongly recommended</emphasis>.
</para>
</listitem>
<refsect1>
<title>DESCRIPTION</title>
<para>
- ctdb_mutex_ceph_rados_helper can be used as a recovery lock provider
+ ctdb_mutex_ceph_rados_helper can be used as a cluster lock provider
for CTDB. When configured, split brain avoidance during CTDB recovery
will be handled using locks against an object located in a Ceph RADOS
pool.
<manvolnum>5</manvolnum></citerefentry>:
</para>
<screen format="linespecific">
-recovery lock = !ctdb_mutex_ceph_rados_helper [Cluster] [User] [Pool] [Object]
+cluster lock = !ctdb_mutex_ceph_rados_helper [Cluster] [User] [Pool] [Object]
Cluster: Ceph cluster name (e.g. ceph)
User: Ceph cluster user name (e.g. client.admin)
</para>
<para>
For informational purposes, ctdb_mutex_ceph_rados_helper will also
- register the recovery lock holder in Ceph Manager's service map.
+ register the cluster lock holder in Ceph Manager's service map.
</para>
</refsect1>
log level = NOTICE
[cluster]
- recovery lock = /shared/recovery.lock
+ cluster lock = /shared/cluster.lock
#
# Nodes configuration
CLUSTER_CONF_SECTION,
CLUSTER_CONF_NODE_ADDRESS,
&ctdb_config.node_address);
+ conf_assign_string_pointer(conf,
+ CLUSTER_CONF_SECTION,
+ CLUSTER_CONF_CLUSTER_LOCK,
+ &ctdb_config.cluster_lock);
conf_assign_string_pointer(conf,
CLUSTER_CONF_SECTION,
CLUSTER_CONF_RECOVERY_LOCK,
/* Cluster */
const char *transport;
const char *node_address;
+ const char *cluster_lock;
const char *recovery_lock;
/* Database */
goto fail;
}
- if (ctdb_config.recovery_lock == NULL) {
- D_WARNING("Recovery lock not set\n");
+ if (ctdb_config.cluster_lock != NULL) {
+ ctdb->recovery_lock = ctdb_config.cluster_lock;
+ } else if (ctdb_config.recovery_lock != NULL) {
+ ctdb->recovery_lock = ctdb_config.recovery_lock;
+ } else {
+ D_WARNING("Cluster lock not set\n");
}
- ctdb->recovery_lock = ctdb_config.recovery_lock;
/* tell ctdb what address to listen on */
if (ctdb_config.node_address) {
[cluster]
# transport = tcp
# node address =
+ # cluster lock =
# recovery lock =
[database]
# volatile database directory = ${database_volatile_dbdir}
ok <<EOF
EOF
-unit_test ctdb-config get "cluster" "recovery lock"
+unit_test ctdb-config get "cluster" "cluster lock"
cat > "$conffile" <<EOF
[cluster]
Failed to load config file $conffile
EOF
unit_test ctdb-config validate
+
+cat > "$conffile" <<EOF
+[cluster]
+ cluster lock = /foo/bar
+EOF
+
+required_result 0 <<EOF
+EOF
+unit_test ctdb-config validate
+
+cat > "$conffile" <<EOF
+[cluster]
+ recovery lock = /foo/bar
+EOF
+
+required_result 0 <<EOF
+Configuration option [cluster] -> recovery lock is deprecated
+EOF
+unit_test ctdb-config -d WARNING validate
-N <file> Nodes file (default: automatically generated)
-n <num> Number of nodes (default: 3)
-P <file> Public addresses file (default: automatically generated)
- -R Use a command for the recovery lock (default: use a file)
+ -R Use a command for the cluster lock (default: use a file)
-r <time> Like -R and set recheck interval to <time> (default: use a file)
-S <library> Socket wrapper shared library to preload (default: none)
-6 Generate IPv6 IPs for nodes, public addresses (default: IPv4)
_nodes_file=""
_num_nodes=3
_public_addresses_file=""
- _recovery_lock_use_command=false
- _recovery_lock_recheck_interval=""
+ _cluster_lock_use_command=false
+ _cluster_lock_recheck_interval=""
_socket_wrapper=""
_use_ipv6=false
N) _nodes_file="$OPTARG" ;;
n) _num_nodes="$OPTARG" ;;
P) _public_addresses_file="$OPTARG" ;;
- R) _recovery_lock_use_command=true ;;
- r) _recovery_lock_use_command=true
- _recovery_lock_recheck_interval="$OPTARG"
+ R) _cluster_lock_use_command=true ;;
+ r) _cluster_lock_use_command=true
+ _cluster_lock_recheck_interval="$OPTARG"
;;
S) _socket_wrapper="$OPTARG" ;;
6) _use_ipv6=true ;;
$_use_ipv6 >"$_public_addresses_all"
fi
- _recovery_lock_dir="${directory}/shared/.ctdb"
- mkdir -p "$_recovery_lock_dir"
- _recovery_lock="${_recovery_lock_dir}/rec.lock"
- if $_recovery_lock_use_command ; then
+ _cluster_lock_dir="${directory}/shared/.ctdb"
+ mkdir -p "$_cluster_lock_dir"
+ _cluster_lock="${_cluster_lock_dir}/cluster.lock"
+ if $_cluster_lock_use_command ; then
_helper="${CTDB_SCRIPTS_HELPER_BINDIR}/ctdb_mutex_fcntl_helper"
- _t="! ${_helper} ${_recovery_lock}"
- if [ -n "$_recovery_lock_recheck_interval" ] ; then
- _t="${_t} ${_recovery_lock_recheck_interval}"
+ _t="! ${_helper} ${_cluster_lock}"
+ if [ -n "$_cluster_lock_recheck_interval" ] ; then
+ _t="${_t} ${_cluster_lock_recheck_interval}"
fi
- _recovery_lock="$_t"
+ _cluster_lock="$_t"
fi
if [ -n "$_socket_wrapper" ] ; then
log level = INFO
[cluster]
- recovery lock = ${_recovery_lock}
+ cluster lock = ${_cluster_lock}
node address = ${_node_ip}
[database]
#define CTDB_MUTEX_CEPH_LOCK_NAME "ctdb_reclock_mutex"
#define CTDB_MUTEX_CEPH_LOCK_COOKIE CTDB_MUTEX_CEPH_LOCK_NAME
-#define CTDB_MUTEX_CEPH_LOCK_DESC "CTDB recovery lock"
+#define CTDB_MUTEX_CEPH_LOCK_DESC "CTDB cluster lock"
/*
* During failover it may take up to <lock duration> seconds before the
* newly elected recovery master can obtain the lock.
[ "$LOCKER_COOKIE" == "ctdb_reclock_mutex" ] \
|| _fail "unexpected locker cookie: $LOCKER_COOKIE"
LOCKER_DESC="$(jq -r '.lockers[0].description' ${TMP_DIR}/lock_state_first)"
-[ "$LOCKER_DESC" == "CTDB recovery lock" ] \
+[ "$LOCKER_DESC" == "CTDB cluster lock" ] \
|| _fail "unexpected locker description: $LOCKER_DESC"
LOCKER_EXP="$(jq -r '.lockers[0].expiration' ${TMP_DIR}/lock_state_first)"
[ "$LOCKER_EXP" == "0.000000" ] \
[ "$LOCKER_COOKIE" == "ctdb_reclock_mutex" ] \
|| _fail "unexpected locker cookie: $LOCKER_COOKIE"
LOCKER_DESC="$(jq -r '.lockers[0].description' ${TMP_DIR}/lock_state_fourth)"
-[ "$LOCKER_DESC" == "CTDB recovery lock" ] \
+[ "$LOCKER_DESC" == "CTDB cluster lock" ] \
|| _fail "unexpected locker description: $LOCKER_DESC"
kill $locker_pid || exit 1
This script is intended to be run as a mutex helper for CTDB. It will try to
connect to an existing etcd cluster and grab an etcd.Lock() to function as
-CTDB's recovery lock. Please see ctdb/doc/cluster_mutex_helper.txt for
+CTDB's cluster lock. Please see ctdb/doc/cluster_mutex_helper.txt for
details on what we're SUPPOSED to be doing. :) To use this, include
the following line in the ctdb.conf:
- recovery lock = !/path/to/script
+ cluster lock = !/path/to/script
You can also pass "-v", "-vv", or "-vvv" to include verbose output in the
CTDB log. Additional "v"s indicate increases in verbosity.