From a8ccf41b8796df417c92b157e6c61457768d0e6c Mon Sep 17 00:00:00 2001
From: Martin Schwenke
Date: Wed, 4 Apr 2018 19:17:59 +1000
Subject: [PATCH] ctdb-docs: Document script.options

Signed-off-by: Martin Schwenke
Reviewed-by: Amitay Isaacs
---
 ctdb/doc/ctdb-script.options.5.xml | 1205 ++++++++++++++++++++++++++++
 ctdb/doc/ctdb.7.xml | 9 +-
 ctdb/doc/ctdbd.conf.5.xml | 1041 ------------------------
 ctdb/packaging/RPM/ctdb.spec.in | 1 +
 ctdb/wscript | 1 +
 5 files changed, 1213 insertions(+), 1044 deletions(-)
 create mode 100644 ctdb/doc/ctdb-script.options.5.xml

diff --git a/ctdb/doc/ctdb-script.options.5.xml b/ctdb/doc/ctdb-script.options.5.xml
new file mode 100644
index 00000000000..22cdd371bd2
--- /dev/null
+++ b/ctdb/doc/ctdb-script.options.5.xml
@@ -0,0 +1,1205 @@
+
+ ctdb-script.options
+ 5
+ ctdb
+ CTDB - clustered TDB database
+
+ ctdb-script.options
+ CTDB scripts configuration files
+
+ DESCRIPTION
+
+ Each CTDB script has 2 possible locations for its configuration options:
+
+ /usr/local/etc/ctdb/script.options
+
+ This is a catch-all global file for general purpose
+ scripts and for options that are used in multiple event
+ scripts.
+
+ SCRIPT.options
+
+ That is, options for
+ SCRIPT are
+ placed in a file alongside the script, with a ".options"
+ suffix added. This style is usually recommended for event
+ scripts.
+
+ Options in this script-specific file override those in
+ the global file.
+
+ For short-term backward compatibility the CTDB configuration
+ file (see
+ ctdbd.conf
+ 5) is also loaded.
+
+ These files should include simple shell-style variable
+ assignments and shell-style comments.
+
+ NETWORK CONFIGURATION
+
+ 10.interface
+
+ This event script handles monitoring of interfaces used by
+ public IP addresses.
+
+ CTDB_PARTIALLY_ONLINE_INTERFACES=yes|no
+
+ Whether one or more offline interfaces should cause a
+ monitor event to fail if there are other interfaces that
+ are up. If this is "yes" and a node has some interfaces
+ that are down then ctdb status will
+ display the node as "PARTIALLYONLINE".
+
+ Note that CTDB_PARTIALLY_ONLINE_INTERFACES=yes is not
+ generally compatible with NAT gateway or LVS. NAT
+ gateway relies on the interface configured by
+ CTDB_NATGW_PUBLIC_IFACE to be up and LVS relies on
+ CTDB_LVS_PUBLIC_IFACE to be up. CTDB does not check if
+ these options are set in an incompatible way so care is
+ needed to understand the interaction.
+
+ Default is "no".
+
+ 11.natgw
+
+ Provides CTDB's NAT gateway functionality.
+
+ NAT gateway is used to configure fallback routing for nodes
+ when they do not host any public IP addresses. For example,
+ it allows unhealthy nodes to reliably communicate with
+ external infrastructure. One node in a NAT gateway group will
+ be designated as the NAT gateway master node and other (slave)
+ nodes will be configured with fallback routes via the NAT
+ gateway master node. For more information, see the
+ NAT GATEWAY section in
+ ctdb
+ 7.
+
+ CTDB_NATGW_DEFAULT_GATEWAY=IPADDR
+
+ IPADDR is an alternate network gateway to use on the NAT
+ gateway master node. If set, a fallback default route
+ is added via this network gateway.
+
+ No default. Setting this variable is optional - if not
+ set then no route is created on the NAT gateway master
+ node.
+
+ CTDB_NATGW_NODES=FILENAME
+
+ FILENAME contains the list of nodes that belong to the
+ same NAT gateway group.
+ + + File format: + +IPADDR slave-only + + + + IPADDR is the private IP address of each node in the NAT + gateway group. + + + If "slave-only" is specified then the corresponding node + can not be the NAT gateway master node. In this case + CTDB_NATGW_PUBLIC_IFACE and + CTDB_NATGW_PUBLIC_IP are optional and + unused. + + + No default, usually + /usr/local/etc/ctdb/natgw_nodes when enabled. + + + + + + CTDB_NATGW_PRIVATE_NETWORK=IPADDR/MASK + + + IPADDR/MASK is the private sub-network that is + internally routed via the NAT gateway master node. This + is usually the private network that is used for node + addresses. + + + No default. + + + + + + CTDB_NATGW_PUBLIC_IFACE=IFACE + + + IFACE is the network interface on which the + CTDB_NATGW_PUBLIC_IP will be configured. + + + No default. + + + + + + CTDB_NATGW_PUBLIC_IP=IPADDR/MASK + + + IPADDR/MASK indicates the IP address that is used for + outgoing traffic (originating from + CTDB_NATGW_PRIVATE_NETWORK) on the NAT gateway master + node. This must not be a + configured public IP address. + + + No default. + + + + + + CTDB_NATGW_STATIC_ROUTES=IPADDR/MASK[@GATEWAY] ... + + + Each IPADDR/MASK identifies a network or host to which + NATGW should create a fallback route, instead of + creating a single default route. This can be used when + there is already a default route, via an interface that + can not reach required infrastructure, that overrides + the NAT gateway default route. + + + If GATEWAY is specified then the corresponding route on + the NATGW master node will be via GATEWAY. Such routes + are created even if + CTDB_NATGW_DEFAULT_GATEWAY is not + specified. If GATEWAY is not specified for some + networks then routes are only created on the NATGW + master node for those networks if + CTDB_NATGW_DEFAULT_GATEWAY is + specified. + + + This should be used with care to avoid causing traffic + to unnecessarily double-hop through the NAT gateway + master, even when a node is hosting public IP addresses. + Each specified network or host should probably have a + corresponding automatically created link route or static + route to avoid this. + + + No default. + + + + + + + + Example + +CTDB_NATGW_NODES=/usr/local/etc/ctdb/natgw_nodes +CTDB_NATGW_PRIVATE_NETWORK=192.168.1.0/24 +CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1 +CTDB_NATGW_PUBLIC_IP=10.0.0.227/24 +CTDB_NATGW_PUBLIC_IFACE=eth0 + + + + A variation that ensures that infrastructure (ADS, DNS, ...) + directly attached to the public network (10.0.0.0/24) is + always reachable would look like this: + + +CTDB_NATGW_NODES=/usr/local/etc/ctdb/natgw_nodes +CTDB_NATGW_PRIVATE_NETWORK=192.168.1.0/24 +CTDB_NATGW_PUBLIC_IP=10.0.0.227/24 +CTDB_NATGW_PUBLIC_IFACE=eth0 +CTDB_NATGW_STATIC_ROUTES=10.0.0.0/24 + + + Note that CTDB_NATGW_DEFAULT_GATEWAY is + not specified. + + + + + + + 13.per_ip_routing + + + Provides CTDB's policy routing functionality. + + + + A node running CTDB may be a component of a complex network + topology. In particular, public addresses may be spread + across several different networks (or VLANs) and it may not be + possible to route packets from these public addresses via the + system's default route. Therefore, CTDB has support for + policy routing via the 13.per_ip_routing + eventscript. This allows routing to be specified for packets + sourced from each public address. The routes are added and + removed as CTDB moves public addresses between nodes. + + + + For more information, see the POLICY + ROUTING section in + ctdb + 7. 
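As an illustrative sketch only (not part of the original patch), a policy routing configuration file in the format described under CTDB_PER_IP_ROUTING_CONF below might contain entries like the following, where each line gives a source public IP address, a destination network and a gateway; all addresses shown here are hypothetical:

    192.168.1.99    192.168.1.0/24
    192.168.1.99    0.0.0.0/0       192.168.1.1

The first entry would give packets sourced from 192.168.1.99 a route to its directly attached subnet (the gateway column may be omitted for such link-local routes, per the POLICY ROUTING section in ctdb(7)), while the second would add a default route via a gateway for the same source address.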
+
+ CTDB_PER_IP_ROUTING_CONF=FILENAME
+
+ FILENAME contains elements for constructing the desired
+ routes for each source address.
+
+ The special FILENAME value
+ __auto_link_local__ indicates that no
+ configuration file is provided and that CTDB should
+ generate reasonable link-local routes for each public IP
+ address.
+
+ File format:
+ IPADDR DEST-IPADDR/MASK GATEWAY-IPADDR
+
+ No default, usually
+ /usr/local/etc/ctdb/policy_routing
+ when enabled.
+
+ CTDB_PER_IP_ROUTING_RULE_PREF=NUM
+
+ NUM sets the priority (or preference) for the routing
+ rules that are added by CTDB.
+
+ This should be (strictly) greater than 0 and (strictly)
+ less than 32766. A priority of 100 is recommended, unless
+ this conflicts with a priority already in use on the
+ system. See
+ ip
+ 8, for more details.
+
+ CTDB_PER_IP_ROUTING_TABLE_ID_LOW=LOW-NUM,
+ CTDB_PER_IP_ROUTING_TABLE_ID_HIGH=HIGH-NUM
+
+ CTDB determines a unique routing table number to use for
+ the routing related to each public address. LOW-NUM and
+ HIGH-NUM indicate the minimum and maximum routing table
+ numbers that are used.
+
+ ip
+ 8 uses some
+ reserved routing table numbers below 255. Therefore,
+ CTDB_PER_IP_ROUTING_TABLE_ID_LOW should be (strictly)
+ greater than 255.
+
+ CTDB uses the standard file
+ /etc/iproute2/rt_tables to maintain
+ a mapping between the routing table numbers and labels.
+ The label for a public address
+ ADDR will look like
+ ctdb.addr. This means that
+ the associated rules and routes are easy to read (and
+ manipulate).
+
+ No default, usually 1000 and 9000.
+
+ Example
+
+CTDB_PER_IP_ROUTING_CONF=/usr/local/etc/ctdb/policy_routing
+CTDB_PER_IP_ROUTING_RULE_PREF=100
+CTDB_PER_IP_ROUTING_TABLE_ID_LOW=1000
+CTDB_PER_IP_ROUTING_TABLE_ID_HIGH=9000
+
+ 91.lvs
+
+ Provides CTDB's LVS functionality.
+
+ For a general description see the LVS
+ section in ctdb
+ 7.
+
+ CTDB_LVS_NODES=FILENAME
+
+ FILENAME contains the list of nodes that belong to the
+ same LVS group.
+
+ File format:
+IPADDR slave-only
+
+ IPADDR is the private IP address of each node in the LVS
+ group.
+
+ If "slave-only" is specified then the corresponding node
+ can not be the LVS master node. In this case
+ CTDB_LVS_PUBLIC_IFACE and
+ CTDB_LVS_PUBLIC_IP are optional and
+ unused.
+
+ No default, usually
+ /usr/local/etc/ctdb/lvs_nodes when enabled.
+
+ CTDB_LVS_PUBLIC_IFACE=INTERFACE
+
+ INTERFACE is the network interface that clients will use
+ to connect to CTDB_LVS_PUBLIC_IP.
+ This is optional for slave-only nodes.
+ No default.
+
+ CTDB_LVS_PUBLIC_IP=IPADDR
+
+ CTDB_LVS_PUBLIC_IP is the LVS public address. No
+ default.
+
+ SERVICE CONFIGURATION
+
+ CTDB can be configured to manage and/or monitor various NAS (and
+ other) services via its eventscripts.
+
+ In the simplest case CTDB will manage a service. This means the
+ service will be started and stopped along with CTDB, CTDB will
+ monitor the service and CTDB will do any required
+ reconfiguration of the service when public IP addresses are
+ failed over.
+
+ 20.multipathd
+
+ Provides CTDB's Linux multipathd service management.
+
+ It can monitor multipath devices to ensure that active paths
+ are available.
+
+ CTDB_MONITOR_MPDEVICES=MP-DEVICE-LIST
+
+ MP-DEVICE-LIST is a list of multipath devices for CTDB to monitor.
+
+ No default.
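As a hedged illustration of the script-specific options style described under DESCRIPTION above (the device names are hypothetical, and the assumption that MP-DEVICE-LIST is a simple space-separated list should be checked against the 20.multipathd script itself), monitoring of two multipath devices might be configured with:

    CTDB_MONITOR_MPDEVICES="mpatha mpathb"

placed either in the global /usr/local/etc/ctdb/script.options file or in a 20.multipathd.options file alongside the event script.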
+
+ 31.clamd
+
+ This event script provides CTDB's ClamAV anti-virus service
+ management.
+
+ This eventscript is not enabled by default. Use ctdb
+ enablescript to enable it.
+
+ CTDB_MANAGES_CLAMD=yes|no
+
+ Should CTDB manage ClamAV?
+
+ Default is no.
+
+ CTDB_CLAMD_SOCKET=FILENAME
+
+ FILENAME is the socket to monitor ClamAV.
+
+ No default.
+
+ 40.vsftpd
+
+ Provides CTDB's vsftpd FTP service management.
+
+ CTDB_MANAGES_VSFTPD=yes|no
+
+ Should CTDB manage the vsftpd FTP server?
+
+ Default is no.
+
+ 41.httpd
+
+ Provides CTDB's Apache web service management.
+
+ CTDB_MANAGES_HTTPD=yes|no
+
+ Should CTDB manage the Apache web server?
+
+ Default is no.
+
+ 49.winbind
+
+ Provides CTDB's Samba winbind service management.
+
+ CTDB_MANAGES_WINBIND=yes|no
+
+ Should CTDB manage Winbind?
+
+ Default is no.
+
+ CTDB_SERVICE_WINBIND=SERVICE
+
+ Distribution specific SERVICE for managing winbindd.
+
+ Default is "winbind".
+
+ 50.samba
+
+ Provides the core of CTDB's Samba file service management.
+
+ CTDB_MANAGES_SAMBA=yes|no
+
+ Should CTDB manage Samba?
+
+ Default is no.
+
+ CTDB_SAMBA_CHECK_PORTS=PORT-LIST
+
+ When monitoring Samba, check TCP ports in
+ space-separated PORT-LIST.
+
+ Default is to monitor ports that Samba is configured to listen on.
+
+ CTDB_SAMBA_SKIP_SHARE_CHECK=yes|no
+
+ As part of monitoring, should CTDB skip the check for
+ the existence of each directory configured as a share in
+ Samba. This may be desirable if there is a large number
+ of shares.
+
+ Default is no.
+
+ CTDB_SERVICE_NMB=SERVICE
+
+ Distribution specific SERVICE for managing nmbd.
+
+ Default is distribution-dependent.
+
+ CTDB_SERVICE_SMB=SERVICE
+
+ Distribution specific SERVICE for managing smbd.
+
+ Default is distribution-dependent.
+
+ 60.nfs
+
+ This event script (along with 06.nfs) provides CTDB's NFS
+ service management.
+
+ This includes parameters for the kernel NFS server.
+ Alternative NFS subsystems (such as NFS-Ganesha)
+ can be integrated using CTDB_NFS_CALLOUT.
+
+ CTDB_MANAGES_NFS=yes|no
+
+ Should CTDB manage NFS?
+
+ Default is no.
+
+ CTDB_NFS_CALLOUT=COMMAND
+
+ COMMAND specifies the path to a callout to handle
+ interactions with the configured NFS system, including
+ startup, shutdown, monitoring.
+
+ Default is the included
+ nfs-linux-kernel-callout.
+
+ CTDB_NFS_CHECKS_DIR=DIRECTORY
+
+ Specifies the path to a DIRECTORY containing files that
+ describe how to monitor the responsiveness of NFS RPC
+ services. See the README file for this directory for an
+ explanation of the contents of these "check" files.
+
+ CTDB_NFS_CHECKS_DIR can be used to point to different
+ sets of checks for different NFS servers.
+
+ One way of using this is to have it point to, say,
+ /usr/local/etc/ctdb/nfs-checks-enabled.d
+ and populate it with symbolic links to the desired check
+ files. This avoids duplication and is upgrade-safe.
+
+ Default is
+ /usr/local/etc/ctdb/nfs-checks.d,
+ which contains NFS RPC checks suitable for Linux kernel
+ NFS.
+
+ CTDB_NFS_SKIP_SHARE_CHECK=yes|no
+
+ As part of monitoring, should CTDB skip the check for
+ the existence of each directory exported via NFS. This
+ may be desirable if there is a large number of exports.
+
+ Default is no.
+
+ CTDB_RPCINFO_LOCALHOST=IPADDR|HOSTNAME
+
+ IPADDR or HOSTNAME indicates the address that
+ rpcinfo should connect to when doing
+ rpcinfo check on IPv4 RPC service during
+ monitoring. Optimally this would be "localhost".
+ However, this can add some performance overheads.
+
+ Default is "127.0.0.1".
+
+ CTDB_RPCINFO_LOCALHOST6=IPADDR|HOSTNAME
+
+ IPADDR or HOSTNAME indicates the address that
+ rpcinfo should connect to when doing
+ rpcinfo check on IPv6 RPC service
+ during monitoring. Optimally this would be "localhost6"
+ (or similar). However, this can add some performance
+ overheads.
+
+ Default is "::1".
+
+ CTDB_NFS_STATE_FS_TYPE=TYPE
+
+ The type of filesystem used for a clustered NFS' shared
+ state. No default.
+
+ CTDB_NFS_STATE_MNT=DIR
+
+ The directory where a clustered NFS' shared state will be
+ located. No default.
+
+ 70.iscsi
+
+ Provides CTDB's Linux iSCSI tgtd service management.
+
+ CTDB_MANAGES_ISCSI=yes|no
+
+ Should CTDB manage iSCSI tgtd?
+
+ Default is no.
+
+ CTDB_START_ISCSI_SCRIPTS=DIRECTORY
+
+ DIRECTORY on shared storage containing scripts to start
+ tgtd for each public IP address.
+
+ No default.
+
+ DATABASE SETUP
+
+ CTDB checks the consistency of databases during startup.
+
+ 00.ctdb
+
+ CTDB_MAX_CORRUPT_DB_BACKUPS=NUM
+
+ NUM is the maximum number of volatile TDB database
+ backups to be kept (for each database) when a corrupt
+ database is found during startup. Volatile TDBs are
+ zeroed during startup so backups are needed to debug
+ any corruption that occurs before a restart.
+
+ Default is 10.
+
+ SYSTEM RESOURCE MONITORING
+
+ 05.system
+
+ Provides CTDB's filesystem and memory usage monitoring.
+
+ CTDB can experience seemingly random (performance and other)
+ issues if system resources become too constrained. Options in
+ this section can be enabled to allow certain system resources
+ to be checked. They allow warnings to be logged and nodes to
+ be marked unhealthy when system resource usage reaches the
+ configured thresholds.
+
+ Some checks are enabled by default. It is recommended that
+ these checks remain enabled or are augmented by extra checks.
+ There is no supported way of completely disabling the checks.
+
+ CTDB_MONITOR_FILESYSTEM_USAGE=FS-LIMIT-LIST
+
+ FS-LIMIT-LIST is a space-separated list of
+ FILESYSTEM:WARN_LIMIT:UNHEALTHY_LIMIT
+ triples indicating that warnings should be logged if the
+ space used on FILESYSTEM reaches WARN_LIMIT%. If usage
+ reaches UNHEALTHY_LIMIT then the node should be flagged
+ unhealthy. Either WARN_LIMIT or UNHEALTHY_LIMIT may be
+ left blank, meaning that check will be omitted.
+
+ Default is to warn for each filesystem containing a
+ database directory (CTDB_DBDIR,
+ CTDB_DBDIR_PERSISTENT,
+ CTDB_DBDIR_STATE) with a threshold of
+ 90%.
+
+ CTDB_MONITOR_MEMORY_USAGE=MEM-LIMITS
+
+ MEM-LIMITS takes the form
+ WARN_LIMIT:UNHEALTHY_LIMIT
+ indicating that warnings should be logged if memory
+ usage reaches WARN_LIMIT%. If usage reaches
+ UNHEALTHY_LIMIT then the node should be flagged
+ unhealthy. Either WARN_LIMIT or UNHEALTHY_LIMIT may be
+ left blank, meaning that check will be omitted.
+
+ Default is 80, so warnings will be logged when memory
+ usage reaches 80%.
+ + + + + + + CTDB_MONITOR_SWAP_USAGE=SWAP-LIMITS + + + + SWAP-LIMITS takes the form + WARN_LIMIT:UNHEALTHY_LIMIT + indicating that warnings should be logged if + swap usage reaches WARN_LIMIT%. If usage reaches + UNHEALTHY_LIMIT then the node should be flagged + unhealthy. Either WARN_LIMIT or UNHEALTHY_LIMIT may be + left blank, meaning that check will be omitted. + + + Default is 25, so warnings will be logged when swap + usage reaches 25%. + + + + + + + + + + + + EVENT SCRIPT DEBUGGING + + + + debug-hung-script.sh + + + + + + CTDB_DEBUG_HUNG_SCRIPT_STACKPAT=REGEXP + + + REGEXP specifies interesting processes for which stack + traces should be logged when debugging hung eventscripts + and those processes are matched in pstree output. + REGEXP is an extended regexp so choices are separated by + pipes ('|'). However, REGEXP should not contain + parentheses. See also + CTDB_DEBUG_HUNG_SCRIPT. + + + Default is "exportfs|rpcinfo". + + + + + + + + + + + FILES + + + /usr/local/etc/ctdb/script.options + + + + + SEE ALSO + + ctdbd + 1, + + ctdb + 7, + + + + + + + + + This documentation was written by + Amitay Isaacs, + Martin Schwenke + + + + + 2007 + Andrew Tridgell + Ronnie Sahlberg + + + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 3 of + the License, or (at your option) any later version. + + + This program is distributed in the hope that it will be + useful, but WITHOUT ANY WARRANTY; without even the implied + warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the GNU General Public License for more details. + + + You should have received a copy of the GNU General Public + License along with this program; if not, see + . + + + + + diff --git a/ctdb/doc/ctdb.7.xml b/ctdb/doc/ctdb.7.xml index a419438f5ee..7de9455d5e8 100644 --- a/ctdb/doc/ctdb.7.xml +++ b/ctdb/doc/ctdb.7.xml @@ -728,7 +728,7 @@ CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1 See the NAT GATEWAY section in - ctdbd.conf + ctdb-script.options 5 for more details of NATGW configuration. @@ -780,7 +780,7 @@ CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1 This is implemented in the 11.natgw eventscript. Please see the eventscript file and the NAT GATEWAY section in - ctdbd.conf + ctdb-script.options 5 for more details. @@ -812,7 +812,7 @@ CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1 CTDB_PER_IP_ROUTING_TABLE_ID_LOW, CTDB_PER_IP_ROUTING_TABLE_ID_HIGH. See the POLICY ROUTING section in - ctdbd.conf + ctdb-script.options 5 for more details. @@ -1077,6 +1077,9 @@ CTDB_CAPABILITY_RECMASTER=no ctdbd.conf 5, + ctdb-script.options + 5, + ctdb-statistics 7, diff --git a/ctdb/doc/ctdbd.conf.5.xml b/ctdb/doc/ctdbd.conf.5.xml index 2f230756a24..95a8ae7e251 100644 --- a/ctdb/doc/ctdbd.conf.5.xml +++ b/ctdb/doc/ctdbd.conf.5.xml @@ -185,23 +185,6 @@ - - CTDB_DEBUG_HUNG_SCRIPT_STACKPAT=REGEXP - - - REGEXP specifies interesting processes for which stack - traces should be logged when debugging hung eventscripts - and those processes are matched in pstree output. REGEXP - is an extended regexp so choices are separated by pipes - ('|'). However, REGEXP should not contain parentheses. - See also CTDB_DEBUG_HUNG_SCRIPT. - - - Default is "exportfs|rpcinfo". 
- - - - CTDB_DEBUG_LOCKS=FILENAME @@ -477,1030 +460,6 @@ - - NETWORK CONFIGURATION - - - PUBLIC IP ADDRESS FAILOVER - - - Eventscripts - - - 10.interface - - - - - - - - CTDB_PARTIALLY_ONLINE_INTERFACES=yes|no - - - - Whether one or more offline interfaces should cause a - monitor event to fail if there are other interfaces that - are up. If this is "yes" and a node has some interfaces - that are down then ctdb status will - display the node as "PARTIALLYONLINE". - - - - Note that CTDB_PARTIALLY_ONLINE_INTERFACES=yes is not - generally compatible with NAT gateway or LVS. NAT - gateway relies on the interface configured by - CTDB_NATGW_PUBLIC_IFACE to be up and LVS replies on - CTDB_LVS_PUBLIC_IFACE to be up. CTDB does not check if - these options are set in an incompatible way so care is - needed to understand the interaction. - - - - Default is "no". - - - - - - - - - NAT GATEWAY - - - NAT gateway is used to configure fallback routing for nodes - when they do not host any public IP addresses. For example, - it allows unhealthy nodes to reliably communicate with - external infrastructure. One node in a NAT gateway group will - be designated as the NAT gateway master node and other (slave) - nodes will be configured with fallback routes via the NAT - gateway master node. For more information, see the - NAT GATEWAY section in - ctdb - 7. - - - - - - CTDB_NATGW_DEFAULT_GATEWAY=IPADDR - - - IPADDR is an alternate network gateway to use on the NAT - gateway master node. If set, a fallback default route - is added via this network gateway. - - - No default. Setting this variable is optional - if not - set that no route is created on the NAT gateway master - node. - - - - - - CTDB_NATGW_NODES=FILENAME - - - FILENAME contains the list of nodes that belong to the - same NAT gateway group. - - - File format: - -IPADDR slave-only - - - - IPADDR is the private IP address of each node in the NAT - gateway group. - - - If "slave-only" is specified then the corresponding node - can not be the NAT gateway master node. In this case - CTDB_NATGW_PUBLIC_IFACE and - CTDB_NATGW_PUBLIC_IP are optional and - unused. - - - No default, usually - /usr/local/etc/ctdb/natgw_nodes when enabled. - - - - - - CTDB_NATGW_PRIVATE_NETWORK=IPADDR/MASK - - - IPADDR/MASK is the private sub-network that is - internally routed via the NAT gateway master node. This - is usually the private network that is used for node - addresses. - - - No default. - - - - - - CTDB_NATGW_PUBLIC_IFACE=IFACE - - - IFACE is the network interface on which the - CTDB_NATGW_PUBLIC_IP will be configured. - - - No default. - - - - - - CTDB_NATGW_PUBLIC_IP=IPADDR/MASK - - - IPADDR/MASK indicates the IP address that is used for - outgoing traffic (originating from - CTDB_NATGW_PRIVATE_NETWORK) on the NAT gateway master - node. This must not be a - configured public IP address. - - - No default. - - - - - - CTDB_NATGW_STATIC_ROUTES=IPADDR/MASK[@GATEWAY] ... - - - Each IPADDR/MASK identifies a network or host to which - NATGW should create a fallback route, instead of - creating a single default route. This can be used when - there is already a default route, via an interface that - can not reach required infrastructure, that overrides - the NAT gateway default route. - - - If GATEWAY is specified then the corresponding route on - the NATGW master node will be via GATEWAY. Such routes - are created even if - CTDB_NATGW_DEFAULT_GATEWAY is not - specified. 
If GATEWAY is not specified for some - networks then routes are only created on the NATGW - master node for those networks if - CTDB_NATGW_DEFAULT_GATEWAY is - specified. - - - This should be used with care to avoid causing traffic - to unnecessarily double-hop through the NAT gateway - master, even when a node is hosting public IP addresses. - Each specified network or host should probably have a - corresponding automatically created link route or static - route to avoid this. - - - No default. - - - - - - - - Example - -CTDB_NATGW_NODES=/usr/local/etc/ctdb/natgw_nodes -CTDB_NATGW_PRIVATE_NETWORK=192.168.1.0/24 -CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1 -CTDB_NATGW_PUBLIC_IP=10.0.0.227/24 -CTDB_NATGW_PUBLIC_IFACE=eth0 - - - - A variation that ensures that infrastructure (ADS, DNS, ...) - directly attached to the public network (10.0.0.0/24) is - always reachable would look like this: - - -CTDB_NATGW_NODES=/usr/local/etc/ctdb/natgw_nodes -CTDB_NATGW_PRIVATE_NETWORK=192.168.1.0/24 -CTDB_NATGW_PUBLIC_IP=10.0.0.227/24 -CTDB_NATGW_PUBLIC_IFACE=eth0 -CTDB_NATGW_STATIC_ROUTES=10.0.0.0/24 - - - Note that CTDB_NATGW_DEFAULT_GATEWAY is - not specified. - - - - - - - POLICY ROUTING - - - A node running CTDB may be a component of a complex network - topology. In particular, public addresses may be spread - across several different networks (or VLANs) and it may not be - possible to route packets from these public addresses via the - system's default route. Therefore, CTDB has support for - policy routing via the 13.per_ip_routing - eventscript. This allows routing to be specified for packets - sourced from each public address. The routes are added and - removed as CTDB moves public addresses between nodes. - - - - For more information, see the POLICY - ROUTING section in - ctdb - 7. - - - - - CTDB_PER_IP_ROUTING_CONF=FILENAME - - - FILENAME contains elements for constructing the desired - routes for each source address. - - - - The special FILENAME value - __auto_link_local__ indicates that no - configuration file is provided and that CTDB should - generate reasonable link-local routes for each public IP - address. - - - - File format: - -IPADDR DEST-IPADDR/MASK GATEWAY-IPADDR - - - - - No default, usually - /usr/local/etc/ctdb/policy_routing when enabled. - - - - - - CTDB_PER_IP_ROUTING_RULE_PREF=NUM - - - NUM sets the priority (or preference) for the routing - rules that are added by CTDB. - - - - This should be (strictly) greater than 0 and (strictly) - less than 32766. A priority of 100 is recommended, unless - this conflicts with a priority already in use on the - system. See - ip - 8, for more details. - - - - - - - CTDB_PER_IP_ROUTING_TABLE_ID_LOW=LOW-NUM, - CTDB_PER_IP_ROUTING_TABLE_ID_HIGH=HIGH-NUM - - - - CTDB determines a unique routing table number to use for - the routing related to each public address. LOW-NUM and - HIGH-NUM indicate the minimum and maximum routing table - numbers that are used. - - - - ip - 8 uses some - reserved routing table numbers below 255. Therefore, - CTDB_PER_IP_ROUTING_TABLE_ID_LOW should be (strictly) - greater than 255. - - - - CTDB uses the standard file - /etc/iproute2/rt_tables to maintain - a mapping between the routing table numbers and labels. - The label for a public address - ADDR will look like - ctdb.addr. This means that - the associated rules and routes are easy to read (and - manipulate). - - - - No default, usually 1000 and 9000. 
- - - - - - - Example - -CTDB_PER_IP_ROUTING_CONF=/usr/local/etc/ctdb/policy_routing -CTDB_PER_IP_ROUTING_RULE_PREF=100 -CTDB_PER_IP_ROUTING_TABLE_ID_LOW=1000 -CTDB_PER_IP_ROUTING_TABLE_ID_HIGH=9000 - - - - - - - LVS - - - For a general description see the LVS - section in ctdb - 7. - - - - Eventscript - - - 91.lvs - - - - - - - CTDB_LVS_NODES=FILENAME - - - FILENAME contains the list of nodes that belong to the - same LVS group. - - - File format: - -IPADDR slave-only - - - - IPADDR is the private IP address of each node in the LVS - group. - - - If "slave-only" is specified then the corresponding node - can not be the LVS master node. In this case - CTDB_LVS_PUBLIC_IFACE and - CTDB_LVS_PUBLIC_IP are optional and - unused. - - - No default, usually - /usr/local/etc/ctdb/lvs_nodes when enabled. - - - - - - CTDB_LVS_PUBLIC_IFACE=INTERFACE - - - INTERFACE is the network interface that clients will use - to connection to CTDB_LVS_PUBLIC_IP. - This is optional for slave-only nodes. - No default. - - - - - - CTDB_LVS_PUBLIC_IP=IPADDR - - - CTDB_LVS_PUBLIC_IP is the LVS public address. No - default. - - - - - - - - - - - SERVICE CONFIGURATION - - - CTDB can be configured to manage and/or monitor various NAS (and - other) services via its eventscripts. - - - - In the simplest case CTDB will manage a service. This means the - service will be started and stopped along with CTDB, CTDB will - monitor the service and CTDB will do any required - reconfiguration of the service when public IP addresses are - failed over. - - - - SAMBA - - - Eventscripts - - - 49.winbind - 50.samba - - - - - - - CTDB_MANAGES_SAMBA=yes|no - - - Should CTDB manage Samba? - - - Default is no. - - - - - - CTDB_MANAGES_WINBIND=yes|no - - - Should CTDB manage Winbind? - - - Default is no. - - - - - - CTDB_SAMBA_CHECK_PORTS=PORT-LIST - - - When monitoring Samba, check TCP ports in - space-separated PORT-LIST. - - - Default is to monitor ports that Samba is configured to listen on. - - - - - - CTDB_SAMBA_SKIP_SHARE_CHECK=yes|no - - - As part of monitoring, should CTDB skip the check for - the existence of each directory configured as share in - Samba. This may be desirable if there is a large number - of shares. - - - Default is no. - - - - - - CTDB_SERVICE_NMB=SERVICE - - - Distribution specific SERVICE for managing nmbd. - - - Default is distribution-dependant. - - - - - CTDB_SERVICE_SMB=SERVICE - - - Distribution specific SERVICE for managing smbd. - - - Default is distribution-dependant. - - - - - - CTDB_SERVICE_WINBIND=SERVICE - - - Distribution specific SERVICE for managing winbindd. - - - Default is "winbind". - - - - - - - - - - NFS - - - This includes parameters for the kernel NFS server. - Alternative NFS subsystems (such as NFS-Ganesha) - can be integrated using CTDB_NFS_CALLOUT. - - - - Eventscript - - - 60.nfs - - - - - - - CTDB_MANAGES_NFS=yes|no - - - Should CTDB manage NFS? - - - Default is no. - - - - - - CTDB_NFS_CALLOUT=COMMAND - - - COMMAND specifies the path to a callout to handle - interactions with the configured NFS system, including - startup, shutdown, monitoring. - - - Default is the included - nfs-linux-kernel-callout. - - - - - - CTDB_NFS_CHECKS_DIR=DIRECTORY - - - Specifies the path to a DIRECTORY containing files that - describe how to monitor the responsiveness of NFS RPC - services. See the README file for this directory for an - explanation of the contents of these "check" files. - - - CTDB_NFS_CHECKS_DIR can be used to point to different - sets of checks for different NFS servers. 
- - - One way of using this is to have it point to, say, - /usr/local/etc/ctdb/nfs-checks-enabled.d - and populate it with symbolic links to the desired check - files. This avoids duplication and is upgrade-safe. - - - Default is - /usr/local/etc/ctdb/nfs-checks.d, - which contains NFS RPC checks suitable for Linux kernel - NFS. - - - - - - CTDB_NFS_SKIP_SHARE_CHECK=yes|no - - - As part of monitoring, should CTDB skip the check for - the existence of each directory exported via NFS. This - may be desirable if there is a large number of exports. - - - Default is no. - - - - - - CTDB_RPCINFO_LOCALHOST=IPADDR|HOSTNAME - - - IPADDR or HOSTNAME indicates the address that - rpcinfo should connect to when doing - rpcinfo check on IPv4 RPC service during - monitoring. Optimally this would be "localhost". - However, this can add some performance overheads. - - - Default is "127.0.0.1". - - - - - - CTDB_RPCINFO_LOCALHOST6=IPADDR|HOSTNAME - - - IPADDR or HOSTNAME indicates the address that - rpcinfo should connect to when doing - rpcinfo check on IPv6 RPC service - during monitoring. Optimally this would be "localhost6" - (or similar). However, this can add some performance - overheads. - - - Default is "::1". - - - - - - CTDB_NFS_STATE_FS_TYPE=TYPE - - - The type of filesystem used for a clustered NFS' shared - state. No default. - - - - - - CTDB_NFS_STATE_MNT=DIR - - - The directory where a clustered NFS' shared state will be - located. No default. - - - - - - - - - - APACHE HTTPD - - - CTDB can manage the Apache web server. - - - - Eventscript - - - 41.httpd - - - - - - CTDB_MANAGES_HTTPD=yes|no - - - Should CTDB manage the Apache web server? - - - Default is no. - - - - - - - - CLAMAV - - - CTDB has support to manage the popular anti-virus daemon - ClamAV. - - - - Eventscript - - - 31.clamd - - - - This eventscript is not enabled by default. Use - ctdb enablescript to enable it. - - - - - - - - CTDB_MANAGES_CLAMD=yes|no - - - Should CTDB manage ClamAV? - - - Default is no. - - - - - - CTDB_CLAMD_SOCKET=FILENAME - - - FILENAME is the socket to monitor ClamAV. - - - No default. - - - - - - - - - - ISCSI - - - CTDB has support for managing the Linux iSCSI tgtd service. - - - - Eventscript - - - 70.iscsi - - - - - - - CTDB_MANAGES_ISCSI=yes|no - - - Should CTDB manage iSCSI tgtd? - - - Default is no. - - - - - - CTDB_START_ISCSI_SCRIPTS=DIRECTORY - - - DIRECTORY on shared storage containing scripts to start - tgtd for each public IP address. - - - No default. - - - - - - - - MULTIPATHD - - - CTDB can monitor multipath devices to ensure that active paths - are available. - - - - Eventscript - - - 20.multipathd - - - - This eventscript is not enabled by default. Use - ctdb enablescript to enable it. - - - - - - CTDB_MONITOR_MPDEVICES=MP-DEVICE-LIST - - - MP-DEVICE-LIST is a list of multipath devices for CTDB to monitor? - - - No default. - - - - - - - - VSFTPD - - - CTDB can manage the vsftpd FTP server. - - - - Eventscript - - - 40.vsftpd - - - - - - CTDB_MANAGES_VSFTPD=yes|no - - - Should CTDB manage the vsftpd FTP server? - - - Default is no. - - - - - - - - - DATABASE SETUP - - - - CTDB checks the consistency of databases during startup. - - - - Eventscripts - - - 00.ctdb - - - - - - - - CTDB_MAX_CORRUPT_DB_BACKUPS=NUM - - - NUM is the maximum number of volatile TDB database backups - to be kept (for each database) when a corrupt database is - found during startup. Volatile TDBs are zeroed during - startup so backups are needed to debug any corruption that - occurs before a restart. 
- - - Default is 10. - - - - - - - - - - SYSTEM RESOURCE MONITORING CONFIGURATION - - - - CTDB can experience seemingly random (performance and other) - issues if system resources become too constrained. Options in - this section can be enabled to allow certain system resources - to be checked. They allows warnings to be logged and nodes to - be marked unhealthy when system resource usage reaches the - configured thresholds. - - - - Some checks are enabled by default. It is recommended that - these checks remain enabled or are augmented by extra checks. - There is no supported way of completely disabling the checks. - - - - Eventscripts - - - 05.system - - - - Filesystem and memory usage monitoring is in - 05.system. - - - - - - - CTDB_MONITOR_FILESYSTEM_USAGE=FS-LIMIT-LIST - - - FS-LIMIT-LIST is a space-separated list of - FILESYSTEM:WARN_LIMIT:UNHEALTHY_LIMIT - triples indicating that warnings should be logged if the - space used on FILESYSTEM reaches WARN_LIMIT%. If usage - reaches UNHEALTHY_LIMIT then the node should be flagged - unhealthy. Either WARN_LIMIT or UNHEALTHY_LIMIT may be - left blank, meaning that check will be omitted. - - - - Default is to warn for each filesystem containing a - database directory (CTDB_DBDIR, - CTDB_DBDIR_PERSISTENT, - CTDB_DBDIR_STATE) with a threshold of - 90%. - - - - - - CTDB_MONITOR_MEMORY_USAGE=MEM-LIMITS - - - MEM-LIMITS takes the form - WARN_LIMIT:UNHEALTHY_LIMIT - indicating that warnings should be logged if memory - usage reaches WARN_LIMIT%. If usage reaches - UNHEALTHY_LIMIT then the node should be flagged - unhealthy. Either WARN_LIMIT or UNHEALTHY_LIMIT may be - left blank, meaning that check will be omitted. - - - Default is 80, so warnings will be logged when memory - usage reaches 80%. - - - - - - CTDB_MONITOR_SWAP_USAGE=SWAP-LIMITS - - - SWAP-LIMITS takes the form - WARN_LIMIT:UNHEALTHY_LIMIT - indicating that warnings should be logged if - swap usage reaches WARN_LIMIT%. If usage reaches - UNHEALTHY_LIMIT then the node should be flagged - unhealthy. Either WARN_LIMIT or UNHEALTHY_LIMIT may be - left blank, meaning that check will be omitted. - - - Default is 25, so warnings will be logged when swap - usage reaches 25%. - - - - - - - - - FILES diff --git a/ctdb/packaging/RPM/ctdb.spec.in b/ctdb/packaging/RPM/ctdb.spec.in index 74119d5e5b3..95850517579 100644 --- a/ctdb/packaging/RPM/ctdb.spec.in +++ b/ctdb/packaging/RPM/ctdb.spec.in @@ -221,6 +221,7 @@ rm -rf $RPM_BUILD_ROOT %{_mandir}/man1/onnode.1.gz %{_mandir}/man1/ltdbtool.1.gz %{_mandir}/man1/ping_pong.1.gz +%{_mandir}/man5/ctdb-script.options.5.gz %{_mandir}/man5/ctdbd.conf.5.gz %{_mandir}/man7/ctdb.7.gz %{_mandir}/man7/ctdb-statistics.7.gz diff --git a/ctdb/wscript b/ctdb/wscript index 9643be2e506..95091ce0564 100644 --- a/ctdb/wscript +++ b/ctdb/wscript @@ -48,6 +48,7 @@ manpages_misc = [ 'ctdb_diagnostics.1', 'ctdbd_wrapper.1', 'onnode.1', + 'ctdb-script.options.5', 'ctdbd.conf.5', 'ctdb.7', 'ctdb-statistics.7', -- 2.34.1