# Avoid using root's TMPDIR
unset TMPDIR
-. /etc/ctdb/functions
+[ -z "$CTDB_BASE" ] && {
+ export CTDB_BASE="/etc/ctdb"
+}
+
+. $CTDB_BASE/functions
loadconfig network
loadconfig ctdb
[ $count -gt 10 ] && {
echo -n $"killing ctdbd "
killall -q -9 ctdbd
- pkill -9 -f /etc/ctdb/events.d/
+ pkill -9 -f $CTDB_BASE/events.d/
}
done
case $init_style in
# releaseip : called when an IP address is released
# recovered : called when ctdb has finished a recovery event
-. /etc/ctdb/functions
+. $CTDB_BASE/functions
loadconfig ctdb
# ensure we have /bin and /usr/bin in the path
case $cmd in
startup)
# make sure we have a blank state directory for the scripts to work with
- /bin/rm -rf /etc/ctdb/state
- /bin/mkdir -p /etc/ctdb/state
+ /bin/rm -rf $CTDB_BASE/state
+ /bin/mkdir -p $CTDB_BASE/state
# set any tunables from the config file
set | grep ^CTDB_SET_ | cut -d_ -f3- |
# this adds/removes IPs from your
# public interface
-. /etc/ctdb/functions
+. $CTDB_BASE/functions
loadconfig ctdb
cmd="$1"
shift
[ -z "$CTDB_PUBLIC_ADDRESSES" ] && {
- CTDB_PUBLIC_ADDRESSES=/etc/ctdb/public_addresses
+ CTDB_PUBLIC_ADDRESSES=$CTDB_BASE/public_addresses
}
[ ! -f "$CTDB_PUBLIC_ADDRESSES" ] && {
_failed=0
_killcount=0
- connfile="/etc/ctdb/state/connections.$_IP"
+ connfile="$CTDB_BASE/state/connections.$_IP"
netstat -tn |egrep "^tcp.*\s+$_IP:.*ESTABLISHED" | awk '{print $4" "$5}' > $connfile
while read dest src; do
srcip=`echo $src | cut -d: -f1`
#!/bin/sh
# event script to manage vsftpd in a cluster environment
-. /etc/ctdb/functions
+. $CTDB_BASE/functions
loadconfig vsftpd
[ "$CTDB_MANAGES_VSFTPD" = "yes" ] || exit 0
case $cmd in
startup)
- /bin/mkdir -p /etc/ctdb/state/vsftpd
+ /bin/mkdir -p $CTDB_BASE/state/vsftpd
# make sure the service is stopped first
service vsftpd stop > /dev/null 2>&1
;;
takeip)
- echo "restart" >> /etc/ctdb/state/vsftpd/restart
+ echo "restart" >> $CTDB_BASE/state/vsftpd/restart
;;
releaseip)
- echo "restart" >> /etc/ctdb/state/vsftpd/restart
+ echo "restart" >> $CTDB_BASE/state/vsftpd/restart
;;
recovered)
# if we have taken or released any ips we must
# restart vsftpd to ensure that all tcp connections are reset
- [ -f /etc/ctdb/state/vsftpd/restart ] && {
+ [ -f $CTDB_BASE/state/vsftpd/restart ] && {
service vsftpd stop > /dev/null 2>&1
service vsftpd start
} >/dev/null 2>&1
- /bin/rm -f /etc/ctdb/state/vsftpd/restart
+ /bin/rm -f $CTDB_BASE/state/vsftpd/restart
;;
esac
PATH=/bin:/usr/bin:$PATH
-. /etc/ctdb/functions
+. $CTDB_BASE/functions
loadconfig ctdb
cmd="$1"
case $cmd in
startup)
# create the state directory for samba
- /bin/mkdir -p /etc/ctdb/state/samba
+ /bin/mkdir -p $CTDB_BASE/state/samba
# wait for all shared directories to become available
smb_dirs=`testparm -s 2> /dev/null | egrep '^\s*path = ' | cut -d= -f2`
monitor)
# Create a dummy file to track when we need to do periodic cleanup
# of samba databases
- [ -f /etc/ctdb/state/samba/periodic_cleanup ] || {
- touch /etc/ctdb/state/samba/periodic_cleanup
+ [ -f $CTDB_BASE/state/samba/periodic_cleanup ] || {
+ touch $CTDB_BASE/state/samba/periodic_cleanup
}
- [ `/usr/bin/find /etc/ctdb/state/samba/periodic_cleanup -mmin +$SAMBA_CLEANUP_PERIOD | wc -l` -eq 1 ] && {
+ [ `/usr/bin/find $CTDB_BASE/state/samba/periodic_cleanup -mmin +$SAMBA_CLEANUP_PERIOD | wc -l` -eq 1 ] && {
# Cleanup the databases
periodic_cleanup
- touch /etc/ctdb/state/samba/periodic_cleanup
+ touch $CTDB_BASE/state/samba/periodic_cleanup
}
testparm -s 2>&1 | egrep '^WARNING|^ERROR|^Unknown' && {
#!/bin/sh
# script to manage nfs in a clustered environment
-. /etc/ctdb/functions
+. $CTDB_BASE/functions
loadconfig nfs
[ "$CTDB_MANAGES_NFS" = "yes" ] || exit 0
case $cmd in
startup)
- /bin/mkdir -p /etc/ctdb/state/nfs
- /bin/mkdir -p /etc/ctdb/state/statd/ip
+ /bin/mkdir -p $CTDB_BASE/state/nfs
+ /bin/mkdir -p $CTDB_BASE/state/statd/ip
/bin/mkdir -p $STATD_SHARED_DIRECTORY
ctdb_wait_directories "nfslock" "$STATD_SHARED_DIRECTORY"
takeip)
ip=$2
- echo $ip >> /etc/ctdb/state/statd/restart
+ echo $ip >> $CTDB_BASE/state/statd/restart
# having a list of what IPs we have allows statd to do the right
- # thing via /etc/ctdb/statd-callout
- /bin/touch /etc/ctdb/state/statd/ip/$ip
+ # thing via $CTDB_BASE/statd-callout
+ /bin/touch $CTDB_BASE/state/statd/ip/$ip
exit 0
;;
ip=$2
maskbits=$3
- echo $ip >> /etc/ctdb/state/statd/restart
- /bin/rm -f /etc/ctdb/state/statd/ip/$ip
+ echo $ip >> $CTDB_BASE/state/statd/restart
+ /bin/rm -f $CTDB_BASE/state/statd/ip/$ip
exit 0
;;
recovered)
# always restart the lockmanager so that we start with a clusterwide
# grace period when ip addresses have changed
- [ -x /etc/ctdb/statd-callout ] && {
- /etc/ctdb/statd-callout notify &
+ [ -x $CTDB_BASE/statd-callout ] && {
+ $CTDB_BASE/statd-callout notify &
} >/dev/null 2>&1
- /bin/rm -f /etc/ctdb/state/statd/restart
+ /bin/rm -f $CTDB_BASE/state/statd/restart
;;
monitor)
PATH=/bin:/usr/bin:$PATH
-. /etc/ctdb/functions
+. $CTDB_BASE/functions
loadconfig nfs
cmd="$1"
case $cmd in
startup)
- mkdir -p /etc/ctdb/state/nfstickle
+ mkdir -p $CTDB_BASE/state/nfstickle
mkdir -p $NFS_TICKLE_SHARED_DIRECTORY/`hostname`
# we rely on fast tcp wait1 recycling
echo 1 > /proc/sys/net/ipv4/tcp_tw_recycle
. /etc/sysconfig/$name
elif [ -f /etc/default/$name ]; then
. /etc/default/$name
- elif [ -f /etc/ctdb/sysconfig/$name ]; then
- . /etc/ctdb/sysconfig/$name
+ elif [ -f $CTDB_BASE/sysconfig/$name ]; then
+ . $CTDB_BASE/sysconfig/$name
fi
}
# /etc/sysconfig/nfs:
# STATD_HOSTNAME="myhostname -H /etc/ctdb/statd-callout"
-. /etc/ctdb/functions
+[ -z "$CTDB_BASE" ] && {
+ export CTDB_BASE="/etc/ctdb"
+}
+
+. $CTDB_BASE/functions
loadconfig nfs
[ -z "$STATD_SHARED_DIRECTORY" ] && {
add-client)
# the callout does not tell us to which ip the client connected
# so we must add it to all the ips that we serve
- for f in `/bin/ls /etc/ctdb/state/statd/ip/*`; do
+ for f in `/bin/ls $CTDB_BASE/state/statd/ip/*`; do
ip=`/bin/basename $f`
[ -d $STATD_SHARED_DIRECTORY/$ip ] || /bin/mkdir $STATD_SHARED_DIRECTORY/$ip
/bin/touch $STATD_SHARED_DIRECTORY/$ip/$2
del-client)
# the callout does not tell us to which ip the client connected
# so we must add it to all the ips that we serve
- for f in `/bin/ls /etc/ctdb/state/statd/ip/*`; do
+ for f in `/bin/ls $CTDB_BASE/state/statd/ip/*`; do
ip=`/bin/basename $f`
/bin/rm -f $STATD_SHARED_DIRECTORY/$ip/$2
done
sleep 2
# copy all monitored clients on this node to the local lockmanager
- for f in `/bin/ls /etc/ctdb/state/statd/ip/* 2>/dev/null`; do
+ for f in `/bin/ls $CTDB_BASE/state/statd/ip/* 2>/dev/null`; do
ip=`/bin/basename $f`
[ -d $STATD_SHARED_DIRECTORY/$ip ] && [ -x /usr/bin/smnotify ] && {
for g in `/bin/ls $STATD_SHARED_DIRECTORY/$ip/* 2>/dev/null`; do
# Both 2a and 2b are commonly used in lockmanagers since they maximize
# probability that the client will accept the statd notify packet and
# not just ignore it.
- for f in `/bin/ls /etc/ctdb/state/statd/ip/* 2>/dev/null`; do
+ for f in `/bin/ls $CTDB_BASE/state/statd/ip/* 2>/dev/null`; do
ip=`/bin/basename $f`
[ -d $STATD_SHARED_DIRECTORY/$ip ] && [ -x /usr/bin/smnotify ] && {
for g in `/bin/ls $STATD_SHARED_DIRECTORY/$ip/* 2>/dev/null`; do
ctdb->do_setsched = !options.no_setsched;
+ /* set up an environment variable for the event scripts to use to find the
+    installation directory */
+ setenv("CTDB_BASE", ETCDIR "/ctdb", 1);
+
/* start the protocol running (as a child) */
return ctdb_start_daemon(ctdb, interactive?False:True);
}