# Event script to manage NFS in a clustered (CTDB) environment
# Do nothing unless CTDB has been configured to manage NFS and a shared
# directory for statd state has been configured.
[ "$CTDB_MANAGES_NFS" = "yes" ] || exit 0
[ -z "$STATD_SHARED_DIRECTORY" ] && exit 0

PATH=/usr/bin:/bin:/usr/sbin:/sbin:$PATH

# Create the local state directories used by the statd callout machinery
# and the cluster-shared statd directory, then wait until the shared
# directory is actually reachable before going any further.
# NOTE(review): ctdb_wait_directories is provided by the sourced CTDB
# functions file, outside this fragment.
/bin/mkdir -p "$CTDB_BASE/state/nfs"
/bin/mkdir -p "$CTDB_BASE/state/statd/ip"
/bin/mkdir -p "$STATD_SHARED_DIRECTORY"
ctdb_wait_directories "nfslock" "$STATD_SHARED_DIRECTORY"
# Wait for all NFS-exported directories to become available.  The first
# whitespace-separated field of each non-comment line in /etc/exports is
# the export path; awk splits on any whitespace, so tab-separated entries
# are handled too (cut -d' ' -f1 missed them).
# NOTE(review): export paths containing embedded whitespace are still
# not supported, matching the original behaviour.
nfs_dirs=$(awk '!/^#/ {print $1}' /etc/exports)
# $nfs_dirs is deliberately left unquoted so each path becomes a
# separate argument.
ctdb_wait_directories "NFS" $nfs_dirs

# Make sure NFS is stopped before we start it, or it may get a bind error.
service nfs stop > /dev/null 2>&1
service nfslock stop > /dev/null 2>&1
# Record that an IP address arrived on this node so the lock manager is
# later restarted with a fresh grace period (the restart flag is consumed
# in the recovered event).
# NOTE(review): $ip is set by the surrounding takeip event handling,
# which lies outside this fragment — confirm against the full script.
echo "$ip" >> "$CTDB_BASE/state/statd/restart"

# Having a list of what IPs we have allows statd to do the right
# thing via $CTDB_BASE/statd-callout
/bin/touch "$CTDB_BASE/state/statd/ip/$ip"
# Flag the lock manager for restart and drop this IP from the set of
# addresses statd answers for.
# NOTE(review): $ip is set by the surrounding releaseip event handling,
# which lies outside this fragment — confirm against the full script.
echo "$ip" >> "$CTDB_BASE/state/statd/restart"
/bin/rm -f "$CTDB_BASE/state/statd/ip/$ip"
63 # always restart the lockmanager so that we start with a clusterwide
64 # graceperiod when ip addresses has changed
# NOTE(review): the closing "}" of the group opened below lies outside
# this fragment (original lines 67-68 are not visible here), so the code
# is left byte-identical rather than restructured.
65 [ -x $CTDB_BASE/statd-callout ] && {
# Notify statd clients in the background so the event script is not
# blocked while the callout runs.
66 $CTDB_BASE/statd-callout notify &
# Clear the restart flag written by the takeip/releaseip handlers.
69 /bin/rm -f $CTDB_BASE/state/statd/restart
# Check that the NFS server and mountd respond to RPC requests
# (program 100003 = nfs, 100005 = mountd).
# NOTE(review): ctdb_check_rpc / ctdb_check_directories come from the
# sourced CTDB functions file, outside this fragment.
ctdb_check_rpc "NFS" 100003 3
ctdb_check_rpc "mount" 100005 1

# ... and that the exported directories are available.  awk's default
# whitespace splitting copes with both space- and tab-separated
# /etc/exports entries (cut -d' ' -f1 did not).
nfs_dirs=$(awk '!/^#/ {print $1}' /etc/exports)
# $nfs_dirs is deliberately left unquoted so each path becomes a
# separate argument.
ctdb_check_directories "nfs" $nfs_dirs

# Check that the lock manager responds to RPC requests
# (program 100024 = statd, 100021 = nlockmgr) and that the shared statd
# directory is reachable.
ctdb_check_rpc "statd" 100024 1
ctdb_check_rpc "lockd" 100021 1
ctdb_check_directories "statd" "$STATD_SHARED_DIRECTORY"