ltmain.sh
Makefile.in
missing
+test-driver
*compile
*.gcda
*.gcno
*.vol
# Generated files
+tests/env.rc
+dht_layout_unittest
+mem_pool_unittest
+tests/utils/arequal-checksum
api/examples/__init__.py
api/examples/__init__.py?
api/examples/setup.py
+xlators/features/glupy/src/__init__.py
contrib/argp-standalone/libargp.a
contrib/uuid/uuid_types.h
extras/init.d/glusterd-Debian
extras/ocf/volume
extras/who-wrote-glusterfs/gitdm
extras/geo-rep/gsync-sync-gfid
+geo-replication/src/set_geo_rep_pem_keys.sh
+geo-replication/syncdaemon/configinterface.py
glusterfs-api.pc
glusterfs.spec
glusterfsd/src/glusterfsd
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
+#else
+#pragma clang diagnostic ignored "-Wunused-variable"
+#pragma clang diagnostic ignored "-Wunused-value"
#endif
#endif
#endif
cli-cmd-volume.c cli-cmd-peer.c cli-rpc-ops.c cli-cmd-parser.c\
cli-cmd-system.c cli-cmd-misc.c cli-xml-output.c cli-quotad-client.c cli-cmd-snapshot.c
-gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD)\
+gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD) \
$(RLLIBS) $(top_builddir)/rpc/xdr/src/libgfxdr.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
- $(GF_GLUSTERFS_LIBS) $(XML_LIBS)
+ $(XML_LIBS)
gluster_LDFLAGS = $(GF_LDFLAGS)
noinst_HEADERS = cli.h cli-mem-types.h cli-cmd.h cli-quotad-client.h
return (strcmp (key, "group") == 0);
}
-#define GLUSTERD_DEFAULT_WORKDIR "/var/lib/glusterd"
static int
cli_add_key_group (dict_t *dict, char *key, char *value, char **op_errstr)
{
return ret;
}
-#undef GLUSTERD_DEFAULT_WORKDIR
int32_t
cli_cmd_volume_set_parse (const char **words, int wordcount, dict_t **options,
/* TODO: fix hardcoding; Need to perform an RPC call to glusterd
* to fetch working directory
*/
- sprintf (quota_conf_file, "/var/lib/glusterd/vols/%s/quota.conf",
+ sprintf (quota_conf_file, "%s/vols/%s/quota.conf",
+ GLUSTERD_DEFAULT_WORKDIR,
volname);
fd = open (quota_conf_file, O_RDONLY);
if (fd == -1)
//TODO: fix hardcoding; Need to perform an RPC call to glusterd
//to fetch working directory
- sprintf (quota_conf_file, "/var/lib/glusterd/vols/%s/quota.conf",
+ sprintf (quota_conf_file, "%s/vols/%s/quota.conf",
+ GLUSTERD_DEFAULT_WORKDIR,
volname);
fd = open (quota_conf_file, O_RDONLY);
if (fd == -1) {
dict_t *dict = NULL;
char *help_str = NULL;
char msg[1024] = {0,};
- char *debug_xlator = _gf_false;
+ char *debug_xlator = NULL;
char tmp_str[512] = {0,};
if (-1 == req->rpc_status) {
dnl cases as published by the Free Software Foundation.
AC_INIT([glusterfs],
- [m4_esyscmd([build-aux/pkg-version --version])],
- [gluster-users@gluster.org],,[https://github.com/gluster/glusterfs.git])
+ [m4_esyscmd([build-aux/pkg-version --version])],
+ [gluster-users@gluster.org],,[https://github.com/gluster/glusterfs.git])
AC_SUBST([PACKAGE_RELEASE],
[m4_esyscmd([build-aux/pkg-version --release])])
AM_INIT_AUTOMAKE
+
# Removes warnings when using automake 1.14 around (...but option 'subdir-objects' is disabled )
#but libglusterfs fails to build with contrib (They are not set up that way?)
#AM_INIT_AUTOMAKE([subdir-objects])
libglusterfs/src/Makefile
geo-replication/src/peer_gsec_create
geo-replication/src/peer_add_secret_pub
+ geo-replication/syncdaemon/configinterface.py
glusterfsd/Makefile
glusterfsd/src/Makefile
rpc/Makefile
rpc/xdr/Makefile
rpc/xdr/src/Makefile
xlators/Makefile
- xlators/meta/Makefile
- xlators/meta/src/Makefile
+ xlators/meta/Makefile
+ xlators/meta/src/Makefile
xlators/mount/Makefile
xlators/mount/fuse/Makefile
xlators/mount/fuse/src/Makefile
cli/src/Makefile
doc/Makefile
extras/Makefile
+ extras/glusterd.vol
extras/init.d/Makefile
extras/init.d/glusterd.plist
extras/init.d/glusterd-Debian
fi
AC_SUBST(HAVE_BACKTRACE)
-AC_CHECK_LIB([m], [ceil], , AC_MSG_ERROR([glibc math package missing - required]))
+if test "x${have_backtrace}" != "xyes"; then
+AC_TRY_COMPILE([#include <math.h>], [double x=0.0; x=ceil(0.0);],
+ [have_math_h=yes],
+ AC_MSG_ERROR([need math library for libexecinfo]))
+if test "x${have_math_h}" = "xyes"; then
+ LIBS="$LIBS -lm"
+fi
+fi
dnl glusterfs prints memory usage to stderr by sending it SIGUSR1
AC_CHECK_FUNC([malloc_stats], [have_malloc_stats=yes])
fi
fi
+### Dirty hacky stuff to make LOCALSTATEDIR work
+if test "x$prefix" = xNONE; then
+ test $localstatedir = '${prefix}/var' && localstatedir=$ac_default_prefix/var
+ localstatedir=/var
+ LOCALSTATEDIR=$(eval echo ${localstatedir})
+else
+ LOCALSTATEDIR=$(eval echo ${localstatedir})
+fi
+
case $host_os in
linux*)
GF_HOST_OS="GF_LINUX_HOST_OS"
- GF_CFLAGS="${ARGP_STANDALONE_CPPFLAGS}"
+ GF_CFLAGS="${GF_COMPILER_FLAGS} ${ARGP_STANDALONE_CPPFLAGS}"
GF_LDADD="${ARGP_STANDALONE_LDADD}"
GF_FUSE_CFLAGS="-DFUSERMOUNT_DIR=\\\"\$(bindir)\\\""
+ GLUSTERD_WORKDIR="${LOCALSTATEDIR}/lib/glusterd"
;;
solaris*)
GF_HOST_OS="GF_SOLARIS_HOST_OS"
GF_CFLAGS="${ARGP_STANDALONE_CPPFLAGS} -D_REENTRANT -D_POSIX_PTHREAD_SEMANTICS -m64"
GF_LDFLAGS=""
GF_LDADD="${ARGP_STANDALONE_LDADD}"
- GF_GLUSTERFS_LIBS="-lnsl -lresolv -lsocket"
BUILD_FUSE_CLIENT=no
FUSE_CLIENT_SUBDIR=""
+ GLUSTERD_WORKDIR="${LOCALSTATEDIR}/lib/glusterd"
;;
*netbsd*)
GF_HOST_OS="GF_BSD_HOST_OS"
GF_FUSE_CFLAGS="-DFUSERMOUNT_DIR=\\\"\$(sbindir)\\\""
GF_LDADD="${ARGP_STANDALONE_LDADD}"
if test "x$ac_cv_header_execinfo_h" = "xyes"; then
- GF_GLUSTERFS_LIBS="-lexecinfo"
+ GF_LDFLAGS="-lexecinfo"
fi
GF_FUSE_LDADD="-lperfuse"
BUILD_FUSE_CLIENT=yes
LEXLIB=""
BUILD_FUSERMOUNT=no
FUSERMOUNT_SUBDIR=""
+ GLUSTERD_WORKDIR="${LOCALSTATEDIR}/db/glusterd"
;;
*freebsd*)
GF_HOST_OS="GF_BSD_HOST_OS"
- GF_CFLAGS="${ARGP_STANDALONE_CPPFLAGS} -O0"
+ GF_CFLAGS="${GF_COMPILER_FLAGS} ${ARGP_STANDALONE_CPPFLAGS} -O0"
GF_CFLAGS="${GF_CFLAGS} -DTHREAD_UNSAFE_BASENAME"
GF_CFLAGS="${GF_CFLAGS} -DTHREAD_UNSAFE_DIRNAME"
GF_CFLAGS="${GF_CFLAGS} -D_LIBGEN_H_"
GF_CFLAGS="${GF_CFLAGS} -DO_DSYNC=0"
GF_CFLAGS="${GF_CFLAGS} -Dxdr_quad_t=xdr_longlong_t"
GF_CFLAGS="${GF_CFLAGS} -Dxdr_u_quad_t=xdr_u_longlong_t"
- GF_GLUSTERFS_CFLAGS="${GF_CFLAGS}"
GF_FUSE_CFLAGS="-DFUSERMOUNT_DIR=\\\"\$(sbindir)\\\""
- GF_FUSE_LIBS="${GF_FUSE_LIBS} /usr/lib/libutil.so"
- GF_FUSE_LDADD="-lutil"
- GF_CFLAGS=" -I/usr/local/include ${GF_CFLAGS}"
- CFLAGS=" -std=gnu89 ${CFLAGS}"
- GF_CFLAGS=" -std=gnu89 ${GF_CFLAGS}"
- GF_GLUSTERFS_CFLAGS="${GF_CFLAGS}"
- GF_LDADD="${ARGP_STANDALONE_LDADD} /usr/local/lib/libpython2.7.so /usr/local/lib/libintl.so"
- LDFLAGS="${LDFLAGS} -L/usr/local/lib"
+ GF_LDADD="${ARGP_STANDALONE_LDADD}"
if test "x$ac_cv_header_execinfo_h" = "xyes"; then
- GF_GLUSTERFS_LIBS="-lexecinfo"
+ GF_LDFLAGS="-lexecinfo"
fi
+ BUILD_FUSE_CLIENT=yes
BUILD_FUSERMOUNT=no
- BUILD_QEMU_BLOCK=no
FUSERMOUNT_SUBDIR=""
+ GLUSTERD_WORKDIR="${LOCALSTATEDIR}/db/glusterd"
;;
darwin*)
GF_HOST_OS="GF_DARWIN_HOST_OS"
BUILD_FUSERMOUNT="no"
BUILD_QEMU_BLOCK="no"
FUSERMOUNT_SUBDIR=""
+ GLUSTERD_WORKDIR="${LOCALSTATEDIR}/db/glusterd"
;;
esac
GF_CFLAGS="${GF_CFLAGS} ${UNITTEST_CPPFLAGS}"
AC_SUBST(GF_HOST_OS)
-AC_SUBST([GF_GLUSTERFS_LIBS])
AC_SUBST(GF_CFLAGS)
AC_SUBST(GF_LDFLAGS)
AC_SUBST(GF_LDADD)
AM_CONDITIONAL([GF_DARWIN_HOST_OS], test "${GF_HOST_OS}" = "GF_DARWIN_HOST_OS")
AM_CONDITIONAL([GF_BSD_HOST_OS], test "${GF_HOST_OS}" = "GF_BSD_HOST_OS")
-AM_CONDITIONAL([GF_INSTALL_VAR_LIB_GLUSTERD], test ! -d ${localstatedir}/lib/glusterd && test -d ${sysconfdir}/glusterd )
+AC_SUBST(GLUSTERD_WORKDIR)
+AM_CONDITIONAL([GF_INSTALL_GLUSTERD_WORKDIR], test ! -d ${GLUSTERD_WORKDIR} && test -d ${sysconfdir}/glusterd )
dnl pkg-config versioning
GFAPI_VERSION="7.0.0"
AC_SUBST(LIBGFCHANGELOG_LT_VERSION)
AC_SUBST(GFAPI_LT_VERSION)
+dnl this change is necessary for run-tests.sh
+AC_CONFIG_FILES([tests/env.rc],[ln -s ${ac_abs_builddir}/env.rc ${ac_abs_srcdir}/env.rc 2>/dev/null])
+
AC_OUTPUT
echo
CPPFLAGS="$CPPFLAGS -I$srcdir"
dnl Added for C99 standards
-CFLAGS="$CFLAGS -std=gnu89"
+CFLAGS="$CFLAGS -std=gnu89 -static"
AC_OUTPUT(Makefile)
#ifdef __FreeBSD__
struct iovec *iov = NULL;
int iovlen = 0;
+ char fdstr[15];
+ sprintf (fdstr, "%d", fd);
+
build_iovec (&iov, &iovlen, "fstype", "fusefs", -1);
build_iovec (&iov, &iovlen, "subtype", "glusterfs", -1);
- build_iovec (&iov, &iovlen, "fspath", mountpoint, -1);
+ build_iovec (&iov, &iovlen, "fspath", __DECONST(void *, mountpoint),
+ -1);
build_iovec (&iov, &iovlen, "from", "/dev/fuse", -1);
build_iovec (&iov, &iovlen, "volname", source, -1);
- build_iovec_argf (&iov, &iovlen, "fd", "%d", fd);
- build_iovec_argf (&iov, &iovlen, "user_id", "%d", getuid());
- build_iovec_argf (&iov, &iovlen, "group_id", "%d", getgid());
+ build_iovec (&iov, &iovlen, "fd", fdstr, -1);
ret = nmount (iov, iovlen, mountflags);
#else
ret = mount (source, mountpoint, fstype, mountflags,
extern "C" {
#endif
-int backtrace(void **, int);
-char **backtrace_symbols(void *const *, int);
-void backtrace_symbols_fd(void *const *, int, int);
+extern int backtrace(void **, int);
+extern char **backtrace_symbols(void *const *, int);
+extern void backtrace_symbols_fd(void *const *, int, int);
#ifdef __cplusplus
}
typedef struct timeval qemu_timeval;
#define qemu_gettimeofday(tp) gettimeofday(tp, NULL)
-#ifndef CONFIG_UTIMENSAT
+#if !defined(CONFIG_UTIMENSAT) || defined(__FreeBSD__)
#ifndef UTIME_NOW
# define UTIME_NOW ((1l << 30) - 1l)
#endif
#include <glib/gprintf.h>
#include "config-host.h"
+#include "qemu-common.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qemu/sockets.h"
command-completion/Makefile command-completion/README
install-data-local:
- $(mkdir_p) $(DESTDIR)$(localstatedir)/lib/glusterd/groups
+ $(mkdir_p) $(DESTDIR)$(GLUSTERD_WORKDIR)/groups
$(INSTALL_DATA) $(top_srcdir)/extras/group-virt.example \
- $(DESTDIR)$(localstatedir)/lib/glusterd/groups/virt
+ $(DESTDIR)$(GLUSTERD_WORKDIR)/groups/virt
scripts_PROGRAMS = gsync-sync-gfid
gsync_sync_gfid_CFLAGS = $(GF_CFLAGS) -Wall -I$(top_srcdir)/libglusterfs/src
gsync_sync_gfid_LDFLAGS = $(GF_LDFLAGS)
-gsync_sync_gfid_LDADD = $(GF_LIBS) $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_GLUSTERFS_LIBS)
+gsync_sync_gfid_LDADD = $(GF_LIBS) $(top_builddir)/libglusterfs/src/libglusterfs.la
gsync_sync_gfid_SOURCES = gsync-sync-gfid.c
EXTRA_DIST = gsync-sync-gfid.c gsync-upgrade.sh generate-gfid-file.sh \
volume management
type mgmt/glusterd
- option working-directory /var/lib/glusterd
+ option working-directory @GLUSTERD_WORKDIR@
option transport-type socket,rdma
option transport.socket.keepalive-time 10
option transport.socket.keepalive-interval 2
VOL_NAME=
VERSION=
VOLUME_OP=
-GLUSTERD_WORKING_DIR=
+GLUSTERD_WORKDIR=
ENABLED_NAME="S28Quota-root-xattr-heal.sh"
while true;
do
- case $1 in
+ case $1 in
--volname)
- shift
- VOL_NAME=$1
- ;;
+ shift
+ VOL_NAME=$1
+ ;;
--version)
- shift
- VERSION=$1
- ;;
- --gd-workdir)
- shift
- GLUSTERD_WORKING_DIR=$1
- ;;
- --volume-op)
- shift
- VOLUME_OP=$1
- ;;
+ shift
+ VERSION=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ --volume-op)
+ shift
+ VOLUME_OP=$1
+ ;;
*)
- shift
- break
- ;;
- esac
- shift
+ shift
+ break
+ ;;
+ esac
+ shift
done
##----------------------------------------
-ENABLED_STATE="$GLUSTERD_WORKING_DIR/hooks/$VERSION/$VOLUME_OP/post/$ENABLED_NAME"
+ENABLED_STATE="$GLUSTERD_WORKDIR/hooks/$VERSION/$VOLUME_OP/post/$ENABLED_NAME"
FLAG=`gluster volume quota $VOL_NAME list / 2>&1 | grep \
OPTSPEC="volname:,version:,gd-workdir:,volume-op:"
PROGNAME="Quota-xattr-heal-add-brick-pre"
VOL_NAME=
-GLUSTERD_WORKING_DIR=
+GLUSTERD_WORKDIR=
VOLUME_OP=
VERSION=
ENABLED_NAME="S28Quota-root-xattr-heal.sh"
while true;
do
- case $1 in
+ case $1 in
--volname)
- shift
- VOL_NAME=$1
- ;;
+ shift
+ VOL_NAME=$1
+ ;;
--gd-workdir)
- shift
- GLUSTERD_WORKING_DIR=$1
- ;;
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
--volume-op)
- shift
- VOLUME_OP=$1
- ;;
+ shift
+ VOLUME_OP=$1
+ ;;
--version)
- shift
- VERSION=$1
- ;;
+ shift
+ VERSION=$1
+ ;;
*)
- shift
- break
- ;;
- esac
- shift
+ shift
+ break
+ ;;
+ esac
+ shift
done
##----------------------------------------
-DISABLED_STATE="$GLUSTERD_WORKING_DIR/hooks/$VERSION/add-brick/post/$DISABLED_NAME"
-ENABLED_STATE_START="$GLUSTERD_WORKING_DIR/hooks/$VERSION/start/post/$ENABLED_NAME"
-ENABLED_STATE_ADD_BRICK="$GLUSTERD_WORKING_DIR/hooks/$VERSION/add-brick/post/$ENABLED_NAME";
+DISABLED_STATE="$GLUSTERD_WORKDIR/hooks/$VERSION/add-brick/post/$DISABLED_NAME"
+ENABLED_STATE_START="$GLUSTERD_WORKDIR/hooks/$VERSION/start/post/$ENABLED_NAME"
+ENABLED_STATE_ADD_BRICK="$GLUSTERD_WORKDIR/hooks/$VERSION/add-brick/post/$ENABLED_NAME";
## Why proceed if the required script itself is not present?
ls $DISABLED_STATE;
fi
## Is quota enabled?
-FLAG=`cat $GLUSTERD_WORKING_DIR/vols/$VOL_NAME/info | grep "^features.quota=" \
+FLAG=`cat $GLUSTERD_WORKDIR/vols/$VOL_NAME/info | grep "^features.quota=" \
| awk -F'=' '{print $NF}'`;
if [ "$FLAG" != "on" ]
then
fi
## Is volume started?
-FLAG=`cat $GLUSTERD_WORKING_DIR/vols/$VOL_NAME/info | grep "^status=" \
+FLAG=`cat $GLUSTERD_WORKDIR/vols/$VOL_NAME/info | grep "^status=" \
| awk -F'=' '{print $NF}'`;
if [ "$FLAG" != "1" ]
then
#/bin/bash
PROGNAME="Sganesha-reset"
-OPTSPEC="volname:"
+OPTSPEC="volname:,gd-workdir:"
VOL=
+GLUSTERD_WORKDIR=
function parse_args () {
ARGS=$(getopt -l $OPTSPEC -o "o" -name $PROGNAME $@)
eval set -- "$ARGS"
- case $1 in
- --volname)
- shift
- VOL=$1
- ;;
- esac
+ while true; do
+ case $1 in
+ --volname)
+ shift
+ VOL=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ esac
+ shift
+ done
}
function is_volume_started () {
volname=$1
- echo "$(grep status /var/lib/glusterd/vols/"$volname"/info |\
+ echo "$(grep status $GLUSTERD_WORKDIR/vols/"$volname"/info |\
cut -d"=" -f2)"
}
parse_args $@
-if ps aux | grep -q "[g]anesha.nfsd"
+if ps auxwww | grep -q "[g]anesha.nfsd"
then
kill -s TERM `cat /var/run/ganesha.pid`
sleep 10
gluster volume start $VOL force
fi
fi
-
-
-
PROGNAME="Ssamba-set"
-OPTSPEC="volname:"
+OPTSPEC="volname:,gd-workdir:"
VOL=
CONFIGFILE=
LOGFILEBASE=
PIDDIR=
+GLUSTERD_WORKDIR=
enable_smb=""
while true; do
case $1 in
- --volname)
- shift
- VOL=$1
- ;;
- *)
- shift
- for pair in $@; do
+ --volname)
+ shift
+ VOL=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ *)
+ shift
+ for pair in $@; do
read key value < <(echo "$pair" | tr "=" " ")
case "$key" in
"user.cifs")
- enable_smb=$value
- ;;
+ enable_smb=$value
+ ;;
"user.smb")
- enable_smb=$value
- ;;
+ enable_smb=$value
+ ;;
*)
- ;;
+ ;;
esac
- done
-
- shift
- break
- ;;
+ done
+ shift
+ break
+ ;;
esac
shift
done
function is_volume_started () {
volname=$1
- echo "$(grep status /var/lib/glusterd/vols/"$volname"/info |\
+ echo "$(grep status $GLUSTERD_WORKDIR/vols/"$volname"/info |\
cut -d"=" -f2)"
}
#!/bin/bash
PROGNAME="Sganesha-set"
-OPTSPEC="volname:"
+OPTSPEC="volname:,gd-workdir:"
VOL=
declare -i EXPORT_ID
GANESHA_DIR="/var/lib/glusterfs-ganesha"
enable_ganesha=""
host_name="none"
LOC=""
-
+GLUSTERD_WORKDIR=
function parse_args ()
while true; do
case $1 in
- --volname)
- shift
- VOL=$1
- ;;
- *)
- shift
- for pair in $@; do
+ --volname)
+ shift
+ VOL=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ *)
+ shift
+ for pair in $@; do
read key value < <(echo "$pair" | tr "=" " ")
case "$key" in
- "nfs-ganesha.enable")
- enable_ganesha=$value
- ;;
- "nfs-ganesha.host")
- host_name=$value
- ;;
+ "nfs-ganesha.enable")
+ enable_ganesha=$value
+ ;;
+ "nfs-ganesha.host")
+ host_name=$value
+ ;;
*)
- ;;
+ ;;
esac
- done
-
- shift
- break
- ;;
+ done
+ shift
+ break
+ ;;
esac
shift
done
function check_if_host_set()
{
- if ! cat /var/lib/glusterd/vols/$VOL/info | grep -q "nfs-ganesha.host"
+ if ! cat $GLUSTERD_WORKDIR/vols/$VOL/info | grep -q "nfs-ganesha.host"
then
exit 1
fi
function check_gluster_nfs()
{
- if cat /var/lib/glusterd/vols/$VOL/info | grep -q "nfs.disable=ON"
+ if cat $GLUSTERD_WORKDIR/vols/$VOL/info | grep -q "nfs.disable=ON"
then
gnfs="disabled"
fi
fi
fi
-
-
#volume.
PROGNAME="Ssamba-start"
-OPTSPEC="volname:"
+OPTSPEC="volname:,gd-workdir:"
VOL=
CONFIGFILE=
LOGFILEBASE=
PIDDIR=
+GLUSTERD_WORKDIR=
function parse_args () {
ARGS=$(getopt -l $OPTSPEC -name $PROGNAME $@)
eval set -- "$ARGS"
while true; do
- case $1 in
- --volname)
- shift
- VOL=$1
- ;;
- *)
- shift
- break
- ;;
- esac
- shift
+ case $1 in
+ --volname)
+ shift
+ VOL=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ shift
done
}
volname=$1
uservalue=
- usercifsvalue=$(grep user.cifs /var/lib/glusterd/vols/"$volname"/info |\
+ usercifsvalue=$(grep user.cifs $GLUSTERD_WORKDIR/vols/"$volname"/info |\
cut -d"=" -f2)
- usersmbvalue=$(grep user.smb /var/lib/glusterd/vols/"$volname"/info |\
+ usersmbvalue=$(grep user.smb $GLUSTERD_WORKDIR/vols/"$volname"/info |\
cut -d"=" -f2)
if [[ $usercifsvalue = "disable" || $usersmbvalue = "disable" ]]; then
glusterd_prestart()
{
- mkdir -p /var/lib/glusterd /var/log/glusterfs
+ mkdir -p @GLUSTERD_WORKDIR@ /var/log/glusterfs
return 0
}
[Service]
Type=forking
-PIDFile=/run/glusterd.pid
+PIDFile=@localstatedir@/run/glusterd.pid
LimitNOFILE=65536
-ExecStart=@prefix@/sbin/glusterd -p /run/glusterd.pid
+ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/run/glusterd.pid
KillMode=process
[Install]
-
gsyncddir = $(libexecdir)/glusterfs
gsyncd_SCRIPTS = gverify.sh peer_add_secret_pub peer_gsec_create set_geo_rep_pem_keys.sh
gsyncd_SOURCES = gsyncd.c procdiggy.c
-gsyncd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
- $(GF_GLUSTERFS_LIBS)
+gsyncd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
gsyncd_LDFLAGS = $(GF_LDFLAGS)
chown $user: $home_dir/.ssh/authorized_keys;
fi
-cat "$GLUSTERD_WORKING_DIR"/geo-replication/common_secret.pem.pub >> $home_dir/.ssh/authorized_keys;
+cat "$GLUSTERD_WORKDIR"/geo-replication/common_secret.pem.pub >> $home_dir/.ssh/authorized_keys;
prefix=@prefix@
exec_prefix=@exec_prefix@
-if [ ! -f "$GLUSTERD_WORKING_DIR"/geo-replication/secret.pem.pub ]; then
- \rm -rf "$GLUSTERD_WORKING_DIR"/geo-replication/secret.pem*
- ssh-keygen -N '' -f "$GLUSTERD_WORKING_DIR"/geo-replication/secret.pem > /dev/null
+if [ ! -f "$GLUSTERD_WORKDIR"/geo-replication/secret.pem.pub ]; then
+ \rm -rf "$GLUSTERD_WORKDIR"/geo-replication/secret.pem*
+ ssh-keygen -N '' -f "$GLUSTERD_WORKDIR"/geo-replication/secret.pem > /dev/null
fi
-if [ ! -f "$GLUSTERD_WORKING_DIR"/geo-replication/tar_ssh.pem.pub ]; then
- \rm -rf "$GLUSTERD_WORKING_DIR"/geo-replication/tar_ssh.pem*
- ssh-keygen -N '' -f "$GLUSTERD_WORKING_DIR"/geo-replication/tar_ssh.pem > /dev/null
+if [ ! -f "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem.pub ]; then
+ \rm -rf "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem*
+ ssh-keygen -N '' -f "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem > /dev/null
fi
-output1=`echo command=\"${exec_prefix}/libexec/glusterfs/gsyncd\" " "``cat "$GLUSTERD_WORKING_DIR"/geo-replication/secret.pem.pub`
-output2=`echo command=\"tar \$\{SSH_ORIGINAL_COMMAND#* \}\" " "``cat "$GLUSTERD_WORKING_DIR"/geo-replication/tar_ssh.pem.pub`
+output1=`echo command=\"${exec_prefix}/libexec/glusterfs/gsyncd\" " "``cat "$GLUSTERD_WORKDIR"/geo-replication/secret.pem.pub`
+output2=`echo command=\"tar \$\{SSH_ORIGINAL_COMMAND#* \}\" " "``cat "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem.pub`
echo -e "$output1\n$output2"
#!/bin/bash
# Script to copy the pem keys from the user's home directory
-# to $GLUSTERD_WORKING_DIR/geo-replication/ and then copy
+# to $GLUSTERD_WORKDIR/geo-replication and then copy
# the keys to other nodes in the cluster and add them to the
# respective authorized keys. The script takes as argument the
# user name and assumes that the user will be present in all
fi
if [ -f $home_dir/common_secret.pem.pub ]; then
- cp $home_dir/common_secret.pem.pub /var/lib/glusterd/geo-replication/
+ cp $home_dir/common_secret.pem.pub ${GLUSTERD_WORKDIR}/geo-replication/
gluster system:: copy file /geo-replication/common_secret.pem.pub
gluster system:: execute add_secret_pub $user
else
("peersrx . .",
"georep_session_working_dir",
"",
- "/var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_"
+ "@GLUSTERD_WORKDIR@/geo-replication/${mastervol}_${remotehost}_"
"${slavevol}/"),
("peersrx .",
"gluster_params",
"ssh_command_tar",
"",
"ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no "
- "-i /var/lib/glusterd/geo-replication/tar_ssh.pem"),
+ "-i @GLUSTERD_WORKDIR@/geo-replication/tar_ssh.pem"),
("peersrx . .",
"changelog_log_file",
"",
"/${eSlave}${local_id}-changes.log"),
("peersrx . .",
"working_dir",
- "/var/run/gluster/${mastervol}/${eSlave}",
- "${iprefix}/lib/misc/glusterfsd/${mastervol}/${eSlave}"),
- ("peersrx . .",
- "working_dir",
- "/usr/local/var/run/gluster/${mastervol}/${eSlave}",
+ "@localstatedir@/run/gluster/${mastervol}/${eSlave}",
"${iprefix}/lib/misc/glusterfsd/${mastervol}/${eSlave}"),
)
glusterfsd_SOURCES = glusterfsd.c glusterfsd-mgmt.c
glusterfsd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
- $(top_builddir)/rpc/xdr/src/libgfxdr.la \
- $(GF_LDADD) $(GF_GLUSTERFS_LIBS)
+ $(top_builddir)/rpc/xdr/src/libgfxdr.la ${GF_LDADD}
+
glusterfsd_LDFLAGS = $(GF_LDFLAGS)
noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h glusterfsd-messages.h
struct statvfs statvfs;
dict_t *xattr;
struct gf_flock lock;
- gf_dirent_t entries;
uint32_t weak_checksum;
uint8_t *strong_checksum;
dict_t *xdata;
+ gf_dirent_t entries;
} args_cbk;
} call_stub_t;
#define DEFAULT_VAR_RUN_DIRECTORY DATADIR "/run/gluster"
#define DEFAULT_GLUSTERFSD_MISC_DIRETORY DATADIR "/lib/misc/glusterfsd"
+#ifdef GF_LINUX_HOST_OS
+#define GLUSTERD_DEFAULT_WORKDIR DATADIR "/lib/glusterd"
+#else
+#define GLUSTERD_DEFAULT_WORKDIR DATADIR "/db/glusterd"
+#endif
#define GF_REPLICATE_TRASH_DIR ".landfill"
/* GlusterFS's maximum supported Auxiliary GIDs */
* reduce functionality, both for users and for testing (which can now be
* done using secure connections for all tests without change elsewhere).
*
- * Nonetheless, TBD: define in terms of build-time PREFIX
*/
-#define SECURE_ACCESS_FILE "/var/lib/glusterd/secure-access"
+#define SECURE_ACCESS_FILE GLUSTERD_DEFAULT_WORKDIR "/secure-access"
int glusterfs_graph_prepare (glusterfs_graph_t *graph, glusterfs_ctx_t *ctx);
int glusterfs_graph_destroy (glusterfs_graph_t *graph);
struct iatt iatt1;
struct iatt iatt2;
dict_t *xattr;
- gf_dirent_t entries;
struct statvfs statvfs_buf;
struct iovec *vector;
int count;
pthread_mutex_t mutex;
pthread_cond_t cond;
int done;
+ gf_dirent_t entries;
};
struct syncopctx {
rpcsvc_vector_sizer
rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
- uint32_t progver, uint32_t procnum)
+ uint32_t progver, int procnum)
{
rpcsvc_program_t *program = NULL;
char found = 0;
rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen);
rpcsvc_vector_sizer
rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
- uint32_t progver, uint32_t procnum);
+ uint32_t progver, int procnum);
#endif
cases as published by the Free Software Foundation.
*/
+#if defined(__GNUC__)
+#if __GNUC__ >= 4
+#if !defined(__clang__)
+#if !defined(__NetBSD__)
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#endif
+#else
+#pragma clang diagnostic ignored "-Wunused-variable"
+#pragma clang diagnostic ignored "-Wunused-value"
+#endif
+#endif
+#endif
+
#include "xdr-nfs3.h"
#include "mem-pool.h"
#include "xdr-common.h"
echo "Running tests in file $t"
prove -f --timer $t
fi
- TMP_RES=$?
+ TMP_RES=$?
if [ ${TMP_RES} -ne 0 ] ; then
RES=${TMP_RES}
FAILED="$FAILED $t"
`gluster-devel@gluster.org`.
## Reminder
-- BE WARNED THAT THE TEST CASES DELETE /var/lib/glusterd/* !!!
+- BE WARNED THAT THE TEST CASES DELETE everything under ``$GLUSTERD_WORKDIR`` !!!
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --entry-timeout=0
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST glusterfs --volfile=/var/lib/glusterd/vols/$V0/${V0}-fuse.vol $M0
+TEST glusterfs --volfile=$GLUSTERD_WORKDIR/vols/$V0/${V0}-fuse.vol $M0
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --log-file=/tmp/a.txt --log-level=DEBUG $M0
TEST killall glusterd;
-rm -f /var/lib/glusterd/vols/$V0/snapd.info
+rm -f $GLUSTERD_WORKDIR/vols/$V0/snapd.info
TEST glusterd
# do not expect hostname as part of the pathinfo string
EXPECT 0 count_hostname_or_uuid_from_pathinfo $H0
-uuid=`grep UUID /var/lib/glusterd/glusterd.info | cut -f2 -d=`
+uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
# ... but expect the uuid $REPLICA times
EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $uuid
touch $M0/{1..22};
rm -f $M0/*;
-pid_file=$(ls /var/lib/glusterd/vols/$V0/run);
-brick_pid=$(cat /var/lib/glusterd/vols/$V0/run/$pid_file);
+pid_file=$(ls $GLUSTERD_WORKDIR/vols/$V0/run);
+brick_pid=$(cat $GLUSTERD_WORKDIR/vols/$V0/run/$pid_file);
mkdir $statedumpdir/statedump_tmp/;
echo "path=$statedumpdir/statedump_tmp" > $statedumpdir/glusterdump.options;
TEST pidof glusterd
TEST $CLI system uuid reset;
-uuid1=$(grep UUID /var/lib/glusterd/glusterd.info | cut -f 2 -d "=");
+uuid1=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f 2 -d "=");
TEST $CLI system uuid reset;
-uuid2=$(grep UUID /var/lib/glusterd/glusterd.info | cut -f 2 -d "=");
+uuid2=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f 2 -d "=");
TEST [ $uuid1 != $uuid2 ]
# Kill a brick process and then query for pathinfo
# for directories pathinfo should list backend path from available (up) subvolumes
-kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
+kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
EXPECT `expr $BRICK_COUNT - 1` query_pathinfo $M0/f00d;
TEST stat $M0/newfile;
TEST rm $M0/newfile;
-nfs_pid=$(cat /var/lib/glusterd/nfs/run/nfs.pid);
-glustershd_pid=$(cat /var/lib/glusterd/glustershd/run/glustershd.pid);
+nfs_pid=$(cat $GLUSTERD_WORKDIR/nfs/run/nfs.pid);
+glustershd_pid=$(cat $GLUSTERD_WORKDIR/glustershd/run/glustershd.pid);
pids=$(pidof glusterfs);
for i in $pids
ORIG_FILE_COUNT=`ls -l $M0 | wc -l`;
# Kill a brick process
-kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
+kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
TEST $CLI volume rebalance $V0 fix-layout start
TEST $CLI --xml volume status $V0 detail;
# Kill the brick process. After this, port number for the killed (in this case brick) process must be "N/A".
-kill `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-brick0.pid`
+kill `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-brick0.pid`
EXPECT "N/A" port_field $V0 '0'; # volume status
EXPECT "N/A" port_field $V0 '1'; # volume status detail
# Kill a brick process
-kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
+kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
# change dir ownership
NEW_UID=36;
NEW_GID=36;
touch $M0/files{1..1000};
# Kill a brick process
-kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}0.pid`;
+kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}0.pid`;
echo 3 >/proc/sys/vm/drop_caches;
TEST $CLI volume start $V0 force
# Kill a brick process
-kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
+kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}1.pid`;
echo 3 >/proc/sys/vm/drop_caches;
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
B0_hiphenated=`echo $B0 | tr '/' '-'`
-kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0$B0_hiphenated-brick1.pid` ;
+kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0$B0_hiphenated-brick1.pid` ;
echo "GLUSTER FILE SYSTEM" > $M0/FILE1
echo "GLUSTER FILE SYSTEM" > $M0/FILE2
{
local event=$1
touch /tmp/pre.out /tmp/post.out
- touch /var/lib/glusterd/hooks/1/"$event"/pre/Spre.sh
- touch /var/lib/glusterd/hooks/1/"$event"/post/Spost.sh
+ touch $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+ touch $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
- printf "#! /bin/bash\necho "$event"Pre > /tmp/pre.out\n" > /var/lib/glusterd/hooks/1/"$event"/pre/Spre.sh
- printf "#! /bin/bash\necho "$event"Post > /tmp/post.out\n" > /var/lib/glusterd/hooks/1/"$event"/post/Spost.sh
- chmod a+x /var/lib/glusterd/hooks/1/"$event"/pre/Spre.sh
- chmod a+x /var/lib/glusterd/hooks/1/"$event"/post/Spost.sh
+ printf "#! /bin/bash\necho "$event"Pre > /tmp/pre.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+ printf "#! /bin/bash\necho "$event"Post > /tmp/post.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+ chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+ chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
}
function hooks_cleanup ()
{
local event=$1
rm /tmp/pre.out /tmp/post.out
- rm /var/lib/glusterd/hooks/1/"$event"/pre/Spre.sh
- rm /var/lib/glusterd/hooks/1/"$event"/post/Spost.sh
+ rm $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+ rm $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
}
## Verify volume is created and its hooks script ran
TEST $CLI volume start $V0
pkill glusterfs
uuid=""
-for line in $(cat /var/lib/glusterd/glusterd.info)
+for line in $(cat $GLUSTERD_WORKDIR/glusterd.info)
do
if [[ $line == UUID* ]]
then
get_cached_brick
CACHED=$?
# Kill a brick process
- kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}$CACHED.pid`;
+ kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}$CACHED.pid`;
fi
## trigger a lookup
TEST ls -l $M0
## kill 2 bricks to bring down available subvol < spread count
-kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}2.pid`;
-kill -9 `cat /var/lib/glusterd/vols/$V0/run/$H0-d-backends-${V0}3.pid`;
+kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}2.pid`;
+kill -9 `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-${V0}3.pid`;
mkdir $M0/dir1 2>/dev/null
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
# before mounting the rmtab should be empty
-EXPECT '0' count_lines /var/lib/glusterd/nfs/rmtab
+EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
TEST mount_nfs $H0:/$V0 $N0 nolock
# the output would look similar to:
# hostname-0=172.31.122.104
# mountpoint-0=/ufo
#
-EXPECT '2' count_lines /var/lib/glusterd/nfs/rmtab
+EXPECT '2' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
# duplicate mounts should not be recorded (client could have crashed)
TEST mount_nfs $H0:/$V0 $N1 nolock
-EXPECT '2' count_lines /var/lib/glusterd/nfs/rmtab
+EXPECT '2' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
# removing a mount (even if there are two) should remove the entry
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N1
-EXPECT '0' count_lines /var/lib/glusterd/nfs/rmtab
+EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
# unmounting the other mount should work flawlessly
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
-EXPECT '0' count_lines /var/lib/glusterd/nfs/rmtab
+EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $M0
TEST $CLI volume create $V0 $H0:$B0/${V0}1;
TEST $CLI volume start $V0;
-pid_file=$(ls /var/lib/glusterd/vols/$V0/run);
-brick_pid=$(cat /var/lib/glusterd/vols/$V0/run/$pid_file);
+pid_file=$(ls $GLUSTERD_WORKDIR/vols/$V0/run);
+brick_pid=$(cat $GLUSTERD_WORKDIR/vols/$V0/run/$pid_file);
kill -SIGKILL $brick_pid;
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-uuid=`grep UUID /var/lib/glusterd/glusterd.info | cut -f2 -d=`
+uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
EXPECT $uuid get_brick_host_uuid $V0
TEST $CLI volume delete $V0;
TEST pidof glusterd;
TEST $CLI volume info;
-touch /var/lib/glusterd/groups/test
-echo "read-ahead=off" > /var/lib/glusterd/groups/test
-echo "open-behind=off" >> /var/lib/glusterd/groups/test
+touch $GLUSTERD_WORKDIR/groups/test
+echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
+echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume set $V0 group test
--- /dev/null
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+
+PATH=@sbindir@:$PATH
+export PATH
+
+LD_LIBRARY_PATH=@libdir@:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH
+
+GLUSTERD_WORKDIR=@GLUSTERD_WORKDIR@
+export GLUSTERD_WORKDIR
\ No newline at end of file
V1=${V1:=patchy1}; # volume name to use in tests
B0=${B0:=/d/backends}; # top level of brick directories
CC=cc
-case `uname -s` in
+OSTYPE=$(uname -s)
+
+if [ ! -f ${PWD}/tests/env.rc ]; then
+ echo "Aborting."
+ echo
+ echo "env.rc not found"
+ echo
+ echo "Please correct the problem and try again."
+ echo
+ exit 1
+fi
+. ${PWD}/tests/env.rc
+
+case $OSTYPE in
Linux)
H0=${H0:=`hostname --fqdn`}; # hostname
;;
H0=${H0:=`hostname`}; # hostname
;;
esac
+
DEBUG=${DEBUG:=0} # turn on debugging?
PROCESS_UP_TIMEOUT=20
;;
esac
-
- rm -rf /var/lib/glusterd/* $B0/* /etc/glusterd/*;
+ if [ -n "${GLUSTERD_WORKDIR}" ] ; then
+ rm -rf $GLUSTERD_WORKDIR/* $B0/* /etc/glusterd/*;
+ fi
umount -l $M0 2>/dev/null || true;
umount -l $M1 2>/dev/null || true;
alias TEST_IN_LOOP='_TEST_IN_LOOP $LINENO'
shopt -s expand_aliases
-ostype=$(uname -s)
-if [ x"$ostype" = x"Linux" ]; then
+if [ x"$OSTYPE" = x"Linux" ]; then
alias dd="dd status=none"
-elif [ x"$ostype" = x"NetBSD" ]; then
+elif [ x"$OSTYPE" = x"NetBSD" ]; then
alias dd="dd msgfmt=quiet"
fi
# MacOS doesn't seem to support either option. Doing nothing at all is
function read_nfs_pidfile ()
{
- echo `cat /var/lib/glusterd/nfs/run/nfs.pid`
+ echo `cat $GLUSTERD_WORKDIR/nfs/run/nfs.pid`
}
function cleanup_statedump {
local host=$2
local brick=$3
local brick_hiphenated=$(echo $brick | tr '/' '-')
- echo `cat /var/lib/glusterd/vols/$vol/run/${host}${brick_hiphenated}.pid`
+ echo `cat $GLUSTERD_WORKDIR/vols/$vol/run/${host}${brick_hiphenated}.pid`
}
function kill_brick {
ios_log (this, logfp, "\nTIMESTAMP \t\t\t THROUGHPUT(KBPS)"
"\tFILE NAME");
list_head = &conf->thru_list[IOS_STATS_THRU_READ];
- ios_dump_throughput_stats(list_head, this, logfp, IOS_STATS_THRU_READ);
+ ios_dump_throughput_stats(list_head, this, logfp, IOS_STATS_TYPE_READ);
ios_log (this, logfp, "\n======Write Throughput File Stats======");
ios_log (this, logfp, "\nTIMESTAMP \t\t\t THROUGHPUT(KBPS)"
"\tFILE NAME");
list_head = &conf->thru_list[IOS_STATS_THRU_WRITE];
- ios_dump_throughput_stats (list_head, this, logfp, IOS_STATS_THRU_WRITE);
+ ios_dump_throughput_stats (list_head, this, logfp, IOS_STATS_TYPE_WRITE);
}
return 0;
}
#define MASTER_VOL_KEY_SIZE (32)
#define NMTD_VOL_KEY_SIZE (16)
-#if defined(GF_BSD_HOST_OS)
+#if !defined(GF_LINUX_HOST_OS)
typedef off_t loff_t;
#endif
-#if defined(GF_DARWIN_HOST_OS)
-typedef uint64_t loff_t;
-#endif
-
struct crypt_key {
uint32_t len;
const char *label;
-I$(top_srcdir)/xlators/features/changelog/src \
-DDATADIR=\"$(localstatedir)\"
-libgfchangelog_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
- $(GF_GLUSTERFS_LIBS)
+libgfchangelog_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
libgfchangelog_la_LDFLAGS = $(GF_LDFLAGS) -version-info $(LIBGFCHANGELOG_LT_VERSION)
#include "changelog-encoders.h"
#include <pthread.h>
-inline void
+static inline void
__mask_cancellation (xlator_t *this)
{
int ret = 0;
"failed to disable thread cancellation");
}
-inline void
+static inline void
__unmask_cancellation (xlator_t *this)
{
int ret = 0;
CLEANFILES =
install-data-hook:
-
-if GF_INSTALL_VAR_LIB_GLUSTERD
- $(mkdir_p) $(localstatedir)/lib/
- (stat $(sysconfdir)/glusterd && \
- mv $(sysconfdir)/glusterd $(localstatedir)/lib/) || true;
- (ln -sf $(localstatedir)/lib/glusterd $(sysconfdir)/glusterd) || true;
+if GF_INSTALL_GLUSTERD_WORKDIR
+ $(mkdir_p) $(DESTDIR)$(GLUSTERD_WORKDIR)
+ (stat $(DESTDIR)$(sysconfdir)/glusterd && \
+ mv $(DESTDIR)$(sysconfdir)/glusterd $(DESTDIR)$(GLUSTERD_WORKDIR)) || true;
+	(ln -sf $(GLUSTERD_WORKDIR) $(DESTDIR)$(sysconfdir)/glusterd) || true;
endif
case GD_OP_SET_VOLUME:
ret = glusterd_hooks_set_volume_args (op_ctx, runner);
+ glusterd_hooks_add_working_dir (runner, priv);
break;
case GD_OP_GSYNC_CREATE:
first_time = 1;
}
- setenv ("GLUSTERD_WORKING_DIR", workdir, 1);
+ setenv ("GLUSTERD_WORKDIR", workdir, 1);
gf_log (this->name, GF_LOG_INFO, "Using %s as working directory",
workdir);
GLUSTERD_VOL_COMP_RJT,
};
-#define GLUSTERD_DEFAULT_WORKDIR "/var/lib/glusterd"
#define GLUSTERD_DEFAULT_PORT GF_DEFAULT_BASE_PORT
#define GLUSTERD_INFO_FILE "glusterd.info"
#define GLUSTERD_VOLUME_QUOTA_CONFIG "quota.conf"
fi
# check if the mount point is a brick's parent directory
- GLUSTERD_WORKDIR="/var/lib/glusterd";
+ GLUSTERD_WORKDIR="@GLUSTERD_WORKDIR@";
- ls -L "$GLUSTERD_WORKDIR"/vols/*/bricks/* > /dev/null 2>&1;
+ ls -L "${GLUSTERD_WORKDIR}"/vols/*/bricks/* > /dev/null 2>&1;
if [ $? -ne 0 ]; then
return;
fi
-DLIBDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/auth\" \
-I$(top_srcdir)/libglusterfs/src \
-I$(nfsrpclibdir) -I$(CONTRIBDIR)/rbtree \
- -I$(top_srcdir)/rpc/xdr/src/
+ -I$(top_srcdir)/rpc/xdr/src/ -DDATADIR=\"$(localstatedir)\"
AM_CFLAGS = -Wall $(GF_CFLAGS)
#define OPT_SERVER_RPC_STATD_PIDFILE "nfs.rpc-statd-pidfile"
#define OPT_SERVER_RPC_STATD_NOTIFY_PIDFILE "nfs.rpc-statd-notify-pidfile"
-/* TODO: DATADIR should be based on configure's $(localstatedir) */
-#define DATADIR "/var/lib/glusterd"
-#define NFS_DATADIR DATADIR "/nfs"
+#define NFS_DATADIR GLUSTERD_DEFAULT_WORKDIR "/nfs"
/* Forward declaration */
int nfs_add_initer (struct list_head *list, nfs_version_initer_t init);
},
{ .key = {"nfs.mount-rmtab"},
.type = GF_OPTION_TYPE_PATH,
- .default_value = DATADIR "/rmtab",
+ .default_value = NFS_DATADIR "/rmtab",
.description = "Set the location of the cache file that is used to "
"list all the NFS-clients that have connected "
"through the MOUNT protocol. If this is on shared "