--- /dev/null
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
--- /dev/null
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_memlimit: int (const void *, size_t)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
except OSError: pass
def find_lib():
- return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), '../../third_party/waf'))
wafdir = find_lib()
w = join(wafdir, 'wafadmin')
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Sample run-on-target script
+# This is a script that can be used as the cross-execute parameter to the
+# Samba configuration process. It runs the command on a remote target for
+# which the cross-compiled configure test was built.
+#
+# To use:
+# ./configure \
+# --cross-compile \
+# '--cross-execute=./buildtools/example/run_on_target.py --host=<host>'
+#
+# A more elaborate example:
+# ./configure \
+# --cross-compile \
+# '--cross-execute=./buildtools/example/run_on_target.py --host=<host> --user=<user> "--ssh=ssh -i <some key file>" --destdir=/path/to/dir'
+#
+# Typically this is to be used also with --cross-answers, so that the
+# cross answers file gets built and further builds can be made without
+# the help of a remote target.
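+#
+# For example, a combined invocation might look like this (the answers
+# file name here is only illustrative):
+# ./configure \
+#	--cross-compile \
+#	'--cross-execute=./buildtools/example/run_on_target.py --host=<host>' \
+#	--cross-answers=cross-answers.txt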
+#
+# The following assumptions are made:
+# 1. rsync is available on build machine and target machine
+# 2. A running ssh service on target machine with password-less shell login
+# 3. A directory writable by the password-less login user
+#    4. The tests on the target can run and provide reliable results
+#       from the login account's home directory. This is significant,
+#       for example, for locking tests which create files in the
+#       current directory. If this assumption does not hold, the
+#       TESTDIR environment variable can be set on the target (using
+#       the ssh command line or the server config) and the tests will
+#       chdir to that directory.
+#
+
+import sys
+import os
+import subprocess
+from optparse import OptionParser
+
+# these are defaults, but they can be overridden on the command line
+SSH = 'ssh'
+USER = None
+HOST = 'localhost'
+
+
+def xfer_files(ssh, srcdir, host, user, targ_destdir):
+ """Transfer executable files to target
+
+ Use rsync to copy the directory containing program to run
+ INTO a destination directory on the target. An exact copy
+ of the source directory is created on the target machine,
+ possibly deleting files on the target machine which do not
+ exist on the source directory.
+
+ The idea is that the test may include files in addition to
+ the compiled binary, and all of those files reside alongside
+ the binary in a source directory.
+
+ For example, if the test to run is /foo/bar/test and the
+ destination directory on the target is /tbaz, then /tbaz/bar
+ on the target shall be an exact copy of /foo/bar on the source,
+ including deletion of files inside /tbaz/bar which do not exist
+ on the source.
+ """
+
+ userhost = host
+ if user:
+ userhost = '%s@%s' % (user, host)
+
+ cmd = 'rsync --verbose -rl --ignore-times --delete -e "%s" %s %s:%s/' % \
+ (ssh, srcdir, userhost, targ_destdir)
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (out, err) = p.communicate()
+ if p.returncode != 0:
+ raise Exception('failed syncing files\n stdout:\n%s\nstderr:%s\n'
+ % (out, err))
+
+
+def exec_remote(ssh, host, user, destdir, targdir, prog, args):
+ """Run a test on the target
+
+ Using password-less ssh, run the compiled binary on the target.
+
+ An assumption is that there's no need to cd into the target dir,
+ same as there's no need to do it on a native build.
+ """
+ userhost = host
+ if user:
+ userhost = '%s@%s' % (user, host)
+
+ cmd = '%s %s %s/%s/%s' % (ssh, userhost, destdir, targdir, prog)
+ if args:
+ cmd = cmd + ' ' + ' '.join(args)
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (out, err) = p.communicate()
+ return (p.returncode, out)
+
+
+def main(argv):
+ usage = "usage: %prog [options] <prog> [args]"
+ parser = OptionParser(usage)
+
+ parser.add_option('--ssh', help="SSH client and additional flags",
+ default=SSH)
+ parser.add_option('--host', help="target host name or IP address",
+ default=HOST)
+ parser.add_option('--user', help="login user on target",
+ default=USER)
+ parser.add_option('--destdir', help="work directory on target",
+ default='~')
+
+ (options, args) = parser.parse_args(argv)
+ if len(args) < 1:
+ parser.error("please supply test program to run")
+
+ progpath = args[0]
+
+ # assume that a test that was not compiled fails (e.g. getconf)
+ if progpath[0] != '/':
+ return (1, "")
+
+ progdir = os.path.dirname(progpath)
+ prog = os.path.basename(progpath)
+ targ_progdir = os.path.basename(progdir)
+
+ xfer_files(
+ options.ssh,
+ progdir,
+ options.host,
+ options.user,
+ options.destdir)
+
+ (rc, out) = exec_remote(options.ssh,
+ options.host,
+ options.user,
+ options.destdir,
+ targ_progdir,
+ prog, args[1:])
+ return (rc, out)
+
+
+if __name__ == '__main__':
+ (rc, out) = main(sys.argv[1:])
+ sys.stdout.write(out)
+ sys.exit(rc)
+++ /dev/null
-#!/bin/sh
-# Update our copy of waf
-
-TARGETDIR="`dirname $0`"
-WORKDIR="`mktemp -d -t update-waf-XXXXXX`"
-
-mkdir -p "$WORKDIR"
-
-git clone https://code.google.com/p/waf.waf15/ "$WORKDIR"
-
-rsync -C -avz --delete "$WORKDIR/wafadmin/" "$TARGETDIR/wafadmin/"
-
-rm -rf "$WORKDIR"
s3reldir = os_path_relpath(s3dir, bld.curdir)
# the extra_includes list is relative to the source3 directory
- extra_includes = [ '.', 'include', 'lib', '../lib/tdb_compat' ]
+ extra_includes = [ '.', 'include', 'lib' ]
# local heimdal paths only included when USING_SYSTEM_KRB5 is not set
if not bld.CONFIG_SET("USING_SYSTEM_KRB5"):
extra_includes += [ '../source4/heimdal/lib/com_err',
headers=headers,
msg='Checking for declaration of %s' % v,
always=always):
- ret = False
+ if not CHECK_CODE(conf,
+ '''
+ return (int)%s;
+ ''' % (v),
+ execute=False,
+ link=False,
+ msg='Checking for declaration of %s (as enum)' % v,
+ local_include=False,
+ headers=headers,
+ define=define,
+ always=always):
+ ret = False
return ret
if not sys.platform.startswith("openbsd") and conf.env.undefined_ignore_ldflags == []:
if conf.CHECK_LDFLAGS(['-undefined', 'dynamic_lookup']):
conf.env.undefined_ignore_ldflags = ['-undefined', 'dynamic_lookup']
+
+@conf
+def CHECK_CFG(self, *k, **kw):
+ return self.check_cfg(*k, **kw)
name = name,
source = source,
target = header,
- on_results=True,
+ update_outputs=True,
ext_out='.c',
before ='cc',
rule = '${PERL} "${SCRIPT}/mkproto.pl" --srcdir=.. --builddir=. --public=/dev/null --private="${TGT}" ${SRC}'
pkg = libname
# try pkgconfig first
- if (conf.check_cfg(package=pkg,
+ if (conf.CHECK_CFG(package=pkg,
args='"%s >= %s" --cflags --libs' % (pkg, minversion),
msg=msg, uselib_store=uselib_store) and
check_functions_headers_code()):
# bugs in the real parse_flags() function.
#
if x == '-Wl,-rpath' or x == '-Wl,-R':
- linkflags.remove(x)
x = lst1.pop(0)
if x.startswith('-Wl,'):
rpath = x[4:]
import Utils, Logs, sys, os, Options, re
from Configure import conf
+import shlex
real_Popen = None
ANSWER_UNKNOWN = (254, "")
-ANSWER_FAIL = (255, "")
+ANSWER_NO = (1, "")
ANSWER_OK = (0, "")
cross_answers_incomplete = False
except:
Logs.error("Unable to open cross-answers file %s" % ca_file)
sys.exit(1)
+ (retcode, retstring) = answer
+ # if retstring is more than one line then we probably
+ # don't care about its actual content (the tests should
+ # yield one-line output in order to comply with the cross-answer
+ # format)
+ retstring = retstring.strip()
+ if len(retstring.split('\n')) > 1:
+ retstring = ''
+ answer = (retcode, retstring)
+
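+    # The cross-answers file ends up with one line per configure test; some
+    # illustrative entries (the test names are only examples):
+    #   Checking simple C program: OK
+    #   Checking for large file support: NO
+    #   Checking uname sysname type: "Linux"
+    #   Checking getconf large file support flags: (1, "")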
if answer == ANSWER_OK:
f.write('%s: OK\n' % msg)
elif answer == ANSWER_UNKNOWN:
f.write('%s: UNKNOWN\n' % msg)
- elif answer == ANSWER_FAIL:
- f.write('%s: FAIL\n' % msg)
+ elif answer == ANSWER_NO:
+ f.write('%s: NO\n' % msg)
else:
- (retcode, retstring) = answer
- f.write('%s: (%d, "%s")' % (msg, retcode, retstring))
+ if retcode == 0:
+ f.write('%s: "%s"\n' % (msg, retstring))
+ else:
+ f.write('%s: (%d, "%s")\n' % (msg, retcode, retstring))
f.close()
try:
f = open(ca_file, 'r')
except:
- add_answer(ca_file, msg, ANSWER_UNKNOWN)
return ANSWER_UNKNOWN
for line in f:
line = line.strip()
if line == '' or line[0] == '#':
continue
if line.find(':') != -1:
- a = line.split(':')
+ a = line.split(':', 1)
thismsg = a[0].strip()
if thismsg != msg:
continue
return ANSWER_UNKNOWN
elif ans == "FAIL" or ans == "NO":
f.close()
- return ANSWER_FAIL
+ return ANSWER_NO
elif ans[0] == '"':
+ f.close()
return (0, ans.strip('"'))
elif ans[0] == "'":
+ f.close()
return (0, ans.strip("'"))
else:
m = re.match('\(\s*(-?\d+)\s*,\s*\"(.*)\"\s*\)', ans)
else:
raise Utils.WafError("Bad answer format '%s' in %s" % (line, ca_file))
f.close()
- add_answer(ca_file, msg, ANSWER_UNKNOWN)
return ANSWER_UNKNOWN
'''cross-compilation wrapper for Popen'''
def __init__(*k, **kw):
(obj, args) = k
-
- if '--cross-execute' in args:
- # when --cross-execute is set, then change the arguments
- # to use the cross emulator
- i = args.index('--cross-execute')
- newargs = args[i+1].split()
- newargs.extend(args[0:i])
- args = newargs
- elif '--cross-answers' in args:
+ use_answers = False
+ ans = ANSWER_UNKNOWN
+
+ # Three possibilities:
+ # 1. Only cross-answers - try the cross-answers file, and if
+ # there's no corresponding answer, add to the file and mark
+ # the configure process as unfinished.
+ # 2. Only cross-execute - get the answer from cross-execute
+ # 3. Both - try the cross-answers file, and if there is no
+ # corresponding answer - use cross-execute to get an answer,
+ # and add that answer to the file.
+ if '--cross-answers' in args:
# when --cross-answers is set, then change the arguments
# to use the cross answers if available
+ use_answers = True
i = args.index('--cross-answers')
ca_file = args[i+1]
msg = args[i+2]
ans = cross_answer(ca_file, msg)
+
+ if '--cross-execute' in args and ans == ANSWER_UNKNOWN:
+ # when --cross-execute is set, then change the arguments
+ # to use the cross emulator
+ i = args.index('--cross-execute')
+ newargs = shlex.split(args[i+1])
+ newargs.extend(args[0:i])
+ if use_answers:
+ p = real_Popen(newargs,
+ stdout=Utils.pproc.PIPE,
+ stderr=Utils.pproc.PIPE)
+ ce_out, ce_err = p.communicate()
+ ans = (p.returncode, ce_out)
+ add_answer(ca_file, msg, ans)
+ else:
+ args = newargs
+
+ if use_answers:
if ans == ANSWER_UNKNOWN:
global cross_answers_incomplete
cross_answers_incomplete = True
+ add_answer(ca_file, msg, ans)
(retcode, retstring) = ans
args = ['/bin/sh', '-c', "echo -n '%s'; exit %d" % (retstring, retcode)]
real_Popen.__init__(*(obj, args), **kw)
if conf.env.CROSS_EXECUTE:
ret.extend(['--cross-execute', conf.env.CROSS_EXECUTE])
- elif conf.env.CROSS_ANSWERS:
+
+ if conf.env.CROSS_ANSWERS:
if msg is None:
raise Utils.WafError("Cannot have NULL msg in cross-answers")
ret.extend(['--cross-answers', os.path.join(Options.launch_dir, conf.env.CROSS_ANSWERS), msg])
savedeps_inputs = ['samba_deps', 'samba_includes', 'local_include', 'local_include_first', 'samba_cflags',
'source', 'grouping_library', 'samba_ldflags', 'allow_undefined_symbols',
'use_global_deps', 'global_include' ]
-savedeps_outputs = ['uselib', 'uselib_local', 'add_objects', 'includes', 'ccflags', 'ldflags', 'samba_deps_extended']
+savedeps_outputs = ['uselib', 'uselib_local', 'add_objects', 'includes',
+ 'ccflags', 'ldflags', 'samba_deps_extended', 'final_libs']
savedeps_outenv = ['INC_PATHS']
savedeps_envvars = ['NONSHARED_BINARIES', 'GLOBAL_DEPENDENCIES', 'EXTRA_CFLAGS', 'EXTRA_LDFLAGS', 'EXTRA_INCLUDES' ]
savedeps_caches = ['GLOBAL_DEPENDENCIES', 'TARGET_TYPE', 'INIT_FUNCTIONS', 'SYSLIB_DEPS']
env = dict(os.environ)
env["GIT_DIR"] = os.path.join(repo, ".git")
break
- elif os.path.isdir(os.path.join(repo, ".bzr")):
- ls_files_cmd = [ 'bzr', 'ls', '--recursive', '--versioned',
- os_path_relpath(path, repo)]
- cwd = repo
- env = None
- break
repo = os.path.dirname(repo)
if repo == "/":
raise Exception("unsupported or no vcs for %s" % path)
--- /dev/null
+import os
+import subprocess
+
+def find_git(env=None):
+ """Find the git binary."""
+ if env is not None and 'GIT' in env:
+ return env['GIT']
+
+    # Fall back to the standard location
+ if os.path.exists("/usr/bin/git"):
+ # this is useful when doing make dist without configuring
+ return "/usr/bin/git"
+
+ return None
+
+
+def has_submodules(path):
+ """Check whether a source directory is git-versioned and has submodules.
+
+ :param path: Path to Samba source directory
+ """
+ return (os.path.isdir(os.path.join(path, ".git")) and
+ os.path.isfile(os.path.join(path, ".gitmodules")))
+
+
+def read_submodule_status(path, env=None):
+ """Check status of submodules.
+
+ :param path: Path to git directory
+ :param env: Optional waf environment
+ :return: Yields tuples with submodule relpath and status
+ (one of: 'out-of-date', 'not-checked-out', 'up-to-date')
+ :raise RuntimeError: raised when parsing of 'git submodule status' output
+ fails.
+ """
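+    # Lines from 'git submodule status' look roughly like the following
+    # (hashes and paths are illustrative):
+    #   "-<sha1> third_party/foo"        -> not-checked-out
+    #   "+<sha1> third_party/foo (v1.2)" -> out-of-date
+    #   " <sha1> third_party/foo (v1.2)" -> up-to-date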
+ if not has_submodules(path):
+ # No point in running git.
+ return
+ git = find_git(env)
+ if git is None:
+ return
+ p = subprocess.Popen([git, "submodule", "status"], stdout=subprocess.PIPE,
+ cwd=path)
+ (stdout, stderr) = p.communicate(None)
+ for l in stdout.splitlines():
+ l = l.rstrip()
+ status = l[0]
+ l = l[1:]
+ parts = l.split(" ")
+ if len(parts) > 2 and status in ("-", "+"):
+ yield (parts[1], "out-of-date")
+ elif len(parts) == 2 and status == "-":
+ yield (parts[1], "not-checked-out")
+ elif len(parts) > 2 and status == " ":
+ yield (parts[1], "up-to-date")
+ else:
+ raise RuntimeError("Unable to parse submodule status: %r, %r" % (status, parts))
bld = self.bld
- install_ldflags = install_rpath(self)
- build_ldflags = build_rpath(bld)
-
- if not Options.is_install or not getattr(self, 'samba_install', True):
- # just need to set the build rpath if we are not installing
- self.env.RPATH = build_ldflags
- return
-
- # setup the install path, expanding variables
- install_path = getattr(self, 'samba_inst_path', None)
- if install_path is None:
- if getattr(self, 'private_library', False):
- install_path = '${PRIVATELIBDIR}'
- else:
- install_path = '${LIBDIR}'
- install_path = bld.EXPAND_VARIABLES(install_path)
-
- target_name = self.target
-
- if install_ldflags != build_ldflags:
- # we will be creating a new target name, and using that for the
- # install link. That stops us from overwriting the existing build
- # target, which has different ldflags
- self.done_install_library = True
- t = self.clone('default')
- t.posted = False
- t.target += '.inst'
- self.env.RPATH = build_ldflags
- else:
- t = self
-
- t.env.RPATH = install_ldflags
+ default_env = bld.all_envs['default']
+ try:
+ if self.env['IS_EXTRA_PYTHON']:
+ bld.all_envs['default'] = bld.all_envs['extrapython']
- dev_link = None
+ install_ldflags = install_rpath(self)
+ build_ldflags = build_rpath(bld)
- # in the following the names are:
- # - inst_name is the name with .inst. in it, in the build
- # directory
- # - install_name is the name in the install directory
- # - install_link is a symlink in the install directory, to install_name
+ if not Options.is_install or not getattr(self, 'samba_install', True):
+ # just need to set the build rpath if we are not installing
+ self.env.RPATH = build_ldflags
+ return
- if getattr(self, 'samba_realname', None):
- install_name = self.samba_realname
- install_link = None
- if getattr(self, 'soname', ''):
- install_link = self.soname
- if getattr(self, 'samba_type', None) == 'PYTHON':
- inst_name = bld.make_libname(t.target, nolibprefix=True, python=True)
+ # setup the install path, expanding variables
+ install_path = getattr(self, 'samba_inst_path', None)
+ if install_path is None:
+ if getattr(self, 'private_library', False):
+ install_path = '${PRIVATELIBDIR}'
+ else:
+ install_path = '${LIBDIR}'
+ install_path = bld.EXPAND_VARIABLES(install_path)
+
+ target_name = self.target
+
+ if install_ldflags != build_ldflags:
+ # we will be creating a new target name, and using that for the
+ # install link. That stops us from overwriting the existing build
+ # target, which has different ldflags
+ self.done_install_library = True
+ t = self.clone(self.env)
+ t.posted = False
+ t.target += '.inst'
+ self.env.RPATH = build_ldflags
else:
+ t = self
+
+ t.env.RPATH = install_ldflags
+
+ dev_link = None
+
+ # in the following the names are:
+ # - inst_name is the name with .inst. in it, in the build
+ # directory
+ # - install_name is the name in the install directory
+ # - install_link is a symlink in the install directory, to install_name
+
+ if getattr(self, 'samba_realname', None):
+ install_name = self.samba_realname
+ install_link = None
+ if getattr(self, 'soname', ''):
+ install_link = self.soname
+ if getattr(self, 'samba_type', None) == 'PYTHON':
+ inst_name = bld.make_libname(t.target, nolibprefix=True, python=True)
+ else:
+ inst_name = bld.make_libname(t.target)
+ elif self.vnum:
+ vnum_base = self.vnum.split('.')[0]
+ install_name = bld.make_libname(target_name, version=self.vnum)
+ install_link = bld.make_libname(target_name, version=vnum_base)
+ inst_name = bld.make_libname(t.target)
+ if not self.private_library:
+ # only generate the dev link for non-bundled libs
+ dev_link = bld.make_libname(target_name)
+ elif getattr(self, 'soname', ''):
+ install_name = bld.make_libname(target_name)
+ install_link = self.soname
inst_name = bld.make_libname(t.target)
- elif self.vnum:
- vnum_base = self.vnum.split('.')[0]
- install_name = bld.make_libname(target_name, version=self.vnum)
- install_link = bld.make_libname(target_name, version=vnum_base)
- inst_name = bld.make_libname(t.target)
- if not self.private_library:
- # only generate the dev link for non-bundled libs
- dev_link = bld.make_libname(target_name)
- elif getattr(self, 'soname', ''):
- install_name = bld.make_libname(target_name)
- install_link = self.soname
- inst_name = bld.make_libname(t.target)
- else:
- install_name = bld.make_libname(target_name)
- install_link = None
- inst_name = bld.make_libname(t.target)
-
- if t.env.SONAME_ST:
- # ensure we get the right names in the library
- if install_link:
- t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link)
else:
- t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name)
- t.env.SONAME_ST = ''
+ install_name = bld.make_libname(target_name)
+ install_link = None
+ inst_name = bld.make_libname(t.target)
- # tell waf to install the library
- bld.install_as(os.path.join(install_path, install_name),
- os.path.join(self.path.abspath(bld.env), inst_name),
- chmod=MODE_755)
- if install_link and install_link != install_name:
- # and the symlink if needed
- bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name))
- if dev_link:
- bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name))
+ if t.env.SONAME_ST:
+ # ensure we get the right names in the library
+ if install_link:
+ t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link)
+ else:
+ t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name)
+ t.env.SONAME_ST = ''
+
+ # tell waf to install the library
+ bld.install_as(os.path.join(install_path, install_name),
+ os.path.join(self.path.abspath(bld.env), inst_name),
+ chmod=MODE_755)
+ if install_link and install_link != install_name:
+ # and the symlink if needed
+ bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name))
+ if dev_link:
+ bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name))
+ finally:
+ bld.all_envs['default'] = default_env
@feature('cshlib')
for i in v['LIBPATH']:
if is_standard_libpath(v, i):
v['LIBPATH'].remove(i)
-
-@feature('cc')
-@before('apply_incpaths', 'apply_obj_vars_cc')
-def samba_stash_cppflags(self):
- """Fix broken waf ordering of CPPFLAGS"""
-
- self.env.SAVED_CPPFLAGS = self.env.CPPFLAGS
- self.env.CPPFLAGS = []
-
-@feature('cc')
-@after('apply_incpaths', 'apply_obj_vars_cc')
-def samba_pop_cppflags(self):
- """append stashed user CPPFLAGS after our internally computed flags"""
-
- #
- # Note that we don't restore the values to 'CPPFLAGS',
- # but to _CCINCFLAGS instead.
- #
- # buildtools/wafadmin/Tools/cc.py defines the 'cc' task generator as
- # '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}'
- #
- # Our goal is to effectively invert the order of ${CPPFLAGS} and
- # ${_CCINCFLAGS}.
- self.env.append_value('_CCINCFLAGS', self.env.SAVED_CPPFLAGS)
- self.env.SAVED_CPPFLAGS = []
-
-@feature('cprogram', 'cshlib', 'cstaticlib')
-@before('apply_obj_vars', 'add_extra_flags')
-def samba_stash_linkflags(self):
- """stash away LINKFLAGS in order to fix waf's broken ordering wrt or
- user LDFLAGS"""
-
- self.env.SAVE_LINKFLAGS = self.env.LINKFLAGS
- self.env.LINKFLAGS = []
-
-@feature('cprogram', 'cshlib', 'cstaticlib')
-@after('apply_obj_vars', 'add_extra_flags')
-def samba_pop_linkflags(self):
- """after apply_obj_vars append saved LDFLAGS"""
-
- self.env.append_value('LINKFLAGS', self.env.SAVE_LINKFLAGS)
- self.env.SAVE_LINKFLAGS = []
fp.write(" output(screen, \" sizeof(int): %lu\\n\",(unsigned long)sizeof(int));\n")
fp.write(" output(screen, \" sizeof(long): %lu\\n\",(unsigned long)sizeof(long));\n")
fp.write(" output(screen, \" sizeof(long long): %lu\\n\",(unsigned long)sizeof(long long));\n")
- fp.write(" output(screen, \" sizeof(uint8): %lu\\n\",(unsigned long)sizeof(uint8));\n")
- fp.write(" output(screen, \" sizeof(uint16): %lu\\n\",(unsigned long)sizeof(uint16));\n")
- fp.write(" output(screen, \" sizeof(uint32): %lu\\n\",(unsigned long)sizeof(uint32));\n")
+ fp.write(" output(screen, \" sizeof(uint8_t): %lu\\n\",(unsigned long)sizeof(uint8_t));\n")
+ fp.write(" output(screen, \" sizeof(uint16_t): %lu\\n\",(unsigned long)sizeof(uint16_t));\n")
+ fp.write(" output(screen, \" sizeof(uint32_t): %lu\\n\",(unsigned long)sizeof(uint32_t));\n")
fp.write(" output(screen, \" sizeof(short): %lu\\n\",(unsigned long)sizeof(short));\n")
fp.write(" output(screen, \" sizeof(void*): %lu\\n\",(unsigned long)sizeof(void*));\n")
fp.write(" output(screen, \" sizeof(size_t): %lu\\n\",(unsigned long)sizeof(size_t));\n")
t = bld(rule='cd .. && %s %s ${PERL} "${PIDL}" --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${SRC[0].abspath(env)}"' % (cpp, cc),
ext_out = '.c',
before = 'cc',
- on_results = True,
+ update_outputs = True,
shell = True,
source = source,
target = out_files,
rule = '${PERL} ${SRC} --output ${TGT} | sed "s|default/||" > ${TGT}',
ext_out = '.c',
before = 'cc',
- on_results = True,
+ update_outputs = True,
shell = True,
source = '../../librpc/tables.pl',
target = target,
@conf
def SAMBA_CHECK_PYTHON(conf, mandatory=True, version=(2,4,2)):
# enable tool to build python extensions
+ if conf.env.HAVE_PYTHON_H:
+ conf.check_python_version(version)
+ return
+
+ interpreters = []
+
+ if conf.env['EXTRA_PYTHON']:
+ conf.all_envs['extrapython'] = conf.env.copy()
+ conf.setenv('extrapython')
+ conf.env['PYTHON'] = conf.env['EXTRA_PYTHON']
+ conf.env['IS_EXTRA_PYTHON'] = 'yes'
+ conf.find_program('python', var='PYTHON', mandatory=True)
+ conf.check_tool('python')
+ try:
+ conf.check_python_version((3, 3, 0))
+ except Exception:
+ Logs.warn('extra-python needs to be Python 3.3 or later')
+ raise
+ interpreters.append(conf.env['PYTHON'])
+ conf.setenv('default')
+
conf.find_program('python', var='PYTHON', mandatory=mandatory)
conf.check_tool('python')
path_python = conf.find_program('python')
conf.env.PYTHON_SPECIFIED = (conf.env.PYTHON != path_python)
conf.check_python_version(version)
+ interpreters.append(conf.env['PYTHON'])
+ conf.env.python_interpreters = interpreters
+
+
@conf
def SAMBA_CHECK_PYTHON_HEADERS(conf, mandatory=True):
if conf.env["python_headers_checked"] == []:
- conf.check_python_headers(mandatory)
+ if conf.env['EXTRA_PYTHON']:
+ conf.setenv('extrapython')
+ _check_python_headers(conf, mandatory=True)
+ conf.setenv('default')
+
+ _check_python_headers(conf, mandatory)
conf.env["python_headers_checked"] = "yes"
+
+ if conf.env['EXTRA_PYTHON']:
+ extraversion = conf.all_envs['extrapython']['PYTHON_VERSION']
+ if extraversion == conf.env['PYTHON_VERSION']:
+ raise Utils.WafError("extrapython %s is same as main python %s" % (
+ extraversion, conf.env['PYTHON_VERSION']))
else:
conf.msg("python headers", "using cache")
+ # we don't want PYTHONDIR in config.h, as otherwise changing
+ # --prefix causes a complete rebuild
+ del(conf.env.defines['PYTHONDIR'])
+ del(conf.env.defines['PYTHONARCHDIR'])
+
+def _check_python_headers(conf, mandatory):
+ conf.check_python_headers(mandatory=mandatory)
+
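+    # Derive the PEP 3149 ABI tag from the extension-module suffix; e.g.
+    # (illustrative) a pyext_PATTERN of '%s.cpython-34m.so' yields a
+    # PYTHON_SO_ABI_FLAG of '.cpython-34m'. For Python 2 the flag is empty.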
+ if conf.env['PYTHON_VERSION'] > '3':
+ abi_pattern = os.path.splitext(conf.env['pyext_PATTERN'])[0]
+ conf.env['PYTHON_SO_ABI_FLAG'] = abi_pattern % ''
+ else:
+ conf.env['PYTHON_SO_ABI_FLAG'] = ''
+
def SAMBA_PYTHON(bld, name,
source='',
init_function_sentinel=None,
local_include=True,
vars=None,
+ install=True,
enabled=True):
'''build a python extension for Samba'''
+ if bld.env['IS_EXTRA_PYTHON']:
+ name = 'extra-' + name
+
# when we support static python modules we'll need to gather
# the list from all the SAMBA_PYTHON() targets
if init_function_sentinel is not None:
target_type='PYTHON',
install_path='${PYTHONARCHDIR}',
allow_undefined_symbols=True,
- allow_warnings=True,
+ install=install,
enabled=enabled)
Build.BuildContext.SAMBA_PYTHON = SAMBA_PYTHON
+
+
+def pyembed_libname(bld, name, extrapython=False):
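+    # Tag a library name with the Python ABI flag so that builds for the main
+    # and the extra Python interpreter do not collide; e.g. (illustrative)
+    # 'pytalloc-util' becomes 'pytalloc-util.cpython-34m' under Python 3 and
+    # stays unchanged under Python 2, where PYTHON_SO_ABI_FLAG is empty.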
+ return name + bld.env['PYTHON_SO_ABI_FLAG']
+
+Build.BuildContext.pyembed_libname = pyembed_libname
+
+
+def gen_python_environments(bld, extra_env_vars=()):
+ """Generate all Python environments
+
+ To be used in a for loop. Normally, the loop body will be executed once.
+
+    When --extra-python is used, the body will additionally be executed
+ with the extra-python environment active.
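+
+    Illustrative use from a wscript (target and source names here are
+    hypothetical):
+
+        for env in bld.gen_python_environments():
+            bld.SAMBA_PYTHON('python_foo', source='foo.c')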
+ """
+ yield
+
+ if bld.env['EXTRA_PYTHON']:
+ copied = ('GLOBAL_DEPENDENCIES', 'TARGET_TYPE') + tuple(extra_env_vars)
+ for name in copied:
+ bld.all_envs['extrapython'][name] = bld.all_envs['default'][name]
+ default_env = bld.all_envs['default']
+ bld.all_envs['default'] = bld.all_envs['extrapython']
+ yield
+ bld.all_envs['default'] = default_env
+
+Build.BuildContext.gen_python_environments = gen_python_environments
return -1
+def RUN_PYTHON_TESTS(testfiles, pythonpath=None):
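+    """Run the given test files with each configured Python interpreter.
+
+    pythonpath defaults to the 'python' subdirectory of the build directory;
+    the return value is non-zero if any test failed.
+    """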
+ env = LOAD_ENVIRONMENT()
+ if pythonpath is None:
+ pythonpath = os.path.join(Utils.g_module.blddir, 'python')
+ result = 0
+ for interp in env.python_interpreters:
+ for testfile in testfiles:
+ cmd = "PYTHONPATH=%s %s %s" % (pythonpath, interp, testfile)
+ print('Running Python test with %s: %s' % (interp, testfile))
+ ret = RUN_COMMAND(cmd)
+ if ret:
+ print('Python test failed: %s' % cmd)
+ result = ret
+ return result
+
+
# make sure we have md5. some systems don't have it
try:
from hashlib import md5
return name
(root1, ext1) = os.path.splitext(name)
if python:
- (root2, ext2) = os.path.splitext(ctx.env.pyext_PATTERN)
+ return ctx.env.pyext_PATTERN % root1
else:
(root2, ext2) = os.path.splitext(ctx.env.shlib_PATTERN)
return root1+ext2
import os
import Utils
import samba_utils
-import sys
-
-def bzr_version_summary(path):
- try:
- import bzrlib
- except ImportError:
- return ("BZR-UNKNOWN", {})
-
- import bzrlib.ui
- bzrlib.ui.ui_factory = bzrlib.ui.make_ui_for_terminal(
- sys.stdin, sys.stdout, sys.stderr)
- from bzrlib import branch, osutils, workingtree
- from bzrlib.plugin import load_plugins
- load_plugins()
-
- b = branch.Branch.open(path)
- (revno, revid) = b.last_revision_info()
- rev = b.repository.get_revision(revid)
-
- fields = {
- "BZR_REVISION_ID": revid,
- "BZR_REVNO": revno,
- "COMMIT_DATE": osutils.format_date_with_offset_in_original_timezone(rev.timestamp,
- rev.timezone or 0),
- "COMMIT_TIME": int(rev.timestamp),
- "BZR_BRANCH": rev.properties.get("branch-nick", ""),
- }
-
- # If possible, retrieve the git sha
- try:
- from bzrlib.plugins.git.object_store import get_object_store
- except ImportError:
- # No git plugin
- ret = "BZR-%d" % revno
- else:
- store = get_object_store(b.repository)
- store.lock_read()
- try:
- full_rev = store._lookup_revision_sha1(revid)
- finally:
- store.unlock()
- fields["GIT_COMMIT_ABBREV"] = full_rev[:7]
- fields["GIT_COMMIT_FULLREV"] = full_rev
- ret = "GIT-" + fields["GIT_COMMIT_ABBREV"]
-
- if workingtree.WorkingTree.open(path).has_changes():
- fields["COMMIT_IS_CLEAN"] = 0
- ret += "+"
- else:
- fields["COMMIT_IS_CLEAN"] = 1
- return (ret, fields)
-
+from samba_git import find_git
def git_version_summary(path, env=None):
- # Get version from GIT
- if not 'GIT' in env and os.path.exists("/usr/bin/git"):
- # this is useful when doing make dist without configuring
- env.GIT = "/usr/bin/git"
+ git = find_git(env)
- if not 'GIT' in env:
+ if git is None:
return ("GIT-UNKNOWN", {})
+ env.GIT = git
+
environ = dict(os.environ)
environ["GIT_DIR"] = '%s/.git' % path
environ["GIT_WORK_TREE"] = path
self.vcs_fields = {}
elif os.path.exists(os.path.join(path, ".git")):
suffix, self.vcs_fields = git_version_summary(path, env=env)
- elif os.path.exists(os.path.join(path, ".bzr")):
- suffix, self.vcs_fields = bzr_version_summary(path)
elif os.path.exists(os.path.join(path, ".distversion")):
suffix, self.vcs_fields = distversion_version_summary(path)
else:
enabled=True):
'''define a Samba library'''
+ if pyembed and bld.env['IS_EXTRA_PYTHON']:
+ public_headers = pc_files = None
+
if LIB_MUST_BE_PRIVATE(bld, libname):
private_library=True
if vnum is None and soname is None:
raise Utils.WafError("public library '%s' must have a vnum" %
libname)
- if pc_files is None:
+ if pc_files is None and not bld.env['IS_EXTRA_PYTHON']:
raise Utils.WafError("public library '%s' must have pkg-config file" %
libname)
- if public_headers is None:
+ if public_headers is None and not bld.env['IS_EXTRA_PYTHON']:
raise Utils.WafError("public library '%s' must have header files" %
libname)
bundled_extension, private_library)
ldflags = TO_LIST(ldflags)
+ if bld.env['ENABLE_RELRO'] is True:
+ ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now'))
features = 'cc cshlib symlink_lib install_lib'
if pyext:
source=bld.EXPAND_VARIABLES(source, vars=vars),
target=target,
shell=isinstance(rule, str),
- on_results=True,
+ update_outputs=True,
before='cc',
ext_out='.c',
samba_type='GENERATOR',
'''build and install manual pages'''
bld.env.SAMBA_EXPAND_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/expand-sambadoc.xsl'
bld.env.SAMBA_MAN_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/man.xsl'
- bld.env.SAMBA_CATALOGS = 'file:///etc/xml/catalog file:///usr/local/share/xml/catalog file://' + bld.srcnode.abspath() + '/bin/default/docs-xml/build/catalog.xml'
+ bld.env.SAMBA_CATALOG = bld.srcnode.abspath() + '/bin/default/docs-xml/build/catalog.xml'
+ bld.env.SAMBA_CATALOGS = 'file:///etc/xml/catalog file:///usr/local/share/xml/catalog file://' + bld.env.SAMBA_CATALOG
for m in manpages.split():
source = m + '.xml'
source=source,
target=m,
group='final',
+ dep_vars=['SAMBA_MAN_XSL', 'SAMBA_EXPAND_XSL', 'SAMBA_CATALOG'],
rule='''XML_CATALOG_FILES="${SAMBA_CATALOGS}"
export XML_CATALOG_FILES
${XSLTPROC} --xinclude --stringparam noreference 0 -o ${TGT}.xml --nonet ${SAMBA_EXPAND_XSL} ${SRC[0].abspath(env)}
help=("comma separated list of libraries to not apply extension to [%s]" % extension_exception),
action="store", dest='PRIVATE_EXTENSION_EXCEPTION', default=extension_exception)
- builtin_defauilt = Options.options['BUILTIN_LIBRARIES_DEFAULT']
+ builtin_default = Options.options['BUILTIN_LIBRARIES_DEFAULT']
gr.add_option('--builtin-libraries',
- help=("command separated list of libraries to build directly into binaries [%s]" % builtin_defauilt),
- action="store", dest='BUILTIN_LIBRARIES', default=builtin_defauilt)
+                  help=("comma separated list of libraries to build directly into binaries [%s]" % builtin_default),
+ action="store", dest='BUILTIN_LIBRARIES', default=builtin_default)
gr.add_option('--minimum-library-version',
help=("list of minimum system library versions (LIBNAME1:version,LIBNAME2:version)"),
help='tag release in git at the same time',
type='string', action='store', dest='TAG_RELEASE')
+ opt.add_option('--extra-python', type=str,
+ help=("build selected libraries for the specified "
+ "additional version of Python "
+ "(example: --extra-python=/usr/bin/python3)"),
+ metavar="PYTHON", dest='EXTRA_PYTHON', default=None)
+
@wafsamba.runonce
def configure(conf):
conf.env.AUTOCONF_HOST = Options.options.AUTOCONF_HOST
conf.env.AUTOCONF_PROGRAM_PREFIX = Options.options.AUTOCONF_PROGRAM_PREFIX
+ conf.env.EXTRA_PYTHON = Options.options.EXTRA_PYTHON
+
if (conf.env.AUTOCONF_HOST and
conf.env.AUTOCONF_BUILD and
conf.env.AUTOCONF_BUILD != conf.env.AUTOCONF_HOST):
conf.CHECK_INLINE()
# check for pkgconfig
- conf.check_cfg(atleast_pkgconfig_version='0.0.0')
+ conf.CHECK_CFG(atleast_pkgconfig_version='0.0.0')
conf.DEFINE('_GNU_SOURCE', 1, add_to_cflags=True)
conf.DEFINE('_XOPEN_SOURCE_EXTENDED', 1, add_to_cflags=True)
* - when using talloc_enable_leak_report(), giving directly NULL as a parent
* context implicitly refers to a hidden "null context" global variable, so
* this should not be used in a multi-threaded environment without proper
- * synchronization.
+ * synchronization. In threaded code turn off null tracking using
+ * talloc_disable_null_tracking().
* - the context returned by talloc_autofree_context() is also global so
* shouldn't be used by several threads simultaneously without
* synchronization.
@subpage libtalloc_bestpractices
-*/
\ No newline at end of file
+@subpage libtalloc_threads
+
+*/
--- /dev/null
+/**
+@page libtalloc_threads Chapter 8: Using threads with talloc
+
+@section Talloc and thread safety
+
+The talloc library is not internally thread-safe, in that accesses
+to variables on a talloc context are not controlled by mutexes or
+other thread-safe primitives.
+
+However, so long as talloc_disable_null_tracking() is called from
+the main thread to disable global variable access within talloc,
+then each thread can safely use its own top level talloc context
+allocated off the NULL context.
+
+For example:
+
+@code
+static void *thread_fn(void *arg)
+{
+	const char *ctx_name = (const char *)arg;
+	void *sub_ctx = NULL;
+ /*
+ * Create a new top level talloc hierarchy in
+ * this thread.
+ */
+ void *top_ctx = talloc_named_const(NULL, 0, "top");
+ if (top_ctx == NULL) {
+ return NULL;
+ }
+ sub_ctx = talloc_named_const(top_ctx, 100, ctx_name);
+ if (sub_ctx == NULL) {
+ return NULL;
+ }
+
+ /*
+ * Do more processing/talloc calls on top_ctx
+ * and its children.
+ */
+ ......
+
+ talloc_free(top_ctx);
+	return NULL;
+}
+@endcode
+
+is a perfectly safe use of talloc within a thread.
+
+The problem comes when one thread wishes to move some
+memory allocated on its local top level talloc context
+to another thread. Care must be taken to add data access
+exclusion to prevent memory corruption. One method would
+be to lock a mutex before any talloc call on each thread,
+but this would push the burden of total talloc thread-safety
+on the poor user of the library.
+
+A much easier way to transfer talloced memory between
+threads is by the use of an intermediate, mutex-locked
+variable.
+
+An example of this is below - taken from test code inside
+the talloc testsuite.
+
+The main thread creates 1000 sub-threads, and then accepts
+the transfer of some thread-talloc'ed memory onto its top
+level context from each thread in turn.
+
+A pthread mutex and condition variable are used to
+synchronize the transfer via the intermediate_ptr
+variable.
+
+@code
+/* Required sync variables. */
+static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER;
+
+/* Intermediate talloc pointer for transfer. */
+static void *intermediate_ptr;
+
+/* Subthread. */
+static void *thread_fn(void *arg)
+{
+ int ret;
+ const char *ctx_name = (const char *)arg;
+ void *sub_ctx = NULL;
+ /*
+ * Do stuff that creates a new talloc hierarchy in
+ * this thread.
+ */
+ void *top_ctx = talloc_named_const(NULL, 0, "top");
+ if (top_ctx == NULL) {
+ return NULL;
+ }
+ sub_ctx = talloc_named_const(top_ctx, 100, ctx_name);
+ if (sub_ctx == NULL) {
+ return NULL;
+ }
+
+ /*
+ * Now transfer a pointer from our hierarchy
+ * onto the intermediate ptr.
+ */
+ ret = pthread_mutex_lock(&mtx);
+ if (ret != 0) {
+ talloc_free(top_ctx);
+ return NULL;
+ }
+
+ /* Wait for intermediate_ptr to be free. */
+ while (intermediate_ptr != NULL) {
+ ret = pthread_cond_wait(&condvar, &mtx);
+ if (ret != 0) {
+ talloc_free(top_ctx);
+ return NULL;
+ }
+ }
+
+ /* and move our memory onto it from our toplevel hierarchy. */
+ intermediate_ptr = talloc_move(NULL, &sub_ctx);
+
+ /* Tell the main thread it's ready for pickup. */
+ pthread_cond_broadcast(&condvar);
+ pthread_mutex_unlock(&mtx);
+
+ talloc_free(top_ctx);
+ return NULL;
+}
+
+/* Main thread. */
+
+#define NUM_THREADS 1000
+
+static bool test_pthread_talloc_passing(void)
+{
+ int i;
+ int ret;
+ char str_array[NUM_THREADS][20];
+ pthread_t thread_id;
+ void *mem_ctx;
+
+ /*
+ * Important ! Null tracking breaks threaded talloc.
+ * It *must* be turned off.
+ */
+ talloc_disable_null_tracking();
+
+ /* Main thread toplevel context. */
+ mem_ctx = talloc_named_const(NULL, 0, "toplevel");
+ if (mem_ctx == NULL) {
+ return false;
+ }
+
+ /*
+ * Spin off NUM_THREADS threads.
+ * They will use their own toplevel contexts.
+ */
+ for (i = 0; i < NUM_THREADS; i++) {
+ (void)snprintf(str_array[i],
+ 20,
+ "thread:%d",
+ i);
+ if (str_array[i] == NULL) {
+ return false;
+ }
+ ret = pthread_create(&thread_id,
+ NULL,
+ thread_fn,
+ str_array[i]);
+ if (ret != 0) {
+ return false;
+ }
+ }
+
+ /* Now wait for NUM_THREADS transfers of the talloc'ed memory. */
+ for (i = 0; i < NUM_THREADS; i++) {
+ ret = pthread_mutex_lock(&mtx);
+ if (ret != 0) {
+ talloc_free(mem_ctx);
+ return false;
+ }
+
+ /* Wait for intermediate_ptr to have our data. */
+ while (intermediate_ptr == NULL) {
+ ret = pthread_cond_wait(&condvar, &mtx);
+ if (ret != 0) {
+ talloc_free(mem_ctx);
+ return false;
+ }
+ }
+
+ /* and move it onto our toplevel hierarchy. */
+ (void)talloc_move(mem_ctx, &intermediate_ptr);
+
+ /* Tell the sub-threads we're ready for another. */
+ pthread_cond_broadcast(&condvar);
+ pthread_mutex_unlock(&mtx);
+ }
+
+ /* Dump the hierarchy. */
+ talloc_report(mem_ctx, stdout);
+ talloc_free(mem_ctx);
+ return true;
+}
+@endcode
+*/
#ifndef HAVE_STRLCPY
-/* like strncpy but does not 0 fill the buffer and always null
- terminates. bufsize is the size of the destination buffer */
+/*
+ * Like strncpy but does not 0 fill the buffer and always null
+ * terminates. bufsize is the size of the destination buffer.
+ * Returns the length of s.
+ */
size_t rep_strlcpy(char *d, const char *s, size_t bufsize)
{
size_t len = strlen(s);
size_t ret = len;
- if (bufsize <= 0) return 0;
- if (len >= bufsize) len = bufsize-1;
+
+ if (bufsize <= 0) {
+ return 0;
+ }
+ if (len >= bufsize) {
+ len = bufsize - 1;
+ }
memcpy(d, s, len);
d[len] = 0;
return ret;
}
#else
#ifdef HAVE_BSD_STRTOLL
-#ifdef HAVE_STRTOQ
long long int rep_strtoll(const char *str, char **endptr, int base)
{
- long long int nb = strtoq(str, endptr, base);
- /* In linux EINVAL is only returned if base is not ok */
+ long long int nb = strtoll(str, endptr, base);
+ /* With glibc EINVAL is only returned if base is not ok */
if (errno == EINVAL) {
if (base == 0 || (base >1 && base <37)) {
/* Base was ok so it's because we were not
}
return nb;
}
-#else
-#error "You need the strtoq function"
-#endif /* HAVE_STRTOQ */
#endif /* HAVE_BSD_STRTOLL */
#endif /* HAVE_STRTOLL */
#if defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP) && \
!defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST)
-
#define pthread_mutexattr_setrobust pthread_mutexattr_setrobust_np
+#endif
-/*
- * We assume that PTHREAD_MUTEX_ROBUST_NP goes along with
- * pthread_mutexattr_setrobust_np()
- */
+#if defined(HAVE_DECL_PTHREAD_MUTEX_ROBUST_NP) && \
+ !defined(HAVE_DECL_PTHREAD_MUTEX_ROBUST)
#define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP
-
#endif
#if defined(HAVE_PTHREAD_MUTEX_CONSISTENT_NP) && \
de && i < READDIR_SIZE;
de=readdir(d), i++) {
offsets[i] = telldir(d);
- strcpy(names[i], de->d_name);
+ /* strlcpy not available here */
+ snprintf(names[i], sizeof(names[i]), "%s", de->d_name);
}
if (i == 0) {
}
if (ftruncate(fd, size) != 0) {
printf("failure: ftruncate [\n%s\n]\n", strerror(errno));
+ close(fd);
return false;
}
if (fstat(fd, &st) != 0) {
printf("failure: ftruncate [\nfstat failed - %s\n]\n", strerror(errno));
+ close(fd);
return false;
}
if (st.st_size != size) {
printf("failure: ftruncate [\ngave wrong size %d - expected %d\n]\n",
(int)st.st_size, size);
+ close(fd);
return false;
}
unlink(TESTFILE);
printf("success: ftruncate\n");
+ close(fd);
return true;
}
x = strndup("bla", 10);
if (strcmp(x, "bla") != 0) {
printf("failure: strndup [\ninvalid\n]\n");
+ free(x);
return false;
}
free(x);
printf("failure: utime [\n"
"fstat (1) failed - %s\n]\n",
strerror(errno));
+ close(fd);
return false;
}
printf("failure: utime [\n"
"utime(&u) failed - %s\n]\n",
strerror(errno));
+ close(fd);
return false;
}
printf("failure: utime [\n"
"fstat (2) failed - %s\n]\n",
strerror(errno));
+ close(fd);
return false;
}
printf("failure: utime [\n"
"utime(NULL) failed - %s\n]\n",
strerror(errno));
+ close(fd);
return false;
}
printf("failure: utime [\n"
"fstat (3) failed - %s\n]\n",
strerror(errno));
+ close(fd);
return false;
}
"%s: %s(%d) %s %s(%d)\n]\n", \
__location__, \
#a, (int)a, #c, #b, (int)b); \
+ close(fd); \
return false; \
} \
} while(0)
unlink(TESTFILE);
printf("success: utime\n");
+ close(fd);
return true;
}
printf("failure: utimes [\n"
"fstat (1) failed - %s\n]\n",
strerror(errno));
+ close(fd);
return false;
}
printf("failure: utimes [\n"
"utimes(tv) failed - %s\n]\n",
strerror(errno));
+ close(fd);
return false;
}
printf("failure: utimes [\n"
"fstat (2) failed - %s\n]\n",
strerror(errno));
+ close(fd);
return false;
}
"%s: %s(%d) != %s(%d)\n]\n", \
__location__, \
#a, (int)a, #b, (int)b); \
+ close(fd); \
return false; \
} \
} while(0)
unlink(TESTFILE);
printf("success: utimes\n");
+ close(fd);
return true;
}
import wafsamba, samba_dist
import Options
-samba_dist.DIST_DIRS('lib/replace buildtools:buildtools')
+samba_dist.DIST_DIRS('lib/replace buildtools:buildtools third_party/waf:third_party/waf')
def set_options(opt):
opt.BUILTIN_DEFAULT('NONE')
conf.CHECK_HEADERS('sys/uio.h ifaddrs.h direct.h dirent.h')
conf.CHECK_HEADERS('windows.h winsock2.h ws2tcpip.h')
conf.CHECK_HEADERS('errno.h')
- conf.CHECK_HEADERS('gcrypt.h getopt.h iconv.h')
+ conf.CHECK_HEADERS('getopt.h iconv.h')
conf.CHECK_HEADERS('memory.h nss.h sasl/sasl.h')
conf.CHECK_FUNCS_IN('inotify_init', 'inotify', checklibc=True,
conf.CHECK_HEADERS('utmp.h utmpx.h lastlog.h malloc.h')
conf.CHECK_HEADERS('syscall.h sys/syscall.h inttypes.h')
conf.CHECK_HEADERS('sys/atomic.h')
+ conf.CHECK_HEADERS('libgen.h')
# Check for process set name support
conf.CHECK_CODE('''
conf.CHECK_TYPE_IN('sa_family_t', 'sys/socket.h')
conf.CHECK_TYPE_IN('sig_atomic_t', 'signal.h', define='HAVE_SIG_ATOMIC_T_TYPE')
+ conf.CHECK_FUNCS('sigsetmask siggetmask sigprocmask sigblock sigaction sigset')
conf.CHECK_FUNCS_IN('''inet_ntoa inet_aton inet_ntop inet_pton connect gethostbyname
getaddrinfo getnameinfo freeaddrinfo gai_strerror socketpair''',
conf.CHECK_FUNCS('link readlink symlink realpath snprintf vsnprintf')
conf.CHECK_FUNCS('asprintf vasprintf setenv unsetenv strnlen strtoull __strtoull')
conf.CHECK_FUNCS('strtouq strtoll __strtoll strtoq memalign posix_memalign')
- conf.CHECK_FUNCS('prctl')
+ conf.CHECK_FUNCS('prctl dirname basename')
# libbsd on some platforms provides strlcpy and strlcat
if not conf.CHECK_FUNCS('strlcpy strlcat'):
if conf.CONFIG_SET('HAVE_PTHREAD'):
- conf.CHECK_DECLS('pthread_mutexattr_setrobust', headers='pthread.h')
- if not conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEXATTR_SETROBUST'):
- conf.CHECK_DECLS('pthread_mutexattr_setrobust_np',
- headers='pthread.h')
-
conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust', 'pthread',
checklibc=True, headers='pthread.h')
if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST'):
conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust_np', 'pthread',
checklibc=True, headers='pthread.h')
- conf.CHECK_DECLS('pthread_mutex_consistent', headers='pthread.h')
- if not conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_CONSISTENT'):
- conf.CHECK_DECLS('pthread_mutex_consistent_np',
- headers='pthread.h')
+ conf.CHECK_DECLS('PTHREAD_MUTEX_ROBUST', headers='pthread.h')
+ if not conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST'):
+ conf.CHECK_DECLS('PTHREAD_MUTEX_ROBUST_NP', headers='pthread.h')
conf.CHECK_FUNCS_IN('pthread_mutex_consistent', 'pthread',
checklibc=True, headers='pthread.h')
if ((conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST') or
conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP')) and
+ (conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST') or
+ conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST_NP')) and
(conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT') or
conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT_NP'))):
conf.DEFINE('HAVE_ROBUST_MUTEXES', 1)
<?xml version="1.0"?>
<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<refentry>
+ <refentryinfo><date>2015-04-10</date></refentryinfo>
<refmeta>
<refentrytitle>talloc</refentrytitle>
<manvolnum>3</manvolnum>
</listitem>
<listitem>
<para>
- you can talloc_free() the pointer itself. That will destroy
- the most recently established parent to the pointer and leave
- the pointer as a child of its current parent.
+         you can talloc_free() the pointer itself if it has at most one
+         parent. This behaviour has changed since the release of version
+         2.0. Further information can be found in the description of
+         "talloc_free".
</para>
</listitem>
</itemizedlist>
linkend="talloc_unlink"><quote>talloc_unlink()</quote></link>.
</para>
</refsect2>
- <refsect2 id="talloc_unlink"><title>int talloc_unlink(const void *ctx, const void *ptr);</title>
+ <refsect2 id="talloc_unlink"><title>int talloc_unlink(const void *ctx, void *ptr);</title>
<para>
The talloc_unlink() function removes a specific parent from
<emphasis role="italic">ptr</emphasis>. The <emphasis
Name: pytalloc-util
Description: Utility functions for using talloc objects with Python
Version: @TALLOC_VERSION@
-Libs: @LIB_RPATH@ -L${libdir} -lpytalloc-util
+Libs: @LIB_RPATH@ -L${libdir} -lpytalloc-util@PYTHON_SO_ABI_FLAG@
Cflags: -I${includedir}
URL: http://talloc.samba.org/
#include <talloc.h>
#include <pytalloc.h>
-void inittalloc(void);
+static PyTypeObject TallocObject_Type;
+
+#if PY_MAJOR_VERSION >= 3
+#define PyStr_FromFormat PyUnicode_FromFormat
+#else
+#define PyStr_FromFormat PyString_FromFormat
+#endif
/* print a talloc tree report for a talloc python object */
static PyObject *pytalloc_report_full(PyObject *self, PyObject *args)
pytalloc_Object *talloc_obj = (pytalloc_Object *)obj;
PyTypeObject *type = (PyTypeObject*)PyObject_Type(obj);
- return PyString_FromFormat("<%s talloc object at 0x%p>",
- type->tp_name, talloc_obj->ptr);
+ return PyStr_FromFormat("<%s talloc object at 0x%p>",
+ type->tp_name, talloc_obj->ptr);
}
/**
/**
* Default (but only slightly more useful than the default) implementation of cmp.
*/
+#if PY_MAJOR_VERSION >= 3
+static PyObject *pytalloc_default_richcmp(PyObject *obj1, PyObject *obj2, int op)
+{
+ void *ptr1;
+ void *ptr2;
+ if (Py_TYPE(obj1) == Py_TYPE(obj2)) {
+ /* When types match, compare pointers */
+ ptr1 = pytalloc_get_ptr(obj1);
+ ptr2 = pytalloc_get_ptr(obj2);
+ } else if (PyObject_TypeCheck(obj2, &TallocObject_Type)) {
+ /* Otherwise, compare types */
+ ptr1 = Py_TYPE(obj1);
+ ptr2 = Py_TYPE(obj2);
+ } else {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ switch (op) {
+ case Py_EQ: return PyBool_FromLong(ptr1 == ptr2);
+ case Py_NE: return PyBool_FromLong(ptr1 != ptr2);
+ case Py_LT: return PyBool_FromLong(ptr1 < ptr2);
+ case Py_GT: return PyBool_FromLong(ptr1 > ptr2);
+ case Py_LE: return PyBool_FromLong(ptr1 <= ptr2);
+ case Py_GE: return PyBool_FromLong(ptr1 >= ptr2);
+ }
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+}
+#else
static int pytalloc_default_cmp(PyObject *_obj1, PyObject *_obj2)
{
pytalloc_Object *obj1 = (pytalloc_Object *)_obj1,
*obj2 = (pytalloc_Object *)_obj2;
if (obj1->ob_type != obj2->ob_type)
- return (obj1->ob_type - obj2->ob_type);
+ return ((char *)obj1->ob_type - (char *)obj2->ob_type);
return ((char *)pytalloc_get_ptr(obj1) - (char *)pytalloc_get_ptr(obj2));
}
+#endif
static PyTypeObject TallocObject_Type = {
.tp_name = "talloc.Object",
.tp_dealloc = (destructor)pytalloc_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
.tp_repr = pytalloc_default_repr,
+#if PY_MAJOR_VERSION >= 3
+ .tp_richcompare = pytalloc_default_richcmp,
+#else
.tp_compare = pytalloc_default_cmp,
+#endif
};
-void inittalloc(void)
+#define MODULE_DOC PyDoc_STR("Python wrapping of talloc-maintained objects.")
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ .m_name = "talloc",
+ .m_doc = MODULE_DOC,
+ .m_size = -1,
+ .m_methods = talloc_methods,
+};
+#endif
+
+static PyObject *module_init(void);
+static PyObject *module_init(void)
{
PyObject *m;
if (PyType_Ready(&TallocObject_Type) < 0)
- return;
+ return NULL;
- m = Py_InitModule3("talloc", talloc_methods,
- "Python wrapping of talloc-maintained objects.");
+#if PY_MAJOR_VERSION >= 3
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("talloc", talloc_methods, MODULE_DOC);
+#endif
if (m == NULL)
- return;
+ return NULL;
Py_INCREF(&TallocObject_Type);
PyModule_AddObject(m, "Object", (PyObject *)&TallocObject_Type);
+ return m;
+}
+
+#if PY_MAJOR_VERSION >= 3
+PyMODINIT_FUNC PyInit_talloc(void);
+PyMODINIT_FUNC PyInit_talloc(void)
+{
+ return module_init();
+}
+#else
+void inittalloc(void);
+void inittalloc(void)
+{
+ module_init();
}
+#endif
#define pytalloc_new(type, typeobj) pytalloc_steal(typeobj, talloc_zero(NULL, type))
+#if PY_MAJOR_VERSION < 3
PyObject *pytalloc_CObject_FromTallocPtr(void *);
+#endif
#endif /* _PYTALLOC_H_ */
bindings for you but it will make it easier to write C bindings that involve
talloc, and take away some of the boiler plate.
+Python 3
+--------
+
+pytalloc can be used with Python 3. Usage from a Python extension remains
+the same, but for the C utilities, the library to link against is tagged with
+Python's PEP 3149 ABI tag, for example "pytalloc.cpython34m".
+To make a build for Python 3, configure with PYTHON=/usr/bin/python3.
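+
+For example, pkg-config output for the C utility library might then look
+like this (paths and ABI tag are illustrative):
+
+    $ pkg-config --libs pytalloc-util
+    -L/usr/lib -lpytalloc-util.cpython-34m
+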
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
pytalloc_Object
Python. The caller is responsible for incrementing the talloc reference count before calling
this function - it will dereference the talloc pointer when it is garbage collected.
+This function is only available on Python 2.
+
Debug function for talloc in Python
-----------------------------------
return (PyObject *)ret;
}
+#if PY_MAJOR_VERSION < 3
+
static void py_cobject_talloc_free(void *ptr)
{
talloc_free(ptr);
return PyCObject_FromVoidPtr(ptr, py_cobject_talloc_free);
}
+#endif
+
_PUBLIC_ int pytalloc_Check(PyObject *obj)
{
PyTypeObject *tp = pytalloc_GetObjectType();
+++ /dev/null
-/*
- Unix SMB/CIFS implementation.
- Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-/* Don't expose talloc contexts in Python code. Python does reference
- counting for us, so just create a new top-level talloc context.
- */
-%typemap(in, numinputs=0, noblock=1) TALLOC_CTX * {
- $1 = NULL;
-}
-
-%define %talloctype(TYPE)
-%nodefaultctor TYPE;
-%extend TYPE {
- ~TYPE() { talloc_free($self); }
-}
-%enddef
- when using talloc_enable_leak_report(), giving directly NULL as a
parent context implicitly refers to a hidden "null context" global
variable, so this should not be used in a multi-threaded environment
-without proper synchronization ;
+without proper synchronization. In threaded code turn off null tracking using
+talloc_disable_null_tracking(). ;
- the context returned by talloc_autofree_context() is also global so
shouldn't be used by several threads simultaneously without
synchronization.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-int talloc_unlink(const void *context, const void *ptr);
+int talloc_unlink(const void *context, void *ptr);
The talloc_unlink() function removes a specific parent from ptr. The
context passed must either be a context used in talloc_reference()
--- /dev/null
+/*
+ Samba Unix SMB/CIFS implementation.
+
+ C utilities for the pytalloc test suite.
+ Provides the "_test_pytalloc" Python module.
+
+ NOTE: Please read talloc_guide.txt for full documentation
+
+ Copyright (C) Petr Viktorin 2015
+
+ ** NOTE! The following LGPL license applies to the talloc
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <Python.h>
+#include <talloc.h>
+#include <pytalloc.h>
+
+static PyObject *testpytalloc_new(PyTypeObject *mod)
+{
+	char *obj = talloc_strdup(NULL, "This is a test string");
+ return pytalloc_steal(pytalloc_GetObjectType(), obj);
+}
+
+static PyObject *testpytalloc_get_object_type(PyObject *mod) {
+ PyObject *type = (PyObject *)pytalloc_GetObjectType();
+ Py_INCREF(type);
+ return type;
+}
+
+static PyObject *testpytalloc_reference(PyObject *mod, PyObject *args) {
+ pytalloc_Object *source = NULL;
+ void *ptr;
+
+ if (!PyArg_ParseTuple(args, "O!", pytalloc_GetObjectType(), &source))
+ return NULL;
+
+ ptr = source->ptr;
+ return pytalloc_reference_ex(pytalloc_GetObjectType(), ptr, ptr);
+}
+
+static PyMethodDef test_talloc_methods[] = {
+ { "new", (PyCFunction)testpytalloc_new, METH_NOARGS,
+ "create a talloc Object with a testing string"},
+ { "get_object_type", (PyCFunction)testpytalloc_get_object_type, METH_NOARGS,
+ "call pytalloc_GetObjectType"},
+ { "reference", (PyCFunction)testpytalloc_reference, METH_VARARGS,
+ "call pytalloc_reference_ex"},
+ { NULL }
+};
+
+static PyTypeObject DObject_Type;
+
+static int dobject_destructor(void *ptr)
+{
+ PyObject *destructor_func = *talloc_get_type(ptr, PyObject*);
+ PyObject *ret;
+ ret = PyObject_CallObject(destructor_func, NULL);
+ Py_DECREF(destructor_func);
+ if (ret == NULL) {
+ PyErr_Print();
+ } else {
+ Py_DECREF(ret);
+ }
+ return 0;
+}
+
+static PyObject *dobject_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
+{
+ PyObject *destructor_func = NULL;
+ PyObject **obj;
+
+ if (!PyArg_ParseTuple(args, "O", &destructor_func))
+ return NULL;
+ Py_INCREF(destructor_func);
+
+ obj = talloc(NULL, PyObject*);
+ *obj = destructor_func;
+
+ talloc_set_destructor((void*)obj, dobject_destructor);
+ return pytalloc_steal(&DObject_Type, obj);
+}
+
+static PyTypeObject DObject_Type = {
+ .tp_name = "_test_pytalloc.DObject",
+ .tp_basicsize = sizeof(pytalloc_Object),
+ .tp_methods = NULL,
+ .tp_new = dobject_new,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_doc = "test talloc object that calls a function when underlying data is freed\n",
+};
+
+#define MODULE_DOC PyDoc_STR("Test utility module for pytalloc")
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ .m_name = "_test_pytalloc",
+ .m_doc = PyDoc_STR("Test utility module for pytalloc"),
+ .m_size = -1,
+ .m_methods = test_talloc_methods,
+};
+#endif
+
+static PyObject *module_init(void);
+static PyObject *module_init(void)
+{
+ PyObject *m;
+
+ DObject_Type.tp_base = pytalloc_GetObjectType();
+ if (PyType_Ready(&DObject_Type) < 0) {
+ return NULL;
+ }
+
+#if PY_MAJOR_VERSION >= 3
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("_test_pytalloc", test_talloc_methods, MODULE_DOC);
+#endif
+
+ if (m == NULL) {
+ return NULL;
+ }
+
+ Py_INCREF(&DObject_Type);
+ Py_INCREF(DObject_Type.tp_base);
+ PyModule_AddObject(m, "DObject", (PyObject *)&DObject_Type);
+
+ return m;
+}
+
+
+#if PY_MAJOR_VERSION >= 3
+PyMODINIT_FUNC PyInit__test_pytalloc(void);
+PyMODINIT_FUNC PyInit__test_pytalloc(void)
+{
+ return module_init();
+}
+#else
+void init_test_pytalloc(void);
+void init_test_pytalloc(void)
+{
+ module_init();
+}
+#endif
--- /dev/null
+#!/usr/bin/env python
+# Simple tests for the talloc python bindings.
+# Copyright (C) 2015 Petr Viktorin <pviktori@redhat.com>
+
+import unittest
+import subprocess
+import sys
+import re
+import gc
+
+import talloc
+import _test_pytalloc
+
+def dummy_func():
+ pass
+
+
+class TallocTests(unittest.TestCase):
+
+ def test_report_full(self):
+ # report_full is hardcoded to print to stdout, so use a subprocess
+ process = subprocess.Popen([
+ sys.executable, '-c',
+ """if True:
+ import talloc, _test_pytalloc
+ obj = _test_pytalloc.new()
+ talloc.report_full(obj)
+ """
+ ], stdout=subprocess.PIPE)
+ output, stderr = process.communicate()
+ output = str(output)
+ self.assertTrue("full talloc report on 'talloc.Object" in output)
+ self.assertTrue("This is a test string" in output)
+
+ def test_totalblocks(self):
+ obj = _test_pytalloc.new()
+ # Two blocks: the string, and the name
+ self.assertEqual(talloc.total_blocks(obj), 2)
+
+ def test_repr(self):
+ obj = _test_pytalloc.new()
+ prefix = '<talloc.Object talloc object at'
+ self.assertTrue(repr(obj).startswith(prefix))
+ self.assertEqual(repr(obj), str(obj))
+
+ def test_destructor(self):
+ # Check correct lifetime of the talloc'd data
+ lst = []
+ obj = _test_pytalloc.DObject(lambda: lst.append('dead'))
+ self.assertEqual(lst, [])
+ del obj
+ gc.collect()
+ self.assertEqual(lst, ['dead'])
+
+
+class TallocComparisonTests(unittest.TestCase):
+
+ def test_compare_same(self):
+ obj1 = _test_pytalloc.new()
+ self.assertTrue(obj1 == obj1)
+ self.assertFalse(obj1 != obj1)
+ self.assertTrue(obj1 <= obj1)
+ self.assertFalse(obj1 < obj1)
+ self.assertTrue(obj1 >= obj1)
+ self.assertFalse(obj1 > obj1)
+
+ def test_compare_different(self):
+ # object comparison is consistent
+ obj1, obj2 = sorted([
+ _test_pytalloc.new(),
+ _test_pytalloc.new()])
+ self.assertFalse(obj1 == obj2)
+ self.assertTrue(obj1 != obj2)
+ self.assertTrue(obj1 <= obj2)
+ self.assertTrue(obj1 < obj2)
+ self.assertFalse(obj1 >= obj2)
+ self.assertFalse(obj1 > obj2)
+
+ def test_compare_different_types(self):
+ # object comparison falls back to comparing types
+ if sys.version_info >= (3, 0):
+ # In Python 3, types are unorderable -- nothing to test
+ return
+ if talloc.Object < _test_pytalloc.DObject:
+ obj1 = _test_pytalloc.new()
+ obj2 = _test_pytalloc.DObject(dummy_func)
+ else:
+ obj2 = _test_pytalloc.new()
+ obj1 = _test_pytalloc.DObject(dummy_func)
+ self.assertFalse(obj1 == obj2)
+ self.assertTrue(obj1 != obj2)
+ self.assertTrue(obj1 <= obj2)
+ self.assertTrue(obj1 < obj2)
+ self.assertFalse(obj1 >= obj2)
+ self.assertFalse(obj1 > obj2)
+
+
+class TallocUtilTests(unittest.TestCase):
+
+ def test_get_type(self):
+ self.assertTrue(talloc.Object is _test_pytalloc.get_object_type())
+
+    def test_reference(self):
+ # Check correct lifetime of the talloc'd data with multiple references
+ lst = []
+ obj = _test_pytalloc.DObject(lambda: lst.append('dead'))
+ ref = _test_pytalloc.reference(obj)
+ del obj
+ gc.collect()
+ self.assertEqual(lst, [])
+ del ref
+ gc.collect()
+ self.assertEqual(lst, ['dead'])
+
+
+if __name__ == '__main__':
+ unittest.TestProgram()
#include "system/time.h"
#include <talloc.h>
+#ifdef HAVE_PTHREAD
+#include <pthread.h>
+#endif
+
#include "talloc_testsuite.h"
static struct timeval timeval_current(void)
return true;
}
+#ifdef HAVE_PTHREAD
+
+#define NUM_THREADS 100
+
+/* Sync variables. */
+static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER;
+static void *intermediate_ptr;
+
+/* Subthread. */
+static void *thread_fn(void *arg)
+{
+ int ret;
+ const char *ctx_name = (const char *)arg;
+ void *sub_ctx = NULL;
+ /*
+ * Do stuff that creates a new talloc hierarchy in
+ * this thread.
+ */
+ void *top_ctx = talloc_named_const(NULL, 0, "top");
+ if (top_ctx == NULL) {
+ return NULL;
+ }
+ sub_ctx = talloc_named_const(top_ctx, 100, ctx_name);
+ if (sub_ctx == NULL) {
+ return NULL;
+ }
+
+ /*
+ * Now transfer a pointer from our hierarchy
+ * onto the intermediate ptr.
+ */
+ ret = pthread_mutex_lock(&mtx);
+ if (ret != 0) {
+ talloc_free(top_ctx);
+ return NULL;
+ }
+ /* Wait for intermediate_ptr to be free. */
+ while (intermediate_ptr != NULL) {
+ ret = pthread_cond_wait(&condvar, &mtx);
+ if (ret != 0) {
+ talloc_free(top_ctx);
+ return NULL;
+ }
+ }
+
+ /* and move our memory onto it from our toplevel hierarchy. */
+ intermediate_ptr = talloc_move(NULL, &sub_ctx);
+
+ /* Tell the main thread it's ready for pickup. */
+ pthread_cond_broadcast(&condvar);
+ pthread_mutex_unlock(&mtx);
+
+ talloc_free(top_ctx);
+ return NULL;
+}
+
+/* Main thread. */
+static bool test_pthread_talloc_passing(void)
+{
+ int i;
+ int ret;
+ char str_array[NUM_THREADS][20];
+ pthread_t thread_id;
+ void *mem_ctx;
+
+ /*
+ * Important ! Null tracking breaks threaded talloc.
+ * It *must* be turned off.
+ */
+ talloc_disable_null_tracking();
+
+ printf("test: pthread_talloc_passing\n# PTHREAD TALLOC PASSING\n");
+
+ /* Main thread toplevel context. */
+ mem_ctx = talloc_named_const(NULL, 0, "toplevel");
+ if (mem_ctx == NULL) {
+ printf("failed to create toplevel context\n");
+ return false;
+ }
+
+ /*
+ * Spin off NUM_THREADS threads.
+ * They will use their own toplevel contexts.
+ */
+ for (i = 0; i < NUM_THREADS; i++) {
+		ret = snprintf(str_array[i],
+				20,
+				"thread:%d",
+				i);
+		if (ret < 0 || ret >= 20) {
+			printf("snprintf %d failed\n", i);
+			return false;
+		}
+ ret = pthread_create(&thread_id,
+ NULL,
+ thread_fn,
+ str_array[i]);
+ if (ret != 0) {
+ printf("failed to create thread %d (%d)\n", i, ret);
+ return false;
+ }
+ }
+
+ printf("Created %d threads\n", NUM_THREADS);
+
+ /* Now wait for NUM_THREADS transfers of the talloc'ed memory. */
+ for (i = 0; i < NUM_THREADS; i++) {
+ ret = pthread_mutex_lock(&mtx);
+ if (ret != 0) {
+ printf("pthread_mutex_lock %d failed (%d)\n", i, ret);
+ talloc_free(mem_ctx);
+ return false;
+ }
+
+ /* Wait for intermediate_ptr to have our data. */
+ while (intermediate_ptr == NULL) {
+ ret = pthread_cond_wait(&condvar, &mtx);
+ if (ret != 0) {
+ printf("pthread_cond_wait %d failed (%d)\n", i,
+ ret);
+ talloc_free(mem_ctx);
+ return false;
+ }
+ }
+
+ /* and move it onto our toplevel hierarchy. */
+ (void)talloc_move(mem_ctx, &intermediate_ptr);
+
+ /* Tell the sub-threads we're ready for another. */
+ pthread_cond_broadcast(&condvar);
+ pthread_mutex_unlock(&mtx);
+ }
+
+ CHECK_SIZE("pthread_talloc_passing", mem_ctx, NUM_THREADS * 100);
+#if 1
+ /* Dump the hierarchy. */
+ talloc_report(mem_ctx, stdout);
+#endif
+ talloc_free(mem_ctx);
+ printf("success: pthread_talloc_passing\n");
+ return true;
+}
+#endif
+
static void test_reset(void)
{
talloc_set_log_fn(test_log_stdout);
ret &= test_free_children();
test_reset();
ret &= test_memlimit();
+#ifdef HAVE_PTHREAD
+ test_reset();
+ ret &= test_pthread_talloc_passing();
+#endif
if (ret) {
#node = producer.bld.path.make_node('pdebug.svg')
f = open('pdebug.svg', 'w')
f.write("".join(out))
-
-
setattr(t, 'post_run', post_run)
setattr(t, 'old_can_retrieve_cache', t.can_retrieve_cache)
setattr(t, 'can_retrieve_cache', can_retrieve_cache)
-
self.check_message_2(kw['okmsg'])
return ret
-
--- /dev/null
+#! /usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2015
+
+"""
+Make tasks use file timestamps so that touching a build file forces a partial
+rebuild of whatever depends on it, for example:
+
+touch out/libfoo.a
+... everything that depends on libfoo.a is rebuilt
+
+to use::
+ def options(opt):
+ opt.tool_options('build_file_tracker')
+"""
+
+import os
+import Task, Utils
+
+def signature(self):
+ try: return self.cache_sig[0]
+ except AttributeError: pass
+
+ self.m = Utils.md5()
+
+ # explicit deps
+ exp_sig = self.sig_explicit_deps()
+
+ # env vars
+ var_sig = self.sig_vars()
+
+ # implicit deps
+ imp_sig = Task.SIG_NIL
+ if self.scan:
+ try:
+ imp_sig = self.sig_implicit_deps()
+ except ValueError:
+ return self.signature()
+
+ # timestamp dependency on build files only (source files are hashed)
+ buf = []
+ for k in self.inputs + getattr(self, 'dep_nodes', []) + self.generator.bld.node_deps.get(self.unique_id(), []):
+ if k.id & 3 == 3:
+ t = os.stat(k.abspath(self.env)).st_mtime
+ buf.append(t)
+ self.m.update(str(buf))
+
+ # we now have the signature (first element) and the details (for debugging)
+ ret = self.m.digest()
+ self.cache_sig = (ret, exp_sig, imp_sig, var_sig)
+ return ret
+
+Task.Task.signature_bak = Task.Task.signature # unused, kept just in case
+Task.Task.signature = signature # overridden
def detect(conf):
fluid = conf.find_program('fluid', var='FLUID', mandatory=True)
conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True)
-
cls.post_run = post_run
cls.scan = scan
cls.sig_implicit_deps = sig_implicit_deps
-
self.path.find_or_declare(self.target))
self.go_link_task.set_run_after(self.go_compile_task)
self.go_link_task.dep_nodes.extend(self.go_compile_task.outputs)
-
Build.BuildContext.raw_compile = Build.BuildContext.compile
Build.BuildContext.compile = compile
Build.BuildContext.sweep = sweep
-
check_task_classes(self)
return comp(self)
Build.BuildContext.compile = compile
-
--- /dev/null
+#! /usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2015 (ita)
+#
+# prefer the waf 1.8 version
+
+"""
+The full samba build can be faster by ~10%, but there are a few limitations:
+* only one build process should be run at a time as the servers would use the same ports
+* only one build command is going to be called ("waf build configure build" would not work)
+
+def build(bld):
+
+ mod = Utils.load_tool('prefork')
+ mod.build(bld)
+ ...
+ (build declarations after)
+"""
+
+import os, re, socket, threading, sys, subprocess, time, atexit, traceback
+try:
+ import SocketServer
+except ImportError:
+ import socketserver as SocketServer
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+try:
+ import cPickle
+except ImportError:
+ import pickle as cPickle
+
+DEFAULT_PORT = 51200
+
+HEADER_SIZE = 128
+
+REQ = 'REQ'
+RES = 'RES'
+BYE = 'BYE'
+
+def make_header(params):
+ header = ','.join(params)
+ if sys.hexversion > 0x3000000:
+ header = header.encode('iso8859-1')
+ header = header.ljust(HEADER_SIZE)
+ assert(len(header) == HEADER_SIZE)
+ return header
+
+
+re_valid_query = re.compile('^[a-zA-Z0-9_, ]+$')
+class req(SocketServer.StreamRequestHandler):
+ def handle(self):
+ while 1:
+ try:
+ self.process_command()
+ except Exception as e:
+ print(e)
+ break
+
+ def process_command(self):
+ query = self.rfile.read(HEADER_SIZE)
+ if not query:
+ return
+ #print(len(query))
+ assert(len(query) == HEADER_SIZE)
+ if sys.hexversion > 0x3000000:
+ query = query.decode('iso8859-1')
+ #print "%r" % query
+ if not re_valid_query.match(query):
+ raise ValueError('Invalid query %r' % query)
+
+ query = query.strip().split(',')
+
+ if query[0] == REQ:
+ self.run_command(query[1:])
+ elif query[0] == BYE:
+ raise ValueError('Exit')
+ else:
+ raise ValueError('Invalid query %r' % query)
+
+ def run_command(self, query):
+
+ size = int(query[0])
+ data = self.rfile.read(size)
+ assert(len(data) == size)
+ kw = cPickle.loads(data)
+
+ # run command
+ ret = out = err = exc = None
+ cmd = kw['cmd']
+ del kw['cmd']
+ #print(cmd)
+
+ try:
+ if kw['stdout'] or kw['stderr']:
+ p = subprocess.Popen(cmd, **kw)
+ (out, err) = p.communicate()
+ ret = p.returncode
+ else:
+ ret = subprocess.Popen(cmd, **kw).wait()
+ except Exception as e:
+ ret = -1
+ exc = str(e) + traceback.format_exc()
+
+ # write the results
+ if out or err or exc:
+ data = (out, err, exc)
+ data = cPickle.dumps(data, -1)
+ else:
+ data = ''
+
+ params = [RES, str(ret), str(len(data))]
+
+ self.wfile.write(make_header(params))
+
+ if data:
+ self.wfile.write(data)
+
+def create_server(conn, cls):
+ #SocketServer.ThreadingTCPServer.allow_reuse_address = True
+ #server = SocketServer.ThreadingTCPServer(conn, req)
+
+ SocketServer.TCPServer.allow_reuse_address = True
+ server = SocketServer.TCPServer(conn, req)
+ #server.timeout = 6000 # seconds
+ server.serve_forever(poll_interval=0.001)
+
+if __name__ == '__main__':
+ if len(sys.argv) > 1:
+ port = int(sys.argv[1])
+ else:
+ port = DEFAULT_PORT
+ #conn = (socket.gethostname(), port)
+ conn = ("127.0.0.1", port)
+ #print("listening - %r %r\n" % conn)
+ create_server(conn, req)
+else:
+
+ import Runner, Utils
+
+ def init_task_pool(self):
+ # lazy creation, and set a common pool for all task consumers
+ pool = self.pool = []
+ for i in range(self.numjobs):
+ consumer = Runner.get_pool()
+ pool.append(consumer)
+ consumer.idx = i
+ self.ready = Queue(0)
+ def setq(consumer):
+ consumer.ready = self.ready
+ try:
+ threading.current_thread().idx = consumer.idx
+ except Exception as e:
+ print(e)
+ for x in pool:
+ x.ready.put(setq)
+ return pool
+ Runner.Parallel.init_task_pool = init_task_pool
+
+ PORT = 51200
+
+ def make_server(idx):
+ port = PORT + idx
+ cmd = [sys.executable, os.path.abspath(__file__), str(port)]
+ proc = subprocess.Popen(cmd)
+ proc.port = port
+ return proc
+
+ def make_conn(srv):
+ #port = PORT + idx
+ port = srv.port
+ conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ conn.connect(('127.0.0.1', port))
+ return conn
+
+ SERVERS = []
+ CONNS = []
+ def close_all():
+ while CONNS:
+ conn = CONNS.pop()
+ try:
+ conn.close()
+ except:
+ pass
+ while SERVERS:
+ srv = SERVERS.pop()
+ try:
+ srv.kill()
+ except:
+ pass
+ atexit.register(close_all)
+
+ def put_data(conn, data):
+ conn.send(data)
+
+ def read_data(conn, siz):
+ ret = conn.recv(siz)
+ if not ret:
+ print("closed connection?")
+
+ assert(len(ret) == siz)
+ return ret
+
+ def exec_command(cmd, **kw):
+ if 'log' in kw:
+ log = kw['log']
+ kw['stdout'] = kw['stderr'] = subprocess.PIPE
+ del(kw['log'])
+ else:
+ kw['stdout'] = kw['stderr'] = None
+ kw['shell'] = isinstance(cmd, str)
+
+ idx = threading.current_thread().idx
+ kw['cmd'] = cmd
+
+ data = cPickle.dumps(kw, -1)
+ params = [REQ, str(len(data))]
+ header = make_header(params)
+
+ conn = CONNS[idx]
+
+ put_data(conn, header)
+ put_data(conn, data)
+
+ data = read_data(conn, HEADER_SIZE)
+ if sys.hexversion > 0x3000000:
+ data = data.decode('iso8859-1')
+
+ lst = data.split(',')
+ ret = int(lst[1])
+ dlen = int(lst[2])
+
+ out = err = None
+ if dlen:
+ data = read_data(conn, dlen)
+ (out, err, exc) = cPickle.loads(data)
+ if exc:
+ raise Utils.WafError('Execution failure: %s' % exc)
+
+ if out:
+ log.write(out)
+ if err:
+ log.write(err)
+
+ return ret
+
+ def __init__(self):
+ threading.Thread.__init__(self)
+
+ # identifier of the current thread
+ self.idx = len(SERVERS)
+
+ # create a server and wait for the connection
+ srv = make_server(self.idx)
+ SERVERS.append(srv)
+
+ conn = None
+ for x in range(30):
+ try:
+ conn = make_conn(srv)
+ break
+ except socket.error:
+ time.sleep(0.01)
+ if not conn:
+ raise ValueError('Could not start the server!')
+ CONNS.append(conn)
+
+ self.setDaemon(1)
+ self.start()
+ Runner.TaskConsumer.__init__ = __init__
+
+ def build(bld):
+		# dangerous: exec_command is overridden globally; assume only the build command runs
+ Utils.exec_command = exec_command
def detect(conf):
swig = conf.find_program('swig', var='SWIG', mandatory=True)
-
def detect(conf):
conf.find_program('valadoc', var='VALADOC', mandatory=False)
-
install_as = group_method(install_as)
install_files = group_method(install_files)
symlink_as = group_method(symlink_as)
-
"decorator: attach new configuration tests (registered as strings)"
ConfigurationContext.tests[f.__name__] = f
return conf(f)
-
-
# negative '<-' uninstall
INSTALL = 1337
UNINSTALL = -1337
-
object.__delattr__(self, name)
else:
del self[name]
-
# may be initialized more than once
init_log()
-
def update_build_dir(self, env=None):
if not env:
- for env in bld.all_envs:
+ for env in self.bld.all_envs:
self.update_build_dir(env)
return
class Nodu(Node):
pass
-
def parse_args(self, args=None):
parse_args_impl(self.parser, args)
-
#print loop
assert (self.count == 0 or self.stop)
-
# FIXME remove in Waf 1.6 (kept for compatibility)
def add_subdir(dir, bld):
bld.recurse(dir, 'build')
-
delattr(x, 'cache_sig')
except AttributeError:
pass
-
else: self.source += lst
def clone(self, env):
- """when creating a clone in a task generator method,
- make sure to set posted=False on the clone
+ """when creating a clone in a task generator method,
+ make sure to set posted=False on the clone
else the other task generator will not create its tasks"""
newobj = task_gen(bld=self.bld)
for x in self.__dict__:
if getattr(self, 'cwd', None):
tsk.cwd = self.cwd
- if getattr(self, 'on_results', None):
+ if getattr(self, 'on_results', None) or getattr(self, 'update_outputs', None):
Task.update_outputs(cls)
if getattr(self, 'always', None):
self.bld.prev = self
feature('seq')(sequence_order)
-
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
-
v = conf.env
conf.check_tool('ar')
if not v['AR']: conf.fatal('ar is required for static libraries - not found')
-
-
def detect(conf):
bison = conf.find_program('bison', var='BISON', mandatory=True)
conf.env['BISONFLAGS'] = '-d'
-
cls = Task.simple_task_type('cc_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
cls.maxjobs = 1
cls.install = Utils.nada
-
def add_as_needed(conf):
if conf.env.DEST_BINFMT == 'elf' and 'gcc' in (conf.env.CXX_NAME, conf.env.CC_NAME):
conf.env.append_unique('LINKFLAGS', '--as-needed')
-
for c_compiler in test_for_compiler.split():
opt.tool_options('%s' % c_compiler, option_group=cc_compiler_opts)
-
for cxx_compiler in test_for_compiler.split():
opt.tool_options('%s' % cxx_compiler, option_group=cxx_compiler_opts)
-
for d_compiler in ['gdc', 'dmd']:
opt.tool_options('%s' % d_compiler, option_group=d_compiler_opts)
-
@conftest
def cxx_load_tools(conf):
conf.check_tool('cxx')
-
def set_options(opt):
opt.add_option('--with-csc-binary', type='string', dest='cscbinary')
-
cls = Task.simple_task_type('cxx_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
cls.maxjobs = 1
cls.install = Utils.nada
-
print imp + " ",
print
"""
-
def detect(conf):
dbus_binding_tool = conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL')
-
if conf.env.D_COMPILER.find('ldc') > -1:
conf.common_flags_ldc()
-
def detect(conf):
conf.find_program('flex', var='FLEX', mandatory=True)
conf.env['FLEXFLAGS'] = ''
-
conf.find_program(['gas', 'as'], var='AS')
if not conf.env.AS: conf.env.AS = conf.env.CC
	#conf.env.ASFLAGS = ['-c'] <- may be necessary for .S files
-
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
-
conf.check_tool('d')
conf.common_flags_gdc()
conf.d_platform_flags()
-
def detect(conf):
glib_genmarshal = conf.find_program('glib-genmarshal', var='GLIB_GENMARSHAL')
mk_enums_tool = conf.find_program('glib-mkenums', var='GLIB_MKENUMS')
-
def set_options(opt):
opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
-
str_default = default
str_help = '%s [Default: %s]' % (help, str_default)
dirs_options.add_option(option_name, help=str_help, default='', dest=name.upper())
-
gob2 = conf.find_program('gob2', var='GOB2', mandatory=True)
conf.env['GOB2'] = gob2
conf.env['GOB2FLAGS'] = ''
-
v['SHLIB_MARKER'] = ''
v['STATICLIB_MARKER'] = ''
- v['SONAME_ST'] = ''
+ v['SONAME_ST'] = ''
@conftest
def gxx_modifier_aix(conf):
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
-
def set_options(opt):
opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
opt.add_option('--datadir', type='string', default='', dest='datadir', help='read-only application data')
-
break
else:
conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs)
-
conf.env['MSGFMT'] = conf.find_program('msgfmt')
Task.simple_task_type('msgfmt', '${MSGFMT} ${SRC} -o ${TGT}', color='BLUE', shell=False)
-
if __name__ == '__main__':
useCmdLine()
-
def detect(conf):
conf.find_program('luac', var='LUAC', mandatory = True)
-
Task.task_type_from_func('copy', vars=[], func=action_process_file_func)
TaskGen.task_gen.classes['command-output'] = cmd_output_taskgen
-
self.do_manifest = False
outfile = self.outputs[0].bldpath(env)
-
+
manifest = None
for out_node in self.outputs:
if out_node.name.endswith('.manifest'):
manifest = out_node.bldpath(env)
break
if manifest is None:
- # Should never get here. If we do, it means the manifest file was
- # never added to the outputs list, thus we don't have a manifest file
+ # Should never get here. If we do, it means the manifest file was
+ # never added to the outputs list, thus we don't have a manifest file
# to embed, so we just return.
return 0
cls = Task.TaskBase.classes.get(k, None)
if cls:
cls.exec_command = exec_command_msvc
-
try: obj_ext = self.obj_ext
except AttributeError: obj_ext = '_%d.o' % self.idx
- task = self.create_task('nasm', node, node.change_ext(obj_ext))
+ task = self.create_task('nasm', node, node.change_ext(obj_ext))
self.compiled_tasks.append(task)
self.meths.append('apply_nasm_vars')
def detect(conf):
nasm = conf.find_program(['nasm', 'yasm'], var='NASM', mandatory=True)
-
v['LIBPATH_OCAML'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep
v['CPPPATH_OCAML'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep
v['LIB_OCAML'] = 'camlrun'
-
Task.task_type_from_func('macapp', vars=[], func=app_build, after="cxx_link cc_link static_link")
Task.task_type_from_func('macplist', vars=[], func=plist_build, after="cxx_link cc_link static_link")
-
def set_options(opt):
opt.add_option("--with-perl-binary", type="string", dest="perlbinary", help = 'Specify alternate perl binary', default=None)
opt.add_option("--with-perl-archdir", type="string", dest="perlarchdir", help = 'Specify directory where to install arch specific files', default=None)
-
find_deps(node)
return (nodes, names)
-
-
try:
# Get some python configuration variables using distutils
- v = 'prefix SO SYSLIBS LDFLAGS SHLIBS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET'.split()
+ v = 'prefix SO SYSLIBS LDFLAGS SHLIBS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDVERSION'.split()
(python_prefix, python_SO, python_SYSLIBS, python_LDFLAGS, python_SHLIBS,
python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED,
- python_MACOSX_DEPLOYMENT_TARGET) = \
+ python_MACOSX_DEPLOYMENT_TARGET, python_LDVERSION) = \
_get_python_variables(python, ["get_config_var('%s') or ''" % x for x in v],
['from distutils.sysconfig import get_config_var'])
except RuntimeError:
INCLUDEPY = %r
Py_ENABLE_SHARED = %r
MACOSX_DEPLOYMENT_TARGET = %r
+LDVERSION = %r
""" % (python, python_prefix, python_SO, python_SYSLIBS, python_LDFLAGS, python_SHLIBS,
- python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED, python_MACOSX_DEPLOYMENT_TARGET))
+ python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED, python_MACOSX_DEPLOYMENT_TARGET,
+ python_LDVERSION))
# Allow some python overrides from env vars for cross-compiling
os_env = dict(os.environ)
parse_flags(python_LDFLAGS, 'PYEMBED', env)
result = False
- name = 'python' + env['PYTHON_VERSION']
+ if not python_LDVERSION:
+ python_LDVERSION = env['PYTHON_VERSION']
+ name = 'python' + python_LDVERSION
if python_LIBDIR is not None:
path = [python_LIBDIR]
if not result:
conf.log.write("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
path = [os.path.join(python_prefix, "libs")]
- name = 'python' + env['PYTHON_VERSION'].replace('.', '')
+ name = 'python' + python_LDVERSION.replace('.', '')
result = conf.check(lib=name, uselib='PYEMBED', libpath=path)
if result:
default=1,
help='Do not install optimised compiled .pyo files (configuration) [Default:install]',
dest='pyo')
-
opt.add_option('--no-qt4-framework', action="store_false", help='do not use the framework version of Qt4 in OS X', dest='use_qt4_osxframework',default=True)
opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False)
-
opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files')
opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path')
opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary')
-
task.env.SRCFILE = srcfile
ret = fun(task)
if ret:
- error('error when calling %s %s' % (command, latex_compile_cmd))
+ error('error when calling %s %s' % (command, latex_fun))
return ret
return None # ok
cls.scan = scan
cls = b('pdflatex', pdflatex_build, vars=pdflatex_vardeps)
cls.scan = scan
-
for (f, code, out, err) in lst:
if code:
Utils.pprint('CYAN', ' %s' % f)
-
-
valaopts.add_option ('--vala-target-glib', default=None,
dest='vala_target_glib', metavar='MAJOR.MINOR',
help='Target version of glib for Vala GObject code generation')
-
conf.fatal('winrc was not found!')
v['WINRCFLAGS'] = ''
-
"print messages in color"
sys.stderr.write("%s%s%s %s%s" % (Logs.colors(col), str, Logs.colors.NORMAL, label, sep))
-def check_dir(dir):
+def check_dir(path):
"""If a folder doesn't exists, create it."""
- try:
- os.lstat(dir)
- except OSError:
+ if not os.path.isdir(path):
try:
- os.makedirs(dir)
+ os.makedirs(path)
except OSError, e:
- raise WafError("Cannot create folder '%s' (original error: %s)" % (dir, e))
+ if not os.path.isdir(path):
+ raise Errors.WafError('Cannot create the folder %r' % path, ex=e)
+
def cmd_output(cmd, **kw):
return ret
wrap.__cache__ = cache
return wrap
-
sys.stderr = sys.stdout = AnsiTerm()
os.environ['TERM'] = 'vt100'
-
self.wait()
return (stdout, stderr)
-
for v in all_modifs[k]:
modif(os.path.join(dir, 'wafadmin'), k, v)
#print('substitutions finished')
-
#!/usr/bin/env python
APPNAME = 'talloc'
-VERSION = '2.1.2'
+VERSION = '2.1.3'
blddir = 'bin'
import wafsamba, samba_dist, Options
# setup what directories to put in a tarball
-samba_dist.DIST_DIRS('lib/talloc:. lib/replace:lib/replace buildtools:buildtools')
+samba_dist.DIST_DIRS("""lib/talloc:. lib/replace:lib/replace
+buildtools:buildtools third_party/waf:third_party/waf""")
def set_options(opt):
if not conf.env.disable_python:
# also disable if we don't have the python libs installed
- conf.find_program('python', var='PYTHON')
- conf.check_tool('python')
- conf.check_python_version((2,4,2))
+ conf.SAMBA_CHECK_PYTHON(mandatory=False, version=(2,4,2))
conf.SAMBA_CHECK_PYTHON_HEADERS(mandatory=False)
if not conf.env.HAVE_PYTHON_H:
Logs.warn('Disabling pytalloc-util as python devel libs not found')
public_headers=[],
enabled=bld.env.TALLOC_COMPAT1)
+ testsuite_deps = 'talloc'
+ if bld.CONFIG_SET('HAVE_PTHREAD'):
+ testsuite_deps += ' pthread'
+
bld.SAMBA_BINARY('talloc_testsuite',
'testsuite_main.c testsuite.c',
- deps='talloc',
+ testsuite_deps,
install=False)
else:
manpages='man/talloc.3')
if not bld.CONFIG_SET('USING_SYSTEM_PYTALLOC_UTIL') and not bld.env.disable_python:
- bld.SAMBA_LIBRARY('pytalloc-util',
- source='pytalloc_util.c',
- public_deps='talloc',
- pyembed=True,
- vnum=VERSION,
- hide_symbols=True,
- abi_directory='ABI',
- abi_match='pytalloc_*',
- private_library=private_library,
- public_headers='pytalloc.h',
- pc_files='pytalloc-util.pc'
- )
- bld.SAMBA_PYTHON('pytalloc',
- 'pytalloc.c',
- deps='talloc pytalloc-util',
- enabled=True,
- realname='talloc.so')
+ for env in bld.gen_python_environments(['PKGCONFIGDIR']):
+ name = bld.pyembed_libname('pytalloc-util')
+
+ bld.SAMBA_LIBRARY(name,
+ source='pytalloc_util.c',
+ public_deps='talloc',
+ pyembed=True,
+ vnum=VERSION,
+ hide_symbols=True,
+ abi_directory='ABI',
+ abi_match='pytalloc_*',
+ private_library=private_library,
+ public_headers='pytalloc.h',
+ pc_files='pytalloc-util.pc'
+ )
+ bld.SAMBA_PYTHON('pytalloc',
+ 'pytalloc.c',
+ deps='talloc ' + name,
+ enabled=True,
+ realname='talloc.so')
+
+ bld.SAMBA_PYTHON('test_pytalloc',
+ 'test_pytalloc.c',
+ deps='pytalloc',
+ enabled=True,
+ realname='_test_pytalloc.so',
+ install=False)
+
def test(ctx):
'''run talloc testsuite'''
cmd = os.path.join(Utils.g_module.blddir, 'talloc_testsuite')
ret = samba_utils.RUN_COMMAND(cmd)
print("testsuite returned %d" % ret)
- sys.exit(ret)
+ pyret = samba_utils.RUN_PYTHON_TESTS(['test_pytalloc.py'])
+ print("python testsuite returned %d" % pyret)
+ sys.exit(ret or pyret)
def dist():
'''makes a tarball for distribution'''