2 Samba Unix SMB/CIFS implementation.
4 Samba trivial allocation library - new interface
6 NOTE: Please read talloc_guide.txt for full documentation
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
30 inspired by http://swapped.cc/halloc/
36 #ifdef HAVE_SYS_AUXV_H
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
48 /* Special macros that are no-ops except when run under Valgrind on
49 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51 /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
57 /* use this to force every realloc to change the pointer, to stress test
58 code that might not cope */
59 #define ALWAYS_REALLOC 0
62 #define MAX_TALLOC_SIZE 0x10000000
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
70 * Bits above this are random, used to make it harder to fake talloc
71 * headers during an attack. Try not to change this without good reason.
73 #define TALLOC_FLAG_MASK 0x0F
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 #define TALLOC_MAGIC_NON_RANDOM ( \
79 ~TALLOC_FLAG_MASK & ( \
81 (TALLOC_BUILD_VERSION_MAJOR << 24) + \
82 (TALLOC_BUILD_VERSION_MINOR << 16) + \
83 (TALLOC_BUILD_VERSION_RELEASE << 8)))
84 static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
86 /* by default we abort when given a bad pointer (such as when talloc_free() is called
87 on a pointer that came from malloc() */
89 #define TALLOC_ABORT(reason) abort()
92 #ifndef discard_const_p
93 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
94 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
96 # define discard_const_p(type, ptr) ((type *)(ptr))
100 /* these macros gain us a few percent of speed on gcc */
102 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
103 as its first argument */
105 #define likely(x) __builtin_expect(!!(x), 1)
108 #define unlikely(x) __builtin_expect(!!(x), 0)
112 #define likely(x) (x)
115 #define unlikely(x) (x)
119 /* this null_context is only used if talloc_enable_leak_report() or
120 talloc_enable_leak_report_full() is called, otherwise it remains
123 static void *null_context;
124 static bool talloc_report_null;
125 static bool talloc_report_null_full;
126 static void *autofree_context;
128 static void talloc_setup_atexit(void);
130 /* used to enable fill of memory on free, which can be useful for
131 * catching use after free errors when valgrind is too slow
139 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
142 * do not wipe the header, to allow the
143 * double-free logic to still work
145 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
146 if (unlikely(talloc_fill.enabled)) { \
147 size_t _flen = (_tc)->size; \
148 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
149 memset(_fptr, talloc_fill.fill_value, _flen); \
153 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
154 /* Mark the whole chunk as not accessable */
155 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
156 size_t _flen = TC_HDR_SIZE + (_tc)->size; \
157 char *_fptr = (char *)(_tc); \
158 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
161 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
164 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
165 TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
166 TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
169 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
170 if (unlikely(talloc_fill.enabled)) { \
171 size_t _flen = (_tc)->size - (_new_size); \
172 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
173 _fptr += (_new_size); \
174 memset(_fptr, talloc_fill.fill_value, _flen); \
178 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
179 /* Mark the unused bytes not accessable */
180 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
181 size_t _flen = (_tc)->size - (_new_size); \
182 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
183 _fptr += (_new_size); \
184 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
187 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
190 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
191 TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
192 TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
195 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
196 if (unlikely(talloc_fill.enabled)) { \
197 size_t _flen = (_tc)->size - (_new_size); \
198 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
199 _fptr += (_new_size); \
200 memset(_fptr, talloc_fill.fill_value, _flen); \
204 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
205 /* Mark the unused bytes as undefined */
206 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
207 size_t _flen = (_tc)->size - (_new_size); \
208 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
209 _fptr += (_new_size); \
210 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
213 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
216 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
217 TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
218 TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
221 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
222 /* Mark the new bytes as undefined */
223 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
224 size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
225 size_t _new_used = TC_HDR_SIZE + (_new_size); \
226 size_t _flen = _new_used - _old_used; \
227 char *_fptr = _old_used + (char *)(_tc); \
228 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
231 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
234 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
235 TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
238 struct talloc_reference_handle {
239 struct talloc_reference_handle *next, *prev;
241 const char *location;
244 struct talloc_memlimit {
245 struct talloc_chunk *parent;
246 struct talloc_memlimit *upper;
251 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
252 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
254 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
256 static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
258 static inline void _tc_set_name_const(struct talloc_chunk *tc,
260 static struct talloc_chunk *_vasprintf_tc(const void *t,
264 typedef int (*talloc_destructor_t)(void *);
266 struct talloc_pool_hdr;
268 struct talloc_chunk {
270 * flags includes the talloc magic, which is randomised to
271 * make overwrite attacks harder
276 * If you have a logical tree like:
282 * <child 1> <child 2> <child 3>
284 * The actual talloc tree is:
288 * <child 1> - <child 2> - <child 3>
290 * The children are linked with next/prev pointers, and
291 * child 1 is linked to the parent with parent/child
295 struct talloc_chunk *next, *prev;
296 struct talloc_chunk *parent, *child;
297 struct talloc_reference_handle *refs;
298 talloc_destructor_t destructor;
304 * if 'limit' is set it means all *new* children of the context will
305 * be limited to a total aggregate size ox max_size for memory
307 * cur_size is used to keep track of the current use
309 struct talloc_memlimit *limit;
312 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
313 * is a pointer to the struct talloc_chunk of the pool that it was
314 * allocated from. This way children can quickly find the pool to chew
317 struct talloc_pool_hdr *pool;
320 /* 16 byte alignment seems to keep everyone happy */
321 #define TC_ALIGN16(s) (((s)+15)&~15)
322 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
323 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
325 _PUBLIC_ int talloc_version_major(void)
327 return TALLOC_VERSION_MAJOR;
330 _PUBLIC_ int talloc_version_minor(void)
332 return TALLOC_VERSION_MINOR;
335 _PUBLIC_ int talloc_test_get_magic(void)
340 static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
341 const char *location)
344 * Mark this memory as free, and also over-stamp the talloc
345 * magic with the old-style magic.
347 * Why? This tries to avoid a memory read use-after-free from
348 * disclosing our talloc magic, which would then allow an
349 * attacker to prepare a valid header and so run a destructor.
352 tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
353 | (tc->flags & TALLOC_FLAG_MASK);
355 /* we mark the freed memory with where we called the free
356 * from. This means on a double free error we can report where
357 * the first free came from
364 static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
367 * Mark this memory as not free.
369 * Why? This is memory either in a pool (and so available for
370 * talloc's re-use or after the realloc(). We need to mark
371 * the memory as free() before any realloc() call as we can't
372 * write to the memory after that.
374 * We put back the normal magic instead of the 'not random'
378 tc->flags = talloc_magic |
379 ((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
382 static void (*talloc_log_fn)(const char *message);
384 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
386 talloc_log_fn = log_fn;
/*
 * Library constructor: randomise talloc_magic so talloc headers are
 * hard to forge. Prefers the kernel-supplied AT_RANDOM bytes (stable
 * per-exec), otherwise falls back to bits of our own load address.
 * NOTE(review): several lines (braces, #else/#endif) appear truncated
 * in this dump; code left byte-identical.
 */
389 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
390 void talloc_lib_init(void) __attribute__((constructor));
391 void talloc_lib_init(void)
393 uint32_t random_value;
394 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
397 * Use the kernel-provided random values used for
398 * ASLR. This won't change per-exec, which is ideal for us
400 p = (uint8_t *) getauxval(AT_RANDOM);
403 * We get 16 bytes from getauxval. By calling rand(),
404 * a totally insecure PRNG, but one that will
405 * deterministically have a different value when called
406 * twice, we ensure that if two talloc-like libraries
407 * are somehow loaded in the same address space, that
408 * because we choose different bytes, we will keep the
409 * protection against collision of multiple talloc
412 * This protection is important because the effects of
413 * passing a talloc pointer from one to the other may
414 * be very hard to determine.
416 int offset = rand() % (16 - sizeof(random_value));
417 memcpy(&random_value, p + offset, sizeof(random_value));
422 * Otherwise, hope the location we are loaded in
423 * memory is randomised by someone else
425 random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
/* low flag bits stay clear so TALLOC_FLAG_* can be OR-ed in */
427 talloc_magic = random_value & ~TALLOC_FLAG_MASK;
430 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
/*
 * Library destructor (or atexit handler when the destructor attribute
 * is unavailable): frees the autofree context and, if leak reporting
 * was enabled, prints a (full) report of what is still hanging off the
 * null context to stderr. NOTE(review): lines truncated in this dump.
 */
433 #ifdef HAVE_DESTRUCTOR_ATTRIBUTE
434 void talloc_lib_fini(void) __attribute__((destructor));
435 void talloc_lib_fini(void)
436 #else /* ! HAVE_DESTRUCTOR_ATTRIBUTE */
437 static void talloc_lib_fini(void)
438 #endif /* ! HAVE_DESTRUCTOR_ATTRIBUTE */
440 TALLOC_FREE(autofree_context);
/* nothing leaked: no report needed */
442 if (talloc_total_size(null_context) == 0) {
446 if (talloc_report_null_full) {
447 talloc_report_full(null_context, stderr);
448 } else if (talloc_report_null) {
449 talloc_report(null_context, stderr);
/*
 * Register talloc_lib_fini() via atexit() on platforms without
 * __attribute__((destructor)) support; a no-op otherwise.
 * NOTE(review): the run-once guard lines appear truncated in this dump.
 */
453 static void talloc_setup_atexit(void)
455 #ifndef HAVE_DESTRUCTOR_ATTRIBUTE
462 #warning "No __attribute__((destructor)) support found on this platform, using atexit"
463 atexit(talloc_lib_fini);
465 #endif /* ! HAVE_DESTRUCTOR_ATTRIBUTE */
/*
 * printf-style logging helper: formats the message into a temporary
 * talloc string, hands it to the installed log callback, then frees it.
 * Silently does nothing when no callback is installed.
 * NOTE(review): the va_start/va_end lines appear truncated in this dump.
 */
468 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
469 static void talloc_log(const char *fmt, ...)
474 if (!talloc_log_fn) {
479 message = talloc_vasprintf(NULL, fmt, ap);
482 talloc_log_fn(message);
483 talloc_free(message);
/* Default log sink: write the message verbatim to stderr. */
static void talloc_log_stderr(const char *message)
{
	fprintf(stderr, "%s", message);
}
491 _PUBLIC_ void talloc_set_log_stderr(void)
493 talloc_set_log_fn(talloc_log_stderr);
496 static void (*talloc_abort_fn)(const char *reason);
498 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
500 talloc_abort_fn = abort_fn;
503 static void talloc_abort(const char *reason)
505 talloc_log("%s\n", reason);
507 if (!talloc_abort_fn) {
508 TALLOC_ABORT(reason);
511 talloc_abort_fn(reason);
/* Fatal-error helper for use-after-free detection. */
static void talloc_abort_access_after_free(void)
{
	talloc_abort("Bad talloc magic value - access after free");
}
/* Fatal-error helper for a header whose magic matches nothing we know. */
static void talloc_abort_unknown_value(void)
{
	talloc_abort("Bad talloc magic value - unknown value");
}
524 /* panic if we get a bad magic value */
525 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
527 const char *pp = (const char *)ptr;
528 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
529 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
530 if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
531 == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
532 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
533 talloc_abort_access_after_free();
537 talloc_abort_unknown_value();
543 /* hook into the front of the list */
544 #define _TLIST_ADD(list, p) \
548 (p)->next = (p)->prev = NULL; \
550 (list)->prev = (p); \
551 (p)->next = (list); \
557 /* remove an element from a list - element doesn't have to be in list. */
558 #define _TLIST_REMOVE(list, p) \
560 if ((p) == (list)) { \
561 (list) = (p)->next; \
562 if (list) (list)->prev = NULL; \
564 if ((p)->prev) (p)->prev->next = (p)->next; \
565 if ((p)->next) (p)->next->prev = (p)->prev; \
567 if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
572 return the parent chunk of a pointer
574 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
576 struct talloc_chunk *tc;
578 if (unlikely(ptr == NULL)) {
582 tc = talloc_chunk_from_ptr(ptr);
583 while (tc->prev) tc=tc->prev;
588 _PUBLIC_ void *talloc_parent(const void *ptr)
590 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
591 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
597 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
599 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
600 return tc? tc->name : NULL;
604 A pool carries an in-pool object count count in the first 16 bytes.
605 bytes. This is done to support talloc_steal() to a parent outside of the
606 pool. The count includes the pool itself, so a talloc_free() on a pool will
607 only destroy the pool if the count has dropped to zero. A talloc_free() of a
608 pool member will reduce the count, and eventually also call free(3) on the
611 The object count is not put into "struct talloc_chunk" because it is only
612 relevant for talloc pools and the alignment to 16 bytes would increase the
613 memory footprint of each talloc chunk by those 16 bytes.
616 struct talloc_pool_hdr {
618 unsigned int object_count;
622 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
624 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
626 return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
629 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
631 return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
634 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
636 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
637 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
640 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
642 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
645 /* If tc is inside a pool, this gives the next neighbour. */
646 static inline void *tc_next_chunk(struct talloc_chunk *tc)
648 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
/* First member chunk of a pool, i.e. the one right after the pool chunk. */
static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
{
	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
	return tc_next_chunk(tc);
}
657 /* Mark the whole remaining pool as not accessable */
/*
 * Optionally fills the unused tail with the TALLOC_FREE_FILL byte and,
 * in DEVELOPER+valgrind builds, marks it NOACCESS so stray reads trap.
 * NOTE(review): closing braces/#endif appear truncated in this dump.
 */
658 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
660 size_t flen = tc_pool_space_left(pool_hdr);
662 if (unlikely(talloc_fill.enabled)) {
663 memset(pool_hdr->end, talloc_fill.fill_value, flen);
666 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
667 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
/*
 * Try to satisfy an allocation from the parent's pool (if the parent is
 * a pool or pool member). Bump-allocates a 16-byte aligned chunk from
 * pool_hdr->end, flags it POOLMEM, and increments the pool's object
 * count. Returns NULL (via the truncated early-return paths) when there
 * is no pool or not enough space, in which case the caller mallocs.
 * NOTE(review): several return/brace lines are truncated in this dump.
 */
675 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
676 size_t size, size_t prefix_len)
678 struct talloc_pool_hdr *pool_hdr = NULL;
680 struct talloc_chunk *result;
683 if (parent == NULL) {
687 if (parent->flags & TALLOC_FLAG_POOL) {
688 pool_hdr = talloc_pool_from_chunk(parent);
690 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
691 pool_hdr = parent->pool;
694 if (pool_hdr == NULL) {
698 space_left = tc_pool_space_left(pool_hdr);
701 * Align size to 16 bytes
703 chunk_size = TC_ALIGN16(size + prefix_len);
705 if (space_left < chunk_size) {
/* prefix_len (e.g. a pool header) sits before the chunk header */
709 result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
711 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
712 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
715 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
717 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
718 result->pool = pool_hdr;
720 pool_hdr->object_count++;
726 Allocate a bit of memory as a child of an existing pointer
/*
 * Core allocator: allocates TC_HDR_SIZE + size (+ prefix_len) bytes,
 * first trying the parent's pool via tc_alloc_pool(), else malloc()
 * guarded by the parent's memlimit. Initialises the chunk header and
 * links it at the head of the parent's child list. Returns the user
 * pointer and passes the chunk out via *tc_ret.
 * NOTE(review): many brace/return lines are truncated in this dump.
 */
728 static inline void *__talloc_with_prefix(const void *context,
731 struct talloc_chunk **tc_ret)
733 struct talloc_chunk *tc = NULL;
734 struct talloc_memlimit *limit = NULL;
735 size_t total_len = TC_HDR_SIZE + size + prefix_len;
736 struct talloc_chunk *parent = NULL;
738 if (unlikely(context == NULL)) {
739 context = null_context;
742 if (unlikely(size >= MAX_TALLOC_SIZE)) {
/* overflow check: total_len wrapped around */
746 if (unlikely(total_len < TC_HDR_SIZE)) {
750 if (likely(context != NULL)) {
751 parent = talloc_chunk_from_ptr(context);
753 if (parent->limit != NULL) {
754 limit = parent->limit;
757 tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
764 * Only do the memlimit check/update on actual allocation.
766 if (!talloc_memlimit_check(limit, total_len)) {
771 ptr = malloc(total_len);
772 if (unlikely(ptr == NULL)) {
775 tc = (struct talloc_chunk *)(ptr + prefix_len);
776 tc->flags = talloc_magic;
779 talloc_memlimit_grow(limit, total_len);
784 tc->destructor = NULL;
789 if (likely(context != NULL)) {
/* new chunk becomes head of the child list; old head loses parent link */
791 parent->child->parent = NULL;
792 tc->next = parent->child;
801 tc->next = tc->prev = tc->parent = NULL;
805 return TC_PTR_FROM_CHUNK(tc);
808 static inline void *__talloc(const void *context,
810 struct talloc_chunk **tc)
812 return __talloc_with_prefix(context, size, 0, tc);
816 * Create a talloc pool
/*
 * Allocate a pool: a normal chunk with a TP_HDR_SIZE prefix that holds
 * the pool header. The bump pointer (end) starts at the user pointer,
 * object_count starts at 1 (the pool itself), and the whole payload is
 * invalidated for fill/valgrind.
 * NOTE(review): brace/return lines are truncated in this dump.
 */
819 static inline void *_talloc_pool(const void *context, size_t size)
821 struct talloc_chunk *tc;
822 struct talloc_pool_hdr *pool_hdr;
825 result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
827 if (unlikely(result == NULL)) {
831 pool_hdr = talloc_pool_from_chunk(tc);
833 tc->flags |= TALLOC_FLAG_POOL;
836 pool_hdr->object_count = 1;
837 pool_hdr->end = result;
838 pool_hdr->poolsize = size;
840 tc_invalidate_pool(pool_hdr);
845 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
847 return _talloc_pool(context, size);
851 * Create a talloc pool correctly sized for a basic size plus
852 * a number of subobjects whose total size is given. Essentially
853 * a custom allocator for talloc to reduce fragmentation.
/*
 * Sizing logic: poolsize = object + subobjects, plus per-object slack
 * for chunk headers and 16-byte alignment; every addition is checked
 * for overflow. The pool's chunk is then shrunk to type_size and the
 * bump pointer advanced past the object body.
 * NOTE(review): the overflow-bail return lines are truncated here.
 */
856 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
858 const char *type_name,
859 unsigned num_subobjects,
860 size_t total_subobjects_size)
862 size_t poolsize, subobjects_slack, tmp;
863 struct talloc_chunk *tc;
864 struct talloc_pool_hdr *pool_hdr;
867 poolsize = type_size + total_subobjects_size;
869 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
873 if (num_subobjects == UINT_MAX) {
876 num_subobjects += 1; /* the object body itself */
879 * Alignment can increase the pool size by at most 15 bytes per object
880 * plus alignment for the object itself
882 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
883 if (subobjects_slack < num_subobjects) {
887 tmp = poolsize + subobjects_slack;
888 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
893 ret = _talloc_pool(ctx, poolsize);
898 tc = talloc_chunk_from_ptr(ret);
899 tc->size = type_size;
901 pool_hdr = talloc_pool_from_chunk(tc);
903 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
904 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
907 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
909 _tc_set_name_const(tc, type_name);
917 setup a destructor to be called on free of a pointer
918 the destructor should return 0 on success, or -1 on failure.
919 if the destructor fails then the free is failed, and the memory can
920 be continued to be used
922 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
924 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
925 tc->destructor = destructor;
929 increase the reference count on a piece of memory.
931 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
933 if (unlikely(!talloc_reference(null_context, ptr))) {
940 helper for talloc_reference()
942 this is referenced by a function pointer and should not be inline
944 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
946 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
947 _TLIST_REMOVE(ptr_tc->refs, handle);
952 more efficient way to add a name to a pointer - the name must point to a
955 static inline void _tc_set_name_const(struct talloc_chunk *tc,
962 internal talloc_named_const()
964 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
967 struct talloc_chunk *tc;
969 ptr = __talloc(context, size, &tc);
970 if (unlikely(ptr == NULL)) {
974 _tc_set_name_const(tc, name);
980 make a secondary reference to a pointer, hanging off the given context.
981 the pointer remains valid until both the original caller and this given
984 the major use for this is when two different structures need to reference the
985 same underlying data, and you want to be able to free the two instances separately,
/*
 * Make a secondary reference to ptr, hanging off context: allocates a
 * reference handle (named with the TALLOC_MAGIC_REFERENCE sentinel),
 * gives it a destructor that unlinks it, and adds it to ptr's refs
 * list. NOTE(review): the final return line is truncated in this dump;
 * on success the original pointer is returned.
 */
988 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
990 struct talloc_chunk *tc;
991 struct talloc_reference_handle *handle;
992 if (unlikely(ptr == NULL)) return NULL;
994 tc = talloc_chunk_from_ptr(ptr);
995 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
996 sizeof(struct talloc_reference_handle),
997 TALLOC_MAGIC_REFERENCE);
998 if (unlikely(handle == NULL)) return NULL;
1000 /* note that we hang the destructor off the handle, not the
1001 main context as that allows the caller to still setup their
1002 own destructor on the context if they want to */
1003 talloc_set_destructor(handle, talloc_reference_destructor);
1004 handle->ptr = discard_const_p(void, ptr);
1005 handle->location = location;
1006 _TLIST_ADD(tc->refs, handle);
1010 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
/*
 * Free a chunk that was allocated inside a pool. Marks it free and
 * decrements the pool's object count; when only the pool chunk itself
 * remains the bump pointer is rewound, and when the count hits zero the
 * pool itself is released (recursively, if the pool was itself pool
 * allocated). Otherwise the space may be reclaimed only if this chunk
 * was the most recent allocation (pool->end == next_tc).
 * NOTE(review): several brace/return and free(3) lines are truncated in
 * this dump; code left byte-identical.
 */
1012 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
1013 const char *location)
1015 struct talloc_pool_hdr *pool;
1016 struct talloc_chunk *pool_tc;
1020 pool_tc = talloc_chunk_from_pool(pool);
1021 next_tc = tc_next_chunk(tc);
1023 _talloc_chunk_set_free(tc, location);
1025 TC_INVALIDATE_FULL_CHUNK(tc);
1027 if (unlikely(pool->object_count == 0)) {
1028 talloc_abort("Pool object count zero!");
1032 pool->object_count--;
1034 if (unlikely(pool->object_count == 1
1035 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
1037 * if there is just one object left in the pool
1038 * and pool->flags does not have TALLOC_FLAG_FREE,
1039 * it means this is the pool itself and
1040 * the rest is available for new objects
1043 pool->end = tc_pool_first_chunk(pool);
1044 tc_invalidate_pool(pool);
1048 if (unlikely(pool->object_count == 0)) {
1050 * we mark the freed memory with where we called the free
1051 * from. This means on a double free error we can report where
1052 * the first free came from
1054 pool_tc->name = location;
1056 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
1057 _tc_free_poolmem(pool_tc, location);
1060 * The tc_memlimit_update_on_free()
1061 * call takes into account the
1062 * prefix TP_HDR_SIZE allocated before
1063 * the pool talloc_chunk.
1065 tc_memlimit_update_on_free(pool_tc);
1066 TC_INVALIDATE_FULL_CHUNK(pool_tc);
1072 if (pool->end == next_tc) {
1074 * if pool->pool still points to end of
1075 * 'tc' (which is stored in the 'next_tc' variable),
1076 * we can reclaim the memory of 'tc'.
1083 * Do nothing. The memory is just "wasted", waiting for the pool
1084 * itself to be freed.
1088 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1090 const char *location);
1092 static inline int _talloc_free_internal(void *ptr, const char *location);
1095 internal free call that takes a struct talloc_chunk *.
/*
 * Internal free taking a chunk pointer. Order of operations:
 * references are resolved first (freeing a reference instead of the
 * object when appropriate), then the destructor runs (with overwrite
 * protection and a -1 sentinel to block recursive frees; a failing
 * destructor aborts the free), then the chunk is unlinked from parent
 * and siblings, children are freed, and finally the memory is released
 * as pool / poolmem / plain malloc as its flags dictate.
 * NOTE(review): many brace/return lines are truncated in this dump;
 * code left byte-identical.
 */
1097 static inline int _tc_free_internal(struct talloc_chunk *tc,
1098 const char *location)
1101 void *ptr = TC_PTR_FROM_CHUNK(tc);
1103 if (unlikely(tc->refs)) {
1105 /* check if this is a reference from a child or
1106 * grandchild back to it's parent or grandparent
1108 * in that case we need to remove the reference and
1109 * call another instance of talloc_free() on the current
1112 is_child = talloc_is_parent(tc->refs, ptr);
1113 _talloc_free_internal(tc->refs, location);
1115 return _talloc_free_internal(ptr, location);
1120 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1121 /* we have a free loop - stop looping */
1125 if (unlikely(tc->destructor)) {
1126 talloc_destructor_t d = tc->destructor;
1129 * Protect the destructor against some overwrite
1130 * attacks, by explicitly checking it has the right
1133 if (talloc_chunk_from_ptr(ptr) != tc) {
1135 * This can't actually happen, the
1136 * call itself will panic.
1138 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1141 if (d == (talloc_destructor_t)-1) {
1144 tc->destructor = (talloc_destructor_t)-1;
1147 * Only replace the destructor pointer if
1148 * calling the destructor didn't modify it.
1150 if (tc->destructor == (talloc_destructor_t)-1) {
1155 tc->destructor = NULL;
1159 _TLIST_REMOVE(tc->parent->child, tc);
1160 if (tc->parent->child) {
1161 tc->parent->child->parent = tc->parent;
1164 if (tc->prev) tc->prev->next = tc->next;
1165 if (tc->next) tc->next->prev = tc->prev;
1166 tc->prev = tc->next = NULL;
1169 tc->flags |= TALLOC_FLAG_LOOP;
1171 _tc_free_children_internal(tc, ptr, location);
1173 _talloc_chunk_set_free(tc, location);
1175 if (tc->flags & TALLOC_FLAG_POOL) {
1176 struct talloc_pool_hdr *pool;
1178 pool = talloc_pool_from_chunk(tc);
1180 if (unlikely(pool->object_count == 0)) {
1181 talloc_abort("Pool object count zero!");
1185 pool->object_count--;
1187 if (likely(pool->object_count != 0)) {
1192 * With object_count==0, a pool becomes a normal piece of
1193 * memory to free. If it's allocated inside a pool, it needs
1194 * to be freed as poolmem, else it needs to be just freed.
1201 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1202 _tc_free_poolmem(tc, location);
1206 tc_memlimit_update_on_free(tc);
1208 TC_INVALIDATE_FULL_CHUNK(tc);
1214 internal talloc_free call
/*
 * Internal talloc_free(): NULL is tolerated, the TALLOC_FREE_FILL
 * environment variable is lazily parsed on first free (enabling
 * fill-on-free for use-after-free hunting), then the chunk is freed
 * via _tc_free_internal().
 * NOTE(review): some brace/return lines are truncated in this dump.
 */
1216 static inline int _talloc_free_internal(void *ptr, const char *location)
1218 struct talloc_chunk *tc;
1220 if (unlikely(ptr == NULL)) {
1224 /* possibly initialised the talloc fill value */
1225 if (unlikely(!talloc_fill.initialised)) {
1226 const char *fill = getenv(TALLOC_FILL_ENV);
1228 talloc_fill.enabled = true;
1229 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1231 talloc_fill.initialised = true;
1234 tc = talloc_chunk_from_ptr(ptr);
1235 return _tc_free_internal(tc, location);
1238 static inline size_t _talloc_total_limit_size(const void *ptr,
1239 struct talloc_memlimit *old_limit,
1240 struct talloc_memlimit *new_limit);
1243 move a lump of memory from one talloc context to another return the
1244 ptr on success, or NULL if it could not be transferred.
1245 passing NULL as ptr will always return NULL with no side effects.
/*
 * Move a chunk to a new parent context. Handles memlimit accounting
 * (shrink at the source, grow at the destination), unlinks the chunk
 * from its current parent/sibling lists, and relinks it under the new
 * parent (or leaves it orphaned when new_ctx resolves to NULL).
 * Stealing onto itself or its current parent is a no-op.
 * NOTE(review): brace/early-return lines are truncated in this dump.
 */
1247 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1249 struct talloc_chunk *tc, *new_tc;
1250 size_t ctx_size = 0;
1252 if (unlikely(!ptr)) {
1256 if (unlikely(new_ctx == NULL)) {
1257 new_ctx = null_context;
1260 tc = talloc_chunk_from_ptr(ptr);
1262 if (tc->limit != NULL) {
1264 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1266 /* Decrement the memory limit from the source .. */
1267 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1269 if (tc->limit->parent == tc) {
1270 tc->limit->upper = NULL;
1276 if (unlikely(new_ctx == NULL)) {
1278 _TLIST_REMOVE(tc->parent->child, tc);
1279 if (tc->parent->child) {
1280 tc->parent->child->parent = tc->parent;
1283 if (tc->prev) tc->prev->next = tc->next;
1284 if (tc->next) tc->next->prev = tc->prev;
1287 tc->parent = tc->next = tc->prev = NULL;
1288 return discard_const_p(void, ptr);
1291 new_tc = talloc_chunk_from_ptr(new_ctx);
1293 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1294 return discard_const_p(void, ptr);
1298 _TLIST_REMOVE(tc->parent->child, tc);
1299 if (tc->parent->child) {
1300 tc->parent->child->parent = tc->parent;
1303 if (tc->prev) tc->prev->next = tc->next;
1304 if (tc->next) tc->next->prev = tc->prev;
1305 tc->prev = tc->next = NULL;
1308 tc->parent = new_tc;
1309 if (new_tc->child) new_tc->child->parent = NULL;
1310 _TLIST_ADD(new_tc->child, tc);
1312 if (tc->limit || new_tc->limit) {
1313 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1315 /* .. and increment it in the destination. */
1316 if (new_tc->limit) {
1317 talloc_memlimit_grow(new_tc->limit, ctx_size);
1321 return discard_const_p(void, ptr);
1325 move a lump of memory from one talloc context to another return the
1326 ptr on success, or NULL if it could not be transferred.
1327 passing NULL as ptr will always return NULL with no side effects.
/*
 * Public steal with source-location tracking: warns (via talloc_log)
 * when stealing a pointer that still has references, or when stealing
 * into one of the pointer's own children, then delegates to
 * _talloc_steal_internal(). NOTE(review): brace lines and the warning
 * argument lines are truncated in this dump.
 */
1329 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1331 struct talloc_chunk *tc;
1333 if (unlikely(ptr == NULL)) {
1337 tc = talloc_chunk_from_ptr(ptr);
1339 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1340 struct talloc_reference_handle *h;
1342 talloc_log("WARNING: talloc_steal with references at %s\n",
1345 for (h=tc->refs; h; h=h->next) {
1346 talloc_log("\treference at %s\n",
1352 /* this test is probably too expensive to have on in the
1353 normal build, but it useful for debugging */
1354 if (talloc_is_parent(new_ctx, ptr)) {
1355 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1359 return _talloc_steal_internal(new_ctx, ptr);
1363 this is like a talloc_steal(), but you must supply the old
1364 parent. This resolves the ambiguity in a talloc_steal() which is
1365 called on a context that has more than one parent (via references)
1367 The old parent can be either a reference or a parent
/*
 * Like talloc_steal() but with the old parent supplied explicitly,
 * resolving the ambiguity when ptr has multiple parents via references:
 * if old_parent is the real parent, steal directly; otherwise find the
 * reference handle owned by old_parent and steal that instead.
 * NOTE(review): brace/return lines are truncated in this dump.
 */
1369 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1371 struct talloc_chunk *tc;
1372 struct talloc_reference_handle *h;
1374 if (unlikely(ptr == NULL)) {
1378 if (old_parent == talloc_parent(ptr)) {
1379 return _talloc_steal_internal(new_parent, ptr);
1382 tc = talloc_chunk_from_ptr(ptr);
1383 for (h=tc->refs;h;h=h->next) {
1384 if (talloc_parent(h) == old_parent) {
1385 if (_talloc_steal_internal(new_parent, h) != h) {
1388 return discard_const_p(void, ptr);
1392 /* it wasn't a parent */
1397 remove a secondary reference to a pointer. This undoes what
1398 talloc_reference() has done. The context and pointer arguments
1399 must match those given to a talloc_reference()
/*
 * Drop the reference that 'context' holds on 'ptr'.
 * Returns the result of freeing the matching reference handle, i.e.
 * 0 on success; a NULL context is mapped to the tracked null_context.
 */
1401 static inline int talloc_unreference(const void *context, const void *ptr)
1403 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1404 struct talloc_reference_handle *h;
1406 if (unlikely(context == NULL)) {
1407 context = null_context;
/* scan the reference list for the handle whose parent is 'context' */
1410 for (h=tc->refs;h;h=h->next) {
1411 struct talloc_chunk *p = talloc_parent_chunk(h);
1413 if (context == NULL) break;
1414 } else if (TC_PTR_FROM_CHUNK(p) == context) {
/* freeing the handle removes it from tc->refs */
1422 return _talloc_free_internal(h, __location__);
1426 remove a specific parent context from a pointer. This is a more
1427 controlled variant of talloc_free()
1429 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1431 struct talloc_chunk *tc_p, *new_p, *tc_c;
1438 if (context == NULL) {
1439 context = null_context;
/* if 'context' only held a secondary reference, dropping it is enough */
1442 if (talloc_unreference(context, ptr) == 0) {
1446 if (context != NULL) {
1447 tc_c = talloc_chunk_from_ptr(context);
/* context must actually be the parent of ptr to unlink it */
1451 if (tc_c != talloc_parent_chunk(ptr)) {
1455 tc_p = talloc_chunk_from_ptr(ptr);
/* no remaining references: a real free is safe */
1457 if (tc_p->refs == NULL) {
1458 return _talloc_free_internal(ptr, __location__);
/* otherwise promote one of the remaining referrers to be the parent */
1461 new_p = talloc_parent_chunk(tc_p->refs);
1463 new_parent = TC_PTR_FROM_CHUNK(new_p);
1468 if (talloc_unreference(new_parent, ptr) != 0) {
1472 _talloc_steal_internal(new_parent, ptr);
1478 add a name to an existing pointer - va_list version
1480 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1482 va_list ap) PRINTF_ATTRIBUTE(2,0);
/*
 * Format the name as a talloc child of tc and point tc->name at it.
 * Returns the new name string, or (by fall-through) NULL on failure.
 */
1484 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1488 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1491 if (likely(name_tc)) {
1492 tc->name = TC_PTR_FROM_CHUNK(name_tc);
/* the name chunk itself gets the fixed name ".name" */
1493 _tc_set_name_const(name_tc, ".name");
1501 add a name to an existing pointer
1503 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1505 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1509 name = tc_set_name_v(tc, fmt, ap);
1516 create a named talloc pointer. Any talloc pointer can be named, and
1517 talloc_named() operates just like talloc() except that it allows you
1518 to name the pointer.
1520 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1525 struct talloc_chunk *tc;
1527 ptr = __talloc(context, size, &tc);
1528 if (unlikely(ptr == NULL)) return NULL;
1531 name = tc_set_name_v(tc, fmt, ap);
/* naming failed: undo the allocation rather than return unnamed memory */
1534 if (unlikely(name == NULL)) {
1535 _talloc_free_internal(ptr, __location__);
1543 return the name of a talloc ptr, or "UNNAMED"
1545 static inline const char *__talloc_get_name(const void *ptr)
1547 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
/* reference handles carry a magic sentinel instead of a real name */
1548 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1549 return ".reference";
1551 if (likely(tc->name)) {
/* public wrapper around __talloc_get_name() */
1557 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1559 return __talloc_get_name(ptr);
1563 check if a pointer has the given name. If it does, return the pointer,
1564 otherwise return NULL
1566 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1569 if (unlikely(ptr == NULL)) return NULL;
1570 pname = __talloc_get_name(ptr);
/* pointer equality first: type names are usually interned literals */
1571 if (likely(pname == name || strcmp(pname, name) == 0)) {
1572 return discard_const_p(void, ptr);
/* build a diagnostic message and abort; falls back to a static
 * string if the allocation for the formatted message fails */
1577 static void talloc_abort_type_mismatch(const char *location,
1579 const char *expected)
1583 reason = talloc_asprintf(NULL,
1584 "%s: Type mismatch: name[%s] expected[%s]",
1589 reason = "Type mismatch";
1592 talloc_abort(reason);
/* as talloc_check_name(), but aborts the process on mismatch or NULL */
1595 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1599 if (unlikely(ptr == NULL)) {
1600 talloc_abort_type_mismatch(location, NULL, name);
1604 pname = __talloc_get_name(ptr);
1605 if (likely(pname == name || strcmp(pname, name) == 0)) {
1606 return discard_const_p(void, ptr);
1609 talloc_abort_type_mismatch(location, pname, name);
1614 this is for compatibility with older versions of talloc
/*
 * Create a zero-sized, named, parentless context — the classic
 * talloc_init() entry point.
 */
1616 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1621 struct talloc_chunk *tc;
1623 ptr = __talloc(NULL, 0, &tc);
1624 if (unlikely(ptr == NULL)) return NULL;
1627 name = tc_set_name_v(tc, fmt, ap);
/* if the name could not be set, free the context again */
1630 if (unlikely(name == NULL)) {
1631 _talloc_free_internal(ptr, __location__);
/*
 * Free every child of tc. Children whose destructor refuses the free
 * are reparented instead of leaking (see priority order below).
 */
1638 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1640 const char *location)
1643 /* we need to work out who will own an abandoned child
1644 if it cannot be freed. In priority order, the first
1645 choice is owner of any remaining reference to this
1646 pointer, the second choice is our parent, and the
1647 final choice is the null context. */
1648 void *child = TC_PTR_FROM_CHUNK(tc->child);
1649 const void *new_parent = null_context;
1650 if (unlikely(tc->child->refs)) {
1651 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1652 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1654 if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1655 if (talloc_parent_chunk(child) != tc) {
1657 * Destructor already reparented this child.
1658 * No further reparenting needed.
1662 if (new_parent == null_context) {
1663 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1664 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1666 _talloc_steal_internal(new_parent, child);
1672 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1673 should probably not be used in new code. It's in here to keep the talloc
1674 code consistent across Samba 3 and 4.
1676 _PUBLIC_ void talloc_free_children(void *ptr)
1678 struct talloc_chunk *tc_name = NULL;
1679 struct talloc_chunk *tc;
1681 if (unlikely(ptr == NULL)) {
1685 tc = talloc_chunk_from_ptr(ptr);
1687 /* we do not want to free the context name if it is a child .. */
1688 if (likely(tc->child)) {
/* find the child chunk holding our own name, if any, and detach it
 * from the child list so the mass free below skips it */
1689 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1690 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1693 _TLIST_REMOVE(tc->child, tc_name);
1695 tc->child->parent = tc;
1700 _tc_free_children_internal(tc, ptr, __location__);
1702 /* .. so we put it back after all other children have been freed */
1705 tc->child->parent = NULL;
1707 tc_name->parent = tc;
1708 _TLIST_ADD(tc->child, tc_name);
1713 Allocate a bit of memory as a child of an existing pointer
/* core unnamed allocator backing the talloc() macros */
1715 _PUBLIC_ void *_talloc(const void *context, size_t size)
1717 struct talloc_chunk *tc;
1718 return __talloc(context, size, &tc);
1722 externally callable talloc_set_name_const()
1724 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1726 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1730 create a named talloc pointer. Any talloc pointer can be named, and
1731 talloc_named() operates just like talloc() except that it allows you
1732 to name the pointer.
/* const-name variant: no formatting, the name string is not copied */
1734 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1736 return _talloc_named_const(context, size, name);
1740 free a talloc pointer. This also frees all child pointers of this
1743 return 0 if the memory is actually freed, otherwise -1. The memory
1744 will not be freed if the ref_count is > 1 or the destructor (if
1745 any) returns non-zero
1747 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1749 struct talloc_chunk *tc;
1751 if (unlikely(ptr == NULL)) {
1755 tc = talloc_chunk_from_ptr(ptr);
1757 if (unlikely(tc->refs != NULL)) {
1758 struct talloc_reference_handle *h;
/* single reference, parent is the null context: the sole referrer
 * is unambiguous, so delegate to talloc_unlink() */
1760 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1761 /* in this case we do know which parent should
1762 get this pointer, as there is really only
1764 return talloc_unlink(null_context, ptr);
/* ambiguous free with outstanding references: log each referrer */
1767 talloc_log("ERROR: talloc_free with references at %s\n",
1770 for (h=tc->refs; h; h=h->next) {
1771 talloc_log("\treference at %s\n",
1777 return _talloc_free_internal(ptr, location);
1783 A talloc version of realloc. The context argument is only used if
/*
 * Resize ptr to 'size' bytes, handling four cases: plain heap chunks
 * (realloc), chunks inside a talloc pool (in-place grow/shrink or
 * migration out of the pool), size==0 (free) and ptr==NULL (malloc).
 * Memory-limit accounting is adjusted by the grow/shrink deltas.
 */
1786 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1788 struct talloc_chunk *tc;
1790 bool malloced = false;
1791 struct talloc_pool_hdr *pool_hdr = NULL;
1792 size_t old_size = 0;
1793 size_t new_size = 0;
1795 /* size zero is equivalent to free() */
1796 if (unlikely(size == 0)) {
1797 talloc_unlink(context, ptr);
1801 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1805 /* realloc(NULL) is equivalent to malloc() */
1807 return _talloc_named_const(context, size, name);
1810 tc = talloc_chunk_from_ptr(ptr);
1812 /* don't allow realloc on referenced pointers */
1813 if (unlikely(tc->refs)) {
1817 /* don't let anybody try to realloc a talloc_pool */
1818 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
/* growing: check against the memory limit before doing any work */
1822 if (tc->limit && (size > tc->size)) {
1823 if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1829 /* handle realloc inside a talloc_pool */
1830 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1831 pool_hdr = tc->pool;
1834 #if (ALWAYS_REALLOC == 0)
1835 /* don't shrink if we have less than 1k to gain */
1836 if (size < tc->size && tc->limit == NULL) {
1838 void *next_tc = tc_next_chunk(tc);
1839 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
1841 if (next_tc == pool_hdr->end) {
1842 /* note: tc->size has changed, so this works */
1843 pool_hdr->end = tc_next_chunk(tc);
1846 } else if ((tc->size - size) < 1024) {
1848 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1849 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1850 * after each realloc call, which slows down
1851 * testing a lot :-(.
1853 * That is why we only mark memory as undefined here.
1855 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1857 /* do not shrink if we have less than 1k to gain */
1861 } else if (tc->size == size) {
1863 * do not change the pointer if it is exactly
1871 * by resetting magic we catch users of the old memory
1873 * We mark this memory as free, and also over-stamp the talloc
1874 * magic with the old-style magic.
1876 * Why? This tries to avoid a memory read use-after-free from
1877 * disclosing our talloc magic, which would then allow an
1878 * attacker to prepare a valid header and so run a destructor.
1880 * What else? We have to re-stamp back a valid normal magic
1881 * on this memory once realloc() is done, as it will have done
1882 * a memcpy() into the new valid memory. We can't do this in
1883 * reverse as that would be a real use-after-free.
1885 _talloc_chunk_set_free(tc, NULL);
1889 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1890 pool_hdr->object_count--;
1892 if (new_ptr == NULL) {
1893 new_ptr = malloc(TC_HDR_SIZE+size);
1899 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1900 TC_INVALIDATE_FULL_CHUNK(tc);
1903 /* We're doing malloc then free here, so record the difference. */
1904 old_size = tc->size;
1906 new_ptr = malloc(size + TC_HDR_SIZE);
1908 memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
/* pool-resident chunk: try to grow in place inside the pool */
1914 struct talloc_chunk *pool_tc;
1915 void *next_tc = tc_next_chunk(tc);
1916 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1917 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1918 size_t space_needed;
1920 unsigned int chunk_count = pool_hdr->object_count;
1922 pool_tc = talloc_chunk_from_pool(pool_hdr);
1923 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1927 if (chunk_count == 1) {
1929 * optimize for the case where 'tc' is the only
1930 * chunk in the pool.
1932 char *start = tc_pool_first_chunk(pool_hdr);
1933 space_needed = new_chunk_size;
1934 space_left = (char *)tc_pool_end(pool_hdr) - start;
1936 if (space_left >= space_needed) {
1937 size_t old_used = TC_HDR_SIZE + tc->size;
1938 size_t new_used = TC_HDR_SIZE + size;
1941 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1945 * start -> tc may have
1946 * been freed and thus been marked as
1947 * VALGRIND_MEM_NOACCESS. Set it to
1948 * VALGRIND_MEM_UNDEFINED so we can
1949 * copy into it without valgrind errors.
1950 * We can't just mark
1951 * new_ptr -> new_ptr + old_used
1952 * as this may overlap on top of tc,
1953 * (which is why we use memmove, not
1954 * memcpy below) hence the MIN.
1956 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1957 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1961 memmove(new_ptr, tc, old_used);
1963 tc = (struct talloc_chunk *)new_ptr;
1964 TC_UNDEFINE_GROW_CHUNK(tc, size);
1967 * first we do not align the pool pointer
1968 * because we want to invalidate the padding
1971 pool_hdr->end = new_used + (char *)new_ptr;
1972 tc_invalidate_pool(pool_hdr);
1974 /* now the aligned pointer */
1975 pool_hdr->end = new_chunk_size + (char *)new_ptr;
/* aligned size unchanged: nothing to move */
1982 if (new_chunk_size == old_chunk_size) {
1983 TC_UNDEFINE_GROW_CHUNK(tc, size);
1984 _talloc_chunk_set_not_free(tc);
1989 if (next_tc == pool_hdr->end) {
1991 * optimize for the case where 'tc' is the last
1992 * chunk in the pool.
1994 space_needed = new_chunk_size - old_chunk_size;
1995 space_left = tc_pool_space_left(pool_hdr);
1997 if (space_left >= space_needed) {
1998 TC_UNDEFINE_GROW_CHUNK(tc, size);
1999 _talloc_chunk_set_not_free(tc);
2001 pool_hdr->end = tc_next_chunk(tc);
/* in-place grow impossible: allocate elsewhere and copy out */
2006 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
2008 if (new_ptr == NULL) {
2009 new_ptr = malloc(TC_HDR_SIZE+size);
2015 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
2017 _tc_free_poolmem(tc, __location__ "_talloc_realloc");
2021 /* We're doing realloc here, so record the difference. */
2022 old_size = tc->size;
2024 new_ptr = realloc(tc, size + TC_HDR_SIZE);
2028 if (unlikely(!new_ptr)) {
2030 * Ok, this is a strange spot. We have to put back
2031 * the old talloc_magic and any flags, except the
2032 * TALLOC_FLAG_FREE as this was not free'ed by the
2033 * realloc() call after all
2035 _talloc_chunk_set_not_free(tc);
2040 * tc is now the new value from realloc(), the old memory we
2041 * can't access any more and was preemptively marked as
2042 * TALLOC_FLAG_FREE before the call. Now we mark it as not
2045 tc = (struct talloc_chunk *)new_ptr;
2046 _talloc_chunk_set_not_free(tc);
2048 tc->flags &= ~TALLOC_FLAG_POOLMEM;
/* the chunk may have moved: re-point all sibling/child links at it */
2051 tc->parent->child = tc;
2054 tc->child->parent = tc;
2058 tc->prev->next = tc;
2061 tc->next->prev = tc;
/* finally adjust memory-limit accounting for the size delta */
2064 if (new_size > old_size) {
2065 talloc_memlimit_grow(tc->limit, new_size - old_size);
2066 } else if (new_size < old_size) {
2067 talloc_memlimit_shrink(tc->limit, old_size - new_size);
2071 _tc_set_name_const(tc, name);
2073 return TC_PTR_FROM_CHUNK(tc);
2077 a wrapper around talloc_steal() for situations where you are moving a pointer
2078 between two structures, and want the old pointer to be set to NULL
/* steal the pointer referenced by _pptr under new_ctx and return it */
2080 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2082 const void **pptr = discard_const_p(const void *,_pptr);
2083 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
/* selects what _talloc_total_mem_internal() accumulates */
2088 enum talloc_mem_count_type {
/*
 * Recursively total memory (or blocks) for ptr and all children.
 * The TALLOC_FLAG_LOOP flag guards against reference cycles, and
 * old_limit/new_limit allow re-homing memlimit ownership on the walk.
 */
2094 static inline size_t _talloc_total_mem_internal(const void *ptr,
2095 enum talloc_mem_count_type type,
2096 struct talloc_memlimit *old_limit,
2097 struct talloc_memlimit *new_limit)
2100 struct talloc_chunk *c, *tc;
2109 tc = talloc_chunk_from_ptr(ptr);
2111 if (old_limit || new_limit) {
2112 if (tc->limit && tc->limit->upper == old_limit) {
2113 tc->limit->upper = new_limit;
2117 /* optimize in the memlimits case */
2118 if (type == TOTAL_MEM_LIMIT &&
2119 tc->limit != NULL &&
2120 tc->limit != old_limit &&
2121 tc->limit->parent == tc) {
2122 return tc->limit->cur_size;
/* already visited on this walk: break the cycle */
2125 if (tc->flags & TALLOC_FLAG_LOOP) {
2129 tc->flags |= TALLOC_FLAG_LOOP;
2131 if (old_limit || new_limit) {
2132 if (old_limit == tc->limit) {
2133 tc->limit = new_limit;
2138 case TOTAL_MEM_SIZE:
2139 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2143 case TOTAL_MEM_BLOCKS:
2146 case TOTAL_MEM_LIMIT:
2147 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2149 * Don't count memory allocated from a pool
2150 * when calculating limits. Only count the
2153 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2154 if (tc->flags & TALLOC_FLAG_POOL) {
2156 * If this is a pool, the allocated
2157 * size is in the pool header, and
2158 * remember to add in the prefix
2161 struct talloc_pool_hdr *pool_hdr
2162 = talloc_pool_from_chunk(tc);
2163 total = pool_hdr->poolsize +
2167 total = tc->size + TC_HDR_SIZE;
/* recurse into every child and accumulate */
2173 for (c = tc->child; c; c = c->next) {
2174 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2175 old_limit, new_limit);
2178 tc->flags &= ~TALLOC_FLAG_LOOP;
2184 return the total size of a talloc pool (subtree)
2186 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2188 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2192 return the total number of blocks in a talloc pool (subtree)
2194 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2196 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2200 return the number of external references to a pointer
/* counts the entries of the chunk's reference-handle list */
2202 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2204 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2205 struct talloc_reference_handle *h;
2208 for (h=tc->refs;h;h=h->next) {
2215 report on memory usage by all children of a pointer, giving a full tree view
/*
 * Walk the subtree under ptr, invoking callback() per chunk.
 * max_depth < 0 means unlimited depth; reference handles are reported
 * via their target pointer with is_ref=1 and are not descended into.
 */
2217 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2218 void (*callback)(const void *ptr,
2219 int depth, int max_depth,
2221 void *private_data),
2224 struct talloc_chunk *c, *tc;
2229 if (ptr == NULL) return;
2231 tc = talloc_chunk_from_ptr(ptr);
/* cycle guard: skip chunks already on the current walk */
2233 if (tc->flags & TALLOC_FLAG_LOOP) {
2237 callback(ptr, depth, max_depth, 0, private_data);
2239 if (max_depth >= 0 && depth >= max_depth) {
2243 tc->flags |= TALLOC_FLAG_LOOP;
2244 for (c=tc->child;c;c=c->next) {
2245 if (c->name == TALLOC_MAGIC_REFERENCE) {
2246 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2247 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2249 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2252 tc->flags &= ~TALLOC_FLAG_LOOP;
/*
 * Per-chunk callback used by talloc_report_depth_file(): prints one
 * indented line per chunk (or reference) to the given FILE.
 */
2255 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2257 const char *name = __talloc_get_name(ptr);
2258 struct talloc_chunk *tc;
2259 FILE *f = (FILE *)_f;
2262 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2266 tc = talloc_chunk_from_ptr(ptr);
/* memlimit roots get a dedicated line showing max/current size */
2267 if (tc->limit && tc->limit->parent == tc) {
2268 fprintf(f, "%*s%-30s is a memlimit context"
2269 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2272 (unsigned long)tc->limit->max_size,
2273 (unsigned long)tc->limit->cur_size);
/* depth 0: report header line for the whole tree */
2277 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2278 (max_depth < 0 ? "full " :""), name,
2279 (unsigned long)talloc_total_size(ptr),
2280 (unsigned long)talloc_total_blocks(ptr));
2284 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2287 (unsigned long)talloc_total_size(ptr),
2288 (unsigned long)talloc_total_blocks(ptr),
2289 (int)talloc_reference_count(ptr), ptr);
/* dump printable bytes verbatim, others as ~hex */
2292 fprintf(f, "content: ");
2293 if (talloc_total_size(ptr)) {
2294 int tot = talloc_total_size(ptr);
2297 for (i = 0; i < tot; i++) {
2298 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2299 fprintf(f, "%c", ((char *)ptr)[i]);
2301 fprintf(f, "~%02x", ((char *)ptr)[i]);
2310 report on memory usage by all children of a pointer, giving a full tree view
2312 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2315 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2321 report on memory usage by all children of a pointer, giving a full tree view
/* unlimited-depth report (max_depth = -1) */
2323 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2325 talloc_report_depth_file(ptr, 0, -1, f);
2329 report on memory usage by all children of a pointer
/* one-level report (max_depth = 1) */
2331 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2333 talloc_report_depth_file(ptr, 0, 1, f);
2337 enable tracking of the NULL context
2339 _PUBLIC_ void talloc_enable_null_tracking(void)
2341 if (null_context == NULL) {
2342 null_context = _talloc_named_const(NULL, 0, "null_context");
/* an existing autofree context is moved under the new null context */
2343 if (autofree_context != NULL) {
2344 talloc_reparent(NULL, null_context, autofree_context);
2350 enable tracking of the NULL context, not moving the autofree context
2351 into the NULL context. This is needed for the talloc testsuite
2353 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2355 if (null_context == NULL) {
2356 null_context = _talloc_named_const(NULL, 0, "null_context");
2361 disable tracking of the NULL context
2363 _PUBLIC_ void talloc_disable_null_tracking(void)
2365 if (null_context != NULL) {
2366 /* we have to move any children onto the real NULL
2368 struct talloc_chunk *tc, *tc2;
2369 tc = talloc_chunk_from_ptr(null_context);
/* detach every child's links to the null context chunk before
 * freeing it, so they become genuine NULL-parented chunks */
2370 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2371 if (tc2->parent == tc) tc2->parent = NULL;
2372 if (tc2->prev == tc) tc2->prev = NULL;
2374 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2375 if (tc2->parent == tc) tc2->parent = NULL;
2376 if (tc2->prev == tc) tc2->prev = NULL;
2381 talloc_free(null_context);
2382 null_context = NULL;
2386 enable leak reporting on exit
2388 _PUBLIC_ void talloc_enable_leak_report(void)
2390 talloc_enable_null_tracking();
2391 talloc_report_null = true;
2392 talloc_setup_atexit();
2396 enable full leak reporting on exit
/* as above, but requests the unlimited-depth report at exit */
2398 _PUBLIC_ void talloc_enable_leak_report_full(void)
2400 talloc_enable_null_tracking();
2401 talloc_report_null_full = true;
2402 talloc_setup_atexit();
2406 talloc and zero memory.
2408 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2410 void *p = _talloc_named_const(ctx, size, name);
2413 memset(p, '\0', size);
2420 memdup with a talloc.
2422 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2424 void *newp = _talloc_named_const(t, size, name);
2427 memcpy(newp, p, size);
/* shared helper: copy 'len' bytes of p into a fresh NUL-terminated
 * talloc string and name the chunk after its own contents */
2433 static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2436 struct talloc_chunk *tc;
2438 ret = (char *)__talloc(t, len + 1, &tc);
2439 if (unlikely(!ret)) return NULL;
2441 memcpy(ret, p, len);
2444 _tc_set_name_const(tc, ret);
2449 strdup with a talloc
2451 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2453 if (unlikely(!p)) return NULL;
2454 return __talloc_strlendup(t, p, strlen(p));
2458 strndup with a talloc
2460 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2462 if (unlikely(!p)) return NULL;
2463 return __talloc_strlendup(t, p, strnlen(p, n));
/* shared helper: grow s and append alen bytes of a plus a NUL */
2466 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2467 const char *a, size_t alen)
2471 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2472 if (unlikely(!ret)) return NULL;
2474 /* append the string and the trailing \0 */
2475 memcpy(&ret[slen], a, alen);
2478 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2483 * Appends at the end of the string.
2485 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2488 return talloc_strdup(NULL, a);
2495 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2499 * Appends at the end of the talloc'ed buffer,
2500 * not the end of the string.
2502 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2507 return talloc_strdup(NULL, a);
/* buffer variant: append after the full allocated size, not strlen */
2514 slen = talloc_get_size(s);
2515 if (likely(slen > 0)) {
2519 return __talloc_strlendup_append(s, slen, a, strlen(a));
2523 * Appends at the end of the string.
2525 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2528 return talloc_strndup(NULL, a, n);
2535 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2539 * Appends at the end of the talloc'ed buffer,
2540 * not the end of the string.
2542 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2547 return talloc_strndup(NULL, a, n);
2554 slen = talloc_get_size(s);
2555 if (likely(slen > 0)) {
2559 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
/* portability fallback for platforms lacking C99 va_copy() */
2562 #ifndef HAVE_VA_COPY
2563 #ifdef HAVE___VA_COPY
2564 #define va_copy(dest, src) __va_copy(dest, src)
2566 #define va_copy(dest, src) (dest) = (src)
2570 static struct talloc_chunk *_vasprintf_tc(const void *t,
2572 va_list ap) PRINTF_ATTRIBUTE(2,0);
/*
 * Core of talloc_vasprintf(): format into a stack buffer first, then
 * allocate exactly the needed talloc chunk (re-formatting only when
 * the result did not fit the stack buffer).
 */
2574 static struct talloc_chunk *_vasprintf_tc(const void *t,
2582 struct talloc_chunk *tc;
2585 /* this call looks strange, but it makes it work on older solaris boxes */
2587 vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
2589 if (unlikely(vlen < 0)) {
/* overflow check on len + 1 */
2593 if (unlikely(len + 1 < len)) {
2597 ret = (char *)__talloc(t, len+1, &tc);
2598 if (unlikely(!ret)) return NULL;
2600 if (len < sizeof(buf)) {
2601 memcpy(ret, buf, len+1);
2604 vsnprintf(ret, len+1, fmt, ap2);
2608 _tc_set_name_const(tc, ret);
2612 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2614 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2618 return TC_PTR_FROM_CHUNK(tc);
2623 Perform string formatting, and return a pointer to newly allocated
2624 memory holding the result, inside a memory pool.
2626 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2632 ret = talloc_vasprintf(t, fmt, ap);
2637 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2638 const char *fmt, va_list ap)
2639 PRINTF_ATTRIBUTE(3,0);
/* shared helper: measure the formatted length, grow s, then format
 * directly into the tail of the reallocated string */
2641 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2642 const char *fmt, va_list ap)
2649 alen = vsnprintf(&c, 1, fmt, ap2);
2653 /* Either the vsnprintf failed or the format resulted in
2654 * no characters being formatted. In the former case, we
2655 * ought to return NULL, in the latter we ought to return
2656 * the original string. Most current callers of this
2657 * function expect it to never return NULL.
2662 s = talloc_realloc(NULL, s, char, slen + alen + 1);
2663 if (!s) return NULL;
2666 vsnprintf(s + slen, alen + 1, fmt, ap2);
2669 _tc_set_name_const(talloc_chunk_from_ptr(s), s);
2674 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2675 * and return @p s, which may have moved. Good for gradually
2676 * accumulating output into a string buffer. Appends at the end
2679 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2682 return talloc_vasprintf(NULL, fmt, ap);
2685 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2689 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2690 * and return @p s, which may have moved. Always appends at the
2691 * end of the talloc'ed buffer, not the end of the string.
2693 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2698 return talloc_vasprintf(NULL, fmt, ap);
2701 slen = talloc_get_size(s);
2702 if (likely(slen > 0)) {
2706 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2710 Realloc @p s to append the formatted result of @p fmt and return @p
2711 s, which may have moved. Good for gradually accumulating output
2712 into a string buffer.
2714 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2719 s = talloc_vasprintf_append(s, fmt, ap);
2725 Realloc @p s to append the formatted result of @p fmt and return @p
2726 s, which may have moved. Good for gradually accumulating output
2729 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2734 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2740 alloc an array, checking for integer overflow in the array size
2742 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2744 if (count >= MAX_TALLOC_SIZE/el_size) {
2747 return _talloc_named_const(ctx, el_size * count, name);
2751 alloc an zero array, checking for integer overflow in the array size
2753 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2755 if (count >= MAX_TALLOC_SIZE/el_size) {
2758 return _talloc_zero(ctx, el_size * count, name);
2762 realloc an array, checking for integer overflow in the array size
2764 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2766 if (count >= MAX_TALLOC_SIZE/el_size) {
2769 return _talloc_realloc(ctx, ptr, el_size * count, name);
2773 a function version of talloc_realloc(), so it can be passed as a function pointer
2774 to libraries that want a realloc function (a realloc function encapsulates
2775 all the basic capabilities of an allocation library, which is why this is useful)
2777 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2779 return _talloc_realloc(context, ptr, size, NULL);
/* destructor: clear the global so a fresh context is made next time */
2783 static int talloc_autofree_destructor(void *ptr)
2785 autofree_context = NULL;
2790 return a context which will be auto-freed on exit
2791 this is useful for reducing the noise in leak reports
2793 _PUBLIC_ void *talloc_autofree_context(void)
2795 if (autofree_context == NULL) {
2796 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2797 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2798 talloc_setup_atexit();
2800 return autofree_context;
/* size of the chunk itself (not the subtree); NULL handled first */
2803 _PUBLIC_ size_t talloc_get_size(const void *context)
2805 struct talloc_chunk *tc;
2807 if (context == NULL) {
2811 tc = talloc_chunk_from_ptr(context);
2817 find a parent of this context that has the given name, if any
2819 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2821 struct talloc_chunk *tc;
2823 if (context == NULL) {
2827 tc = talloc_chunk_from_ptr(context);
2829 if (tc->name && strcmp(tc->name, name) == 0) {
2830 return TC_PTR_FROM_CHUNK(tc);
/* walk back to the head of the sibling list to reach the parent */
2832 while (tc && tc->prev) tc = tc->prev;
2841 show the parentage of a context
2843 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2845 struct talloc_chunk *tc;
2847 if (context == NULL) {
2848 fprintf(file, "talloc no parents for NULL\n");
2852 tc = talloc_chunk_from_ptr(context);
2853 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2855 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
/* same sibling-list walk as above to climb one level per iteration */
2856 while (tc && tc->prev) tc = tc->prev;
2865 return 1 if ptr is a parent of context
/* depth-bounded upward walk from context looking for ptr */
2867 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2869 struct talloc_chunk *tc;
2871 if (context == NULL) {
2875 tc = talloc_chunk_from_ptr(context);
2880 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2881 while (tc && tc->prev) tc = tc->prev;
2891 return 1 if ptr is a parent of context
2893 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2895 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2899 return the total size of memory used by this context and all children
2901 static inline size_t _talloc_total_limit_size(const void *ptr,
2902 struct talloc_memlimit *old_limit,
2903 struct talloc_memlimit *new_limit)
2905 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2906 old_limit, new_limit);
/* true iff adding 'size' bytes stays within every limit in the chain */
2909 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2911 struct talloc_memlimit *l;
2913 for (l = limit; l != NULL; l = l->upper) {
2914 if (l->max_size != 0 &&
2915 ((l->max_size <= l->cur_size) ||
2916 (l->max_size - l->cur_size < size))) {
2925 Update memory limits when freeing a talloc_chunk.
2927 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2929 size_t limit_shrink_size;
2936 * Pool entries don't count. Only the pools
2937 * themselves are counted as part of the memory
2938 * limits. Note that this also takes care of
2939 * nested pools which have both flags
2940 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2942 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2947 * If we are part of a memory limited context hierarchy
2948 * we need to subtract the memory used from the counters
2951 limit_shrink_size = tc->size+TC_HDR_SIZE;
2954 * If we're deallocating a pool, take into
2955 * account the prefix size added for the pool.
2958 if (tc->flags & TALLOC_FLAG_POOL) {
2959 limit_shrink_size += TP_HDR_SIZE;
2962 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
/* if this chunk owned the limit struct, it goes away with the chunk */
2964 if (tc->limit->parent == tc) {
2972 Increase memory limit accounting after a malloc/realloc.
2974 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2977 struct talloc_memlimit *l;
/* propagate the growth up the whole limit chain, aborting on
 * unsigned wrap-around, which would indicate corrupted accounting */
2979 for (l = limit; l != NULL; l = l->upper) {
2980 size_t new_cur_size = l->cur_size + size;
2981 if (new_cur_size < l->cur_size) {
2982 talloc_abort("logic error in talloc_memlimit_grow\n");
2985 l->cur_size = new_cur_size;
2990 Decrease memory limit accounting after a free/realloc.
2992 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2995 struct talloc_memlimit *l;
/* mirror of grow(): underflow means the counters are inconsistent */
2997 for (l = limit; l != NULL; l = l->upper) {
2998 if (l->cur_size < size) {
2999 talloc_abort("logic error in talloc_memlimit_shrink\n");
3002 l->cur_size = l->cur_size - size;
3006 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
3008 struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
3009 struct talloc_memlimit *orig_limit;
3010 struct talloc_memlimit *limit = NULL;
3012 if (tc->limit && tc->limit->parent == tc) {
3013 tc->limit->max_size = max_size;
3016 orig_limit = tc->limit;
3018 limit = malloc(sizeof(struct talloc_memlimit));
3019 if (limit == NULL) {
3023 limit->max_size = max_size;
3024 limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
3027 limit->upper = orig_limit;
3029 limit->upper = NULL;