2 Samba Unix SMB/CIFS implementation.
4 Samba trivial allocation library - new interface
6 NOTE: Please read talloc_guide.txt for full documentation
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
30 inspired by http://swapped.cc/halloc/
36 #ifdef HAVE_SYS_AUXV_H
40 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
41 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
44 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
45 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
48 /* Special macros that are no-ops except when run under Valgrind on
49 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
50 #ifdef HAVE_VALGRIND_MEMCHECK_H
51 /* memcheck.h includes valgrind.h */
52 #include <valgrind/memcheck.h>
53 #elif defined(HAVE_VALGRIND_H)
57 /* use this to force every realloc to change the pointer, to stress test
58 code that might not cope */
59 #define ALWAYS_REALLOC 0
62 #define MAX_TALLOC_SIZE 0x10000000
64 #define TALLOC_FLAG_FREE 0x01
65 #define TALLOC_FLAG_LOOP 0x02
66 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
67 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
70 * Bits above this are random, used to make it harder to fake talloc
71 * headers during an attack. Try not to change this without good reason.
73 #define TALLOC_FLAG_MASK 0x0F
75 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
77 #define TALLOC_MAGIC_BASE 0xe814ec70
78 static unsigned int talloc_magic = (
81 (TALLOC_BUILD_VERSION_MAJOR << 24) +
82 (TALLOC_BUILD_VERSION_MINOR << 16) +
83 (TALLOC_BUILD_VERSION_RELEASE << 8)));
85 /* by default we abort when given a bad pointer (such as when talloc_free() is called
86 on a pointer that came from malloc() */
88 #define TALLOC_ABORT(reason) abort()
91 #ifndef discard_const_p
92 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
93 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
95 # define discard_const_p(type, ptr) ((type *)(ptr))
99 /* these macros gain us a few percent of speed on gcc */
101 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
102 as its first argument */
104 #define likely(x) __builtin_expect(!!(x), 1)
107 #define unlikely(x) __builtin_expect(!!(x), 0)
111 #define likely(x) (x)
114 #define unlikely(x) (x)
118 /* this null_context is only used if talloc_enable_leak_report() or
119 talloc_enable_leak_report_full() is called, otherwise it remains
122 static void *null_context;
123 static void *autofree_context;
125 /* used to enable fill of memory on free, which can be useful for
126 * catching use after free errors when valgrind is too slow
134 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
137 * do not wipe the header, to allow the
138 * double-free logic to still work
140 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
141 if (unlikely(talloc_fill.enabled)) { \
142 size_t _flen = (_tc)->size; \
143 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
144 memset(_fptr, talloc_fill.fill_value, _flen); \
148 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
149 /* Mark the whole chunk as not accessible */
150 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
151 size_t _flen = TC_HDR_SIZE + (_tc)->size; \
152 char *_fptr = (char *)(_tc); \
153 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
156 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
159 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
160 TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
161 TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
164 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
165 if (unlikely(talloc_fill.enabled)) { \
166 size_t _flen = (_tc)->size - (_new_size); \
167 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
168 _fptr += (_new_size); \
169 memset(_fptr, talloc_fill.fill_value, _flen); \
173 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
174 /* Mark the unused bytes not accessible */
175 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
176 size_t _flen = (_tc)->size - (_new_size); \
177 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
178 _fptr += (_new_size); \
179 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
182 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
185 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
186 TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
187 TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
190 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
191 if (unlikely(talloc_fill.enabled)) { \
192 size_t _flen = (_tc)->size - (_new_size); \
193 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
194 _fptr += (_new_size); \
195 memset(_fptr, talloc_fill.fill_value, _flen); \
199 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
200 /* Mark the unused bytes as undefined */
201 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
202 size_t _flen = (_tc)->size - (_new_size); \
203 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
204 _fptr += (_new_size); \
205 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
208 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
211 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
212 TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
213 TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
216 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
217 /* Mark the new bytes as undefined */
218 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
219 size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
220 size_t _new_used = TC_HDR_SIZE + (_new_size); \
221 size_t _flen = _new_used - _old_used; \
222 char *_fptr = _old_used + (char *)(_tc); \
223 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
226 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
229 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
230 TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
233 struct talloc_reference_handle {
234 struct talloc_reference_handle *next, *prev;
236 const char *location;
239 struct talloc_memlimit {
240 struct talloc_chunk *parent;
241 struct talloc_memlimit *upper;
246 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
247 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
249 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
251 static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
253 static inline void _tc_set_name_const(struct talloc_chunk *tc,
255 static struct talloc_chunk *_vasprintf_tc(const void *t,
259 typedef int (*talloc_destructor_t)(void *);
261 struct talloc_pool_hdr;
263 struct talloc_chunk {
265 * flags includes the talloc magic, which is randomised to
266 * make overwrite attacks harder
271 * If you have a logical tree like:
277 * <child 1> <child 2> <child 3>
279 * The actual talloc tree is:
283 * <child 1> - <child 2> - <child 3>
285 * The children are linked with next/prev pointers, and
286 * child 1 is linked to the parent with parent/child
290 struct talloc_chunk *next, *prev;
291 struct talloc_chunk *parent, *child;
292 struct talloc_reference_handle *refs;
293 talloc_destructor_t destructor;
299 * if 'limit' is set it means all *new* children of the context will
300 be limited to a total aggregate size of max_size for memory
302 * cur_size is used to keep track of the current use
304 struct talloc_memlimit *limit;
307 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
308 * is a pointer to the struct talloc_chunk of the pool that it was
309 * allocated from. This way children can quickly find the pool to chew
312 struct talloc_pool_hdr *pool;
315 /* 16 byte alignment seems to keep everyone happy */
316 #define TC_ALIGN16(s) (((s)+15)&~15)
317 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
318 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
320 _PUBLIC_ int talloc_version_major(void)
322 return TALLOC_VERSION_MAJOR;
325 _PUBLIC_ int talloc_version_minor(void)
327 return TALLOC_VERSION_MINOR;
330 _PUBLIC_ int talloc_test_get_magic(void)
335 static void (*talloc_log_fn)(const char *message);
337 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
339 talloc_log_fn = log_fn;
342 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
343 void talloc_lib_init(void) __attribute__((constructor));
344 void talloc_lib_init(void)
346 uint32_t random_value;
347 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
350 * Use the kernel-provided random values used for
351 * ASLR. This won't change per-exec, which is ideal for us
353 p = (uint8_t *) getauxval(AT_RANDOM);
356 * We get 16 bytes from getauxval. By calling rand(),
357 * a totally insecure PRNG, but one that will
358 * deterministically have a different value when called
359 * twice, we ensure that if two talloc-like libraries
360 * are somehow loaded in the same address space, that
361 * because we choose different bytes, we will keep the
362 * protection against collision of multiple talloc
365 * This protection is important because the effects of
366 * passing a talloc pointer from one to the other may
367 * be very hard to determine.
369 int offset = rand() % (16 - sizeof(random_value));
370 memcpy(&random_value, p + offset, sizeof(random_value));
375 * Otherwise, hope the location we are loaded in
376 * memory is randomised by someone else
378 random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
380 talloc_magic = random_value & ~TALLOC_FLAG_MASK;
383 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
386 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
387 static void talloc_log(const char *fmt, ...)
392 if (!talloc_log_fn) {
397 message = talloc_vasprintf(NULL, fmt, ap);
400 talloc_log_fn(message);
401 talloc_free(message);
404 static void talloc_log_stderr(const char *message)
406 fprintf(stderr, "%s", message);
409 _PUBLIC_ void talloc_set_log_stderr(void)
411 talloc_set_log_fn(talloc_log_stderr);
414 static void (*talloc_abort_fn)(const char *reason);
416 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
418 talloc_abort_fn = abort_fn;
421 static void talloc_abort(const char *reason)
423 talloc_log("%s\n", reason);
425 if (!talloc_abort_fn) {
426 TALLOC_ABORT(reason);
429 talloc_abort_fn(reason);
432 static void talloc_abort_access_after_free(void)
434 talloc_abort("Bad talloc magic value - access after free");
437 static void talloc_abort_unknown_value(void)
439 talloc_abort("Bad talloc magic value - unknown value");
442 /* panic if we get a bad magic value */
/*
 * Map a caller-visible pointer back to its talloc_chunk header, which
 * sits TC_HDR_SIZE bytes before the pointer (see TC_PTR_FROM_CHUNK),
 * and validate the randomised magic stored in tc->flags.
 * Two failure modes, both fatal:
 *  - magic wrong even with FREE ignored: not a talloc pointer at all;
 *  - magic right but TALLOC_FLAG_FREE set: access after free, and
 *    tc->name records where the first free happened.
 */
443 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
445 const char *pp = (const char *)ptr;
446 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
447 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
448 if ((tc->flags & (~TALLOC_FLAG_MASK)) != talloc_magic) {
/* non-flag bits do not match the per-process magic: unknown pointer */
449 talloc_abort_unknown_value();
/* magic matched, so the mismatch was TALLOC_FLAG_FREE: double free / use after free */
453 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
454 talloc_abort_access_after_free();
460 /* hook into the front of the list */
461 #define _TLIST_ADD(list, p) \
465 (p)->next = (p)->prev = NULL; \
467 (list)->prev = (p); \
468 (p)->next = (list); \
474 /* remove an element from a list - element doesn't have to be in list. */
475 #define _TLIST_REMOVE(list, p) \
477 if ((p) == (list)) { \
478 (list) = (p)->next; \
479 if (list) (list)->prev = NULL; \
481 if ((p)->prev) (p)->prev->next = (p)->next; \
482 if ((p)->next) (p)->next->prev = (p)->prev; \
484 if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
489 return the parent chunk of a pointer
/*
 * talloc_parent_chunk: children are kept on a next/prev sibling list
 * (see the struct talloc_chunk layout comment); walk prev-links back
 * to the list head to reach the chunk that links to the parent.
 * NULL input is handled early (the return lines are not visible in
 * this excerpt).
 */
491 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
493 struct talloc_chunk *tc;
495 if (unlikely(ptr == NULL)) {
499 tc = talloc_chunk_from_ptr(ptr);
/* walk to the head of the sibling list */
500 while (tc->prev) tc=tc->prev;
/* Public wrapper: return the parent's user pointer, or NULL. */
505 _PUBLIC_ void *talloc_parent(const void *ptr)
507 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
508 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
/* Public wrapper: return the parent's name, or NULL if there is no parent. */
514 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
516 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
517 return tc? tc->name : NULL;
521 A pool carries an in-pool object count in the first 16
522 bytes. This is done to support talloc_steal() to a parent outside of the
523 pool. The count includes the pool itself, so a talloc_free() on a pool will
524 only destroy the pool if the count has dropped to zero. A talloc_free() of a
525 pool member will reduce the count, and eventually also call free(3) on the
528 The object count is not put into "struct talloc_chunk" because it is only
529 relevant for talloc pools and the alignment to 16 bytes would increase the
530 memory footprint of each talloc chunk by those 16 bytes.
533 struct talloc_pool_hdr {
535 unsigned int object_count;
539 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
/*
 * A pool allocation is laid out as:
 *   [talloc_pool_hdr][talloc_chunk][pool memory ...]
 * The two helpers below convert between the pool header and the
 * pool's own talloc_chunk, which are TP_HDR_SIZE bytes apart.
 */
541 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
543 return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
546 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
548 return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
/* First byte past the pool's usable memory (chunk header + poolsize). */
551 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
553 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
554 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
/* Bytes still free between the bump cursor (pool_hdr->end) and the pool end. */
557 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
559 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
562 /* If tc is inside a pool, this gives the next neighbour. */
563 static inline void *tc_next_chunk(struct talloc_chunk *tc)
565 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
/* First member chunk inside the pool, directly after the pool's own chunk. */
568 static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
570 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
571 return tc_next_chunk(tc);
574 /* Mark the whole remaining pool as not accessible */
/*
 * Poison the unused tail of a pool, from the bump cursor to the pool
 * end: optionally overwrite it with the TALLOC_FREE_FILL byte (cheap
 * use-after-free detection), and under DEVELOPER+valgrind mark it
 * NOACCESS so stray reads/writes are reported.
 */
575 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
577 size_t flen = tc_pool_space_left(pool_hdr);
579 if (unlikely(talloc_fill.enabled)) {
580 memset(pool_hdr->end, talloc_fill.fill_value, flen);
583 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
584 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
/*
 * Try to satisfy an allocation out of the pool the parent belongs to
 * (either the parent is the pool itself, TALLOC_FLAG_POOL, or is pool
 * member memory, TALLOC_FLAG_POOLMEM).  On success the pool's bump
 * cursor 'end' advances by the 16-byte-aligned chunk size, the chunk
 * is flagged POOLMEM with a back-pointer to the pool header, and the
 * pool's object_count is bumped.  'prefix_len' bytes are reserved
 * before the chunk header (used for nested pool headers).  The early
 * bail-out returns (no pool / no space) are not visible in this
 * excerpt; callers fall back to malloc in that case.
 */
592 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
593 size_t size, size_t prefix_len)
595 struct talloc_pool_hdr *pool_hdr = NULL;
597 struct talloc_chunk *result;
600 if (parent == NULL) {
604 if (parent->flags & TALLOC_FLAG_POOL) {
605 pool_hdr = talloc_pool_from_chunk(parent);
607 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
608 pool_hdr = parent->pool;
611 if (pool_hdr == NULL) {
615 space_left = tc_pool_space_left(pool_hdr);
618 * Align size to 16 bytes
620 chunk_size = TC_ALIGN16(size + prefix_len);
622 if (space_left < chunk_size) {
626 result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
628 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
629 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
/* advance the bump cursor past the new chunk */
632 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
634 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
635 result->pool = pool_hdr;
637 pool_hdr->object_count++;
643 Allocate a bit of memory as a child of an existing pointer
/*
 * Core allocator.  Allocates TC_HDR_SIZE + size (+ prefix_len bytes
 * before the header), first trying the parent's pool, then falling
 * back to malloc.  On the malloc path the parent's memlimit (if any)
 * is checked before and grown after the allocation.  The new chunk is
 * stamped with the randomised magic and hooked into the parent's
 * child list; with no context it starts unlinked.  Returns the user
 * pointer and hands the chunk back through *tc_ret.  Several error
 * returns and list-link lines are not visible in this excerpt.
 */
645 static inline void *__talloc_with_prefix(const void *context,
648 struct talloc_chunk **tc_ret)
650 struct talloc_chunk *tc = NULL;
651 struct talloc_memlimit *limit = NULL;
652 size_t total_len = TC_HDR_SIZE + size + prefix_len;
653 struct talloc_chunk *parent = NULL;
655 if (unlikely(context == NULL)) {
656 context = null_context;
659 if (unlikely(size >= MAX_TALLOC_SIZE)) {
/* total_len wrapped around: reject integer-overflowing sizes */
663 if (unlikely(total_len < TC_HDR_SIZE)) {
667 if (likely(context != NULL)) {
668 parent = talloc_chunk_from_ptr(context);
670 if (parent->limit != NULL) {
671 limit = parent->limit;
/* fast path: carve the chunk out of the parent's pool, if any */
674 tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
681 * Only do the memlimit check/update on actual allocation.
683 if (!talloc_memlimit_check(limit, total_len)) {
688 ptr = malloc(total_len);
689 if (unlikely(ptr == NULL)) {
692 tc = (struct talloc_chunk *)(ptr + prefix_len);
693 tc->flags = talloc_magic;
696 talloc_memlimit_grow(limit, total_len);
701 tc->destructor = NULL;
/* hook the new chunk into the front of the parent's child list */
706 if (likely(context != NULL)) {
708 parent->child->parent = NULL;
709 tc->next = parent->child;
718 tc->next = tc->prev = tc->parent = NULL;
722 return TC_PTR_FROM_CHUNK(tc);
/* Convenience wrapper: ordinary allocation with no prefix bytes. */
725 static inline void *__talloc(const void *context,
727 struct talloc_chunk **tc)
729 return __talloc_with_prefix(context, size, 0, tc);
733 * Create a talloc pool
/*
 * Allocate a pool: a normal talloc chunk with a talloc_pool_hdr
 * prefixed before it (TP_HDR_SIZE via __talloc_with_prefix).  The
 * header starts with object_count = 1 (the pool itself), the bump
 * cursor 'end' at the start of the usable area, and the whole usable
 * area poisoned/invalidated until members are allocated from it.
 */
736 static inline void *_talloc_pool(const void *context, size_t size)
738 struct talloc_chunk *tc;
739 struct talloc_pool_hdr *pool_hdr;
742 result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
744 if (unlikely(result == NULL)) {
748 pool_hdr = talloc_pool_from_chunk(tc);
750 tc->flags |= TALLOC_FLAG_POOL;
753 pool_hdr->object_count = 1;
754 pool_hdr->end = result;
755 pool_hdr->poolsize = size;
757 tc_invalidate_pool(pool_hdr);
/* Public entry point for pool creation. */
762 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
764 return _talloc_pool(context, size);
768 * Create a talloc pool correctly sized for a basic size plus
769 * a number of subobjects whose total size is given. Essentially
770 * a custom allocator for talloc to reduce fragmentation.
773 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
775 const char *type_name,
776 unsigned num_subobjects,
777 size_t total_subobjects_size)
779 size_t poolsize, subobjects_slack, tmp;
780 struct talloc_chunk *tc;
781 struct talloc_pool_hdr *pool_hdr;
784 poolsize = type_size + total_subobjects_size;
786 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
790 if (num_subobjects == UINT_MAX) {
793 num_subobjects += 1; /* the object body itself */
796 * Alignment can increase the pool size by at most 15 bytes per object
797 * plus alignment for the object itself
799 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
800 if (subobjects_slack < num_subobjects) {
804 tmp = poolsize + subobjects_slack;
805 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
810 ret = _talloc_pool(ctx, poolsize);
815 tc = talloc_chunk_from_ptr(ret);
816 tc->size = type_size;
818 pool_hdr = talloc_pool_from_chunk(tc);
820 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
821 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
824 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
826 _tc_set_name_const(tc, type_name);
834 setup a destructor to be called on free of a pointer
835 the destructor should return 0 on success, or -1 on failure.
836 if the destructor fails then the free is failed, and the memory can
837 continue to be used
839 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
841 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
842 tc->destructor = destructor;
846 increase the reference count on a piece of memory.
/* Implemented by adding a reference owned by the global null_context. */
848 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
850 if (unlikely(!talloc_reference(null_context, ptr))) {
857 helper for talloc_reference()
859 this is referenced by a function pointer and should not be inline
/* Destructor of a reference handle: unlink it from the target's refs list. */
861 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
863 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
864 _TLIST_REMOVE(ptr_tc->refs, handle);
869 more efficient way to add a name to a pointer - the name must point to a
872 static inline void _tc_set_name_const(struct talloc_chunk *tc,
879 internal talloc_named_const()
881 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
884 struct talloc_chunk *tc;
886 ptr = __talloc(context, size, &tc);
887 if (unlikely(ptr == NULL)) {
891 _tc_set_name_const(tc, name);
897 make a secondary reference to a pointer, hanging off the given context.
898 the pointer remains valid until both the original caller and this given
901 the major use for this is when two different structures need to reference the
902 same underlying data, and you want to be able to free the two instances separately,
905 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
907 struct talloc_chunk *tc;
908 struct talloc_reference_handle *handle;
909 if (unlikely(ptr == NULL)) return NULL;
911 tc = talloc_chunk_from_ptr(ptr);
912 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
913 sizeof(struct talloc_reference_handle),
914 TALLOC_MAGIC_REFERENCE);
915 if (unlikely(handle == NULL)) return NULL;
917 /* note that we hang the destructor off the handle, not the
918 main context as that allows the caller to still setup their
919 own destructor on the context if they want to */
920 talloc_set_destructor(handle, talloc_reference_destructor);
921 handle->ptr = discard_const_p(void, ptr);
922 handle->location = location;
923 _TLIST_ADD(tc->refs, handle);
927 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
929 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
930 const char *location)
932 struct talloc_pool_hdr *pool;
933 struct talloc_chunk *pool_tc;
937 pool_tc = talloc_chunk_from_pool(pool);
938 next_tc = tc_next_chunk(tc);
940 tc->flags |= TALLOC_FLAG_FREE;
942 /* we mark the freed memory with where we called the free
943 * from. This means on a double free error we can report where
944 * the first free came from
948 TC_INVALIDATE_FULL_CHUNK(tc);
950 if (unlikely(pool->object_count == 0)) {
951 talloc_abort("Pool object count zero!");
955 pool->object_count--;
957 if (unlikely(pool->object_count == 1
958 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
960 * if there is just one object left in the pool
961 * and pool->flags does not have TALLOC_FLAG_FREE,
962 * it means this is the pool itself and
963 * the rest is available for new objects
966 pool->end = tc_pool_first_chunk(pool);
967 tc_invalidate_pool(pool);
971 if (unlikely(pool->object_count == 0)) {
973 * we mark the freed memory with where we called the free
974 * from. This means on a double free error we can report where
975 * the first free came from
977 pool_tc->name = location;
979 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
980 _tc_free_poolmem(pool_tc, location);
983 * The tc_memlimit_update_on_free()
984 * call takes into account the
985 * prefix TP_HDR_SIZE allocated before
986 * the pool talloc_chunk.
988 tc_memlimit_update_on_free(pool_tc);
989 TC_INVALIDATE_FULL_CHUNK(pool_tc);
995 if (pool->end == next_tc) {
997 * if pool->pool still points to end of
998 * 'tc' (which is stored in the 'next_tc' variable),
999 * we can reclaim the memory of 'tc'.
1006 * Do nothing. The memory is just "wasted", waiting for the pool
1007 * itself to be freed.
1011 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1013 const char *location);
1015 static inline int _talloc_free_internal(void *ptr, const char *location);
1018 internal free call that takes a struct talloc_chunk *.
1020 static inline int _tc_free_internal(struct talloc_chunk *tc,
1021 const char *location)
1024 void *ptr = TC_PTR_FROM_CHUNK(tc);
1026 if (unlikely(tc->refs)) {
1028 /* check if this is a reference from a child or
1029 * grandchild back to its parent or grandparent
1031 * in that case we need to remove the reference and
1032 * call another instance of talloc_free() on the current
1035 is_child = talloc_is_parent(tc->refs, ptr);
1036 _talloc_free_internal(tc->refs, location);
1038 return _talloc_free_internal(ptr, location);
1043 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1044 /* we have a free loop - stop looping */
1048 if (unlikely(tc->destructor)) {
1049 talloc_destructor_t d = tc->destructor;
1052 * Protect the destructor against some overwrite
1053 * attacks, by explicitly checking it has the right
1056 if (talloc_chunk_from_ptr(ptr) != tc) {
1058 * This can't actually happen, the
1059 * call itself will panic.
1061 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1064 if (d == (talloc_destructor_t)-1) {
1067 tc->destructor = (talloc_destructor_t)-1;
1070 * Only replace the destructor pointer if
1071 * calling the destructor didn't modify it.
1073 if (tc->destructor == (talloc_destructor_t)-1) {
1078 tc->destructor = NULL;
1082 _TLIST_REMOVE(tc->parent->child, tc);
1083 if (tc->parent->child) {
1084 tc->parent->child->parent = tc->parent;
1087 if (tc->prev) tc->prev->next = tc->next;
1088 if (tc->next) tc->next->prev = tc->prev;
1089 tc->prev = tc->next = NULL;
1092 tc->flags |= TALLOC_FLAG_LOOP;
1094 _tc_free_children_internal(tc, ptr, location);
1096 tc->flags |= TALLOC_FLAG_FREE;
1098 /* we mark the freed memory with where we called the free
1099 * from. This means on a double free error we can report where
1100 * the first free came from
1102 tc->name = location;
1104 if (tc->flags & TALLOC_FLAG_POOL) {
1105 struct talloc_pool_hdr *pool;
1107 pool = talloc_pool_from_chunk(tc);
1109 if (unlikely(pool->object_count == 0)) {
1110 talloc_abort("Pool object count zero!");
1114 pool->object_count--;
1116 if (likely(pool->object_count != 0)) {
1121 * With object_count==0, a pool becomes a normal piece of
1122 * memory to free. If it's allocated inside a pool, it needs
1123 * to be freed as poolmem, else it needs to be just freed.
1130 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1131 _tc_free_poolmem(tc, location);
1135 tc_memlimit_update_on_free(tc);
1137 TC_INVALIDATE_FULL_CHUNK(tc);
1143 internal talloc_free call
/*
 * Pointer-based free entry point: NULL is a no-op, then the chunk is
 * located (aborting on bad magic) and handed to _tc_free_internal.
 * 'location' is the caller's __location__, recorded for double-free
 * diagnostics.
 */
1145 static inline int _talloc_free_internal(void *ptr, const char *location)
1147 struct talloc_chunk *tc;
1149 if (unlikely(ptr == NULL)) {
1153 /* lazily initialise the talloc fill value from the TALLOC_FREE_FILL environment variable */
1154 if (unlikely(!talloc_fill.initialised)) {
1155 const char *fill = getenv(TALLOC_FILL_ENV);
1157 talloc_fill.enabled = true;
1158 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1160 talloc_fill.initialised = true;
1163 tc = talloc_chunk_from_ptr(ptr);
1164 return _tc_free_internal(tc, location);
1167 static inline size_t _talloc_total_limit_size(const void *ptr,
1168 struct talloc_memlimit *old_limit,
1169 struct talloc_memlimit *new_limit);
1172 move a lump of memory from one talloc context to another return the
1173 ptr on success, or NULL if it could not be transferred.
1174 passing NULL as ptr will always return NULL with no side effects.
1176 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1178 struct talloc_chunk *tc, *new_tc;
1179 size_t ctx_size = 0;
1181 if (unlikely(!ptr)) {
1185 if (unlikely(new_ctx == NULL)) {
1186 new_ctx = null_context;
1189 tc = talloc_chunk_from_ptr(ptr);
1191 if (tc->limit != NULL) {
1193 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1195 /* Decrement the memory limit from the source .. */
1196 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1198 if (tc->limit->parent == tc) {
1199 tc->limit->upper = NULL;
1205 if (unlikely(new_ctx == NULL)) {
1207 _TLIST_REMOVE(tc->parent->child, tc);
1208 if (tc->parent->child) {
1209 tc->parent->child->parent = tc->parent;
1212 if (tc->prev) tc->prev->next = tc->next;
1213 if (tc->next) tc->next->prev = tc->prev;
1216 tc->parent = tc->next = tc->prev = NULL;
1217 return discard_const_p(void, ptr);
1220 new_tc = talloc_chunk_from_ptr(new_ctx);
1222 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1223 return discard_const_p(void, ptr);
1227 _TLIST_REMOVE(tc->parent->child, tc);
1228 if (tc->parent->child) {
1229 tc->parent->child->parent = tc->parent;
1232 if (tc->prev) tc->prev->next = tc->next;
1233 if (tc->next) tc->next->prev = tc->prev;
1234 tc->prev = tc->next = NULL;
1237 tc->parent = new_tc;
1238 if (new_tc->child) new_tc->child->parent = NULL;
1239 _TLIST_ADD(new_tc->child, tc);
1241 if (tc->limit || new_tc->limit) {
1242 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1244 /* .. and increment it in the destination. */
1245 if (new_tc->limit) {
1246 talloc_memlimit_grow(new_tc->limit, ctx_size);
1250 return discard_const_p(void, ptr);
1254 move a lump of memory from one talloc context to another return the
1255 ptr on success, or NULL if it could not be transferred.
1256 passing NULL as ptr will always return NULL with no side effects.
1258 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1260 struct talloc_chunk *tc;
1262 if (unlikely(ptr == NULL)) {
1266 tc = talloc_chunk_from_ptr(ptr);
1268 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1269 struct talloc_reference_handle *h;
1271 talloc_log("WARNING: talloc_steal with references at %s\n",
1274 for (h=tc->refs; h; h=h->next) {
1275 talloc_log("\treference at %s\n",
1281 /* this test is probably too expensive to have on in the
1282 normal build, but it is useful for debugging */
1283 if (talloc_is_parent(new_ctx, ptr)) {
1284 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1288 return _talloc_steal_internal(new_ctx, ptr);
1292 this is like a talloc_steal(), but you must supply the old
1293 parent. This resolves the ambiguity in a talloc_steal() which is
1294 called on a context that has more than one parent (via references)
1296 The old parent can be either a reference or a parent
1298 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1300 struct talloc_chunk *tc;
1301 struct talloc_reference_handle *h;
1303 if (unlikely(ptr == NULL)) {
1307 if (old_parent == talloc_parent(ptr)) {
1308 return _talloc_steal_internal(new_parent, ptr);
1311 tc = talloc_chunk_from_ptr(ptr);
1312 for (h=tc->refs;h;h=h->next) {
1313 if (talloc_parent(h) == old_parent) {
1314 if (_talloc_steal_internal(new_parent, h) != h) {
1317 return discard_const_p(void, ptr);
1321 /* it wasn't a parent */
1326 remove a secondary reference to a pointer. This undoes what
1327 talloc_reference() has done. The context and pointer arguments
1328 must match those given to a talloc_reference()
/*
 * Scan ptr's reference handles for one whose parent is 'context'
 * (NULL context matches a handle parented by null_context) and free
 * that handle; its destructor unlinks it from the refs list.  The
 * not-found return path is not visible in this excerpt.
 */
1330 static inline int talloc_unreference(const void *context, const void *ptr)
1332 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1333 struct talloc_reference_handle *h;
1335 if (unlikely(context == NULL)) {
1336 context = null_context;
1339 for (h=tc->refs;h;h=h->next) {
1340 struct talloc_chunk *p = talloc_parent_chunk(h);
1342 if (context == NULL) break;
1343 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1351 return _talloc_free_internal(h, __location__);
1355 remove a specific parent context from a pointer. This is a more
1356 controlled variant of talloc_free()
/*
 * talloc_unlink: drop exactly the link between 'context' and 'ptr'.
 * If context only held a reference, removing it suffices.  If context
 * is the real parent and other references remain, ownership is
 * transferred to one of the referencing parents via steal; with no
 * references left the pointer is actually freed.
 */
1358 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1360 struct talloc_chunk *tc_p, *new_p, *tc_c;
1367 if (context == NULL) {
1368 context = null_context;
1371 if (talloc_unreference(context, ptr) == 0) {
1375 if (context != NULL) {
1376 tc_c = talloc_chunk_from_ptr(context);
/* context is neither a reference holder nor the parent: error path */
1380 if (tc_c != talloc_parent_chunk(ptr)) {
1384 tc_p = talloc_chunk_from_ptr(ptr);
1386 if (tc_p->refs == NULL) {
1387 return _talloc_free_internal(ptr, __location__);
/* promote one of the remaining reference holders to be the new parent */
1390 new_p = talloc_parent_chunk(tc_p->refs);
1392 new_parent = TC_PTR_FROM_CHUNK(new_p);
1397 if (talloc_unreference(new_parent, ptr) != 0) {
1401 _talloc_steal_internal(new_parent, ptr);
/*
 * Format a name with vasprintf semantics and attach it to the chunk.
 * The name buffer is itself a talloc child of tc, named ".name".
 * Returns the new name, or (presumably) NULL on allocation failure —
 * the failure path is outside this extract.
 */
1407 add a name to an existing pointer - va_list version
1409 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1411 va_list ap) PRINTF_ATTRIBUTE(2,0);
1413 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1417 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1420 if (likely(name_tc)) {
1421 tc->name = TC_PTR_FROM_CHUNK(name_tc);
/* mark the name chunk so reports can identify it */
1422 _tc_set_name_const(name_tc, ".name");
/*
 * Public printf-style rename of an existing talloc pointer.
 * The va_start/va_end bracketing lines are missing from this extract.
 */
1430 add a name to an existing pointer
1432 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1434 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1438 name = tc_set_name_v(tc, fmt, ap);
/*
 * talloc() plus a printf-formatted name. If naming fails the fresh
 * allocation is released again so the caller never leaks.
 */
1445 create a named talloc pointer. Any talloc pointer can be named, and
1446 talloc_named() operates just like talloc() except that it allows you
1447 to name the pointer.
1449 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1454 struct talloc_chunk *tc;
1456 ptr = __talloc(context, size, &tc);
1457 if (unlikely(ptr == NULL)) return NULL;
1460 name = tc_set_name_v(tc, fmt, ap);
/* could not allocate the name: undo the main allocation */
1463 if (unlikely(name == NULL)) {
1464 _talloc_free_internal(ptr, __location__);
/*
 * Internal name lookup. Reference handles carry the sentinel
 * TALLOC_MAGIC_REFERENCE instead of a string; unnamed chunks fall
 * through to the "UNNAMED" default (line outside this extract).
 */
1472 return the name of a talloc ptr, or "UNNAMED"
1474 static inline const char *__talloc_get_name(const void *ptr)
1476 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1477 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1478 return ".reference";
1480 if (likely(tc->name)) {
1486 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1488 return __talloc_get_name(ptr);
/*
 * Name-based type check: returns ptr when the names match (by pointer
 * identity or strcmp), NULL otherwise.
 */
1492 check if a pointer has the given name. If it does, return the pointer,
1493 otherwise return NULL
1495 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1498 if (unlikely(ptr == NULL)) return NULL;
1499 pname = __talloc_get_name(ptr);
/* pointer compare first: names set via _talloc_named_const are interned */
1500 if (likely(pname == name || strcmp(pname, name) == 0)) {
1501 return discard_const_p(void, ptr);
/*
 * Abort with a descriptive message when talloc_get_type_abort()
 * detects the wrong chunk name. Falls back to a static string when
 * even the message cannot be allocated.
 */
1506 static void talloc_abort_type_mismatch(const char *location,
1508 const char *expected)
1512 reason = talloc_asprintf(NULL,
1513 "%s: Type mismatch: name[%s] expected[%s]",
/* allocation of the formatted reason failed */
1518 reason = "Type mismatch";
1521 talloc_abort(reason);
/*
 * Backend of talloc_get_type_abort(): return ptr if its name matches,
 * otherwise abort the process with a type-mismatch diagnostic.
 * A NULL ptr is also treated as a mismatch.
 */
1524 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1528 if (unlikely(ptr == NULL)) {
1529 talloc_abort_type_mismatch(location, NULL, name);
1533 pname = __talloc_get_name(ptr);
1534 if (likely(pname == name || strcmp(pname, name) == 0)) {
1535 return discard_const_p(void, ptr);
1538 talloc_abort_type_mismatch(location, pname, name);
/*
 * Legacy entry point: a zero-size named top-level context.
 * Mirrors talloc_named(NULL, 0, fmt, ...).
 */
1543 this is for compatibility with older versions of talloc
1545 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1550 struct talloc_chunk *tc;
1552 ptr = __talloc(NULL, 0, &tc);
1553 if (unlikely(ptr == NULL)) return NULL;
1556 name = tc_set_name_v(tc, fmt, ap);
/* naming failed: release the context rather than leak it */
1559 if (unlikely(name == NULL)) {
1560 _talloc_free_internal(ptr, __location__);
/*
 * Free every child of tc, reparenting any child that refuses to die
 * (live references or destructor veto) onto, in order of preference:
 * a remaining reference holder, our own parent, or the null context.
 */
1567 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1569 const char *location)
1572 /* we need to work out who will own an abandoned child
1573 if it cannot be freed. In priority order, the first
1574 choice is owner of any remaining reference to this
1575 pointer, the second choice is our parent, and the
1576 final choice is the null context. */
1577 void *child = TC_PTR_FROM_CHUNK(tc->child);
1578 const void *new_parent = null_context;
1579 if (unlikely(tc->child->refs)) {
1580 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1581 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
/* -1 means the child survived the free attempt and must be re-homed */
1583 if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1584 if (talloc_parent_chunk(child) != tc) {
1586 * Destructor already reparented this child.
1587 * No further reparenting needed.
/* no reference holder found: fall back to our parent, then null */
1591 if (new_parent == null_context) {
1592 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1593 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1595 _talloc_steal_internal(new_parent, child);
/*
 * Free all children of ptr but keep ptr itself, taking care not to
 * free the chunk that stores ptr's own name: it is temporarily
 * unlinked from the child list and re-attached afterwards.
 */
1601 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1602 should probably not be used in new code. It's in here to keep the talloc
1603 code consistent across Samba 3 and 4.
1605 _PUBLIC_ void talloc_free_children(void *ptr)
1607 struct talloc_chunk *tc_name = NULL;
1608 struct talloc_chunk *tc;
1610 if (unlikely(ptr == NULL)) {
1614 tc = talloc_chunk_from_ptr(ptr);
1616 /* we do not want to free the context name if it is a child .. */
1617 if (likely(tc->child)) {
/* locate the child chunk that holds tc->name, if any */
1618 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1619 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
1622 _TLIST_REMOVE(tc->child, tc_name);
1624 tc->child->parent = tc;
1629 _tc_free_children_internal(tc, ptr, __location__);
1631 /* .. so we put it back after all other children have been freed */
1634 tc->child->parent = NULL;
1636 tc_name->parent = tc;
1637 _TLIST_ADD(tc->child, tc_name);
1642 Allocate a bit of memory as a child of an existing pointer
1644 _PUBLIC_ void *_talloc(const void *context, size_t size)
1646 struct talloc_chunk *tc;
1647 return __talloc(context, size, &tc);
1651 externally callable talloc_set_name_const()
1653 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1655 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1659 create a named talloc pointer. Any talloc pointer can be named, and
1660 talloc_named() operates just like talloc() except that it allows you
1661 to name the pointer.
1663 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1665 return _talloc_named_const(context, size, name);
/*
 * Public free entry point. Refuses (with a logged error) to free a
 * pointer that still has references, except for the unambiguous case
 * of exactly one reference plus a null-context parent.
 */
1669 free a talloc pointer. This also frees all child pointers of this
1672 return 0 if the memory is actually freed, otherwise -1. The memory
1673 will not be freed if the ref_count is > 1 or the destructor (if
1674 any) returns non-zero
1676 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1678 struct talloc_chunk *tc;
1680 if (unlikely(ptr == NULL)) {
1684 tc = talloc_chunk_from_ptr(ptr);
1686 if (unlikely(tc->refs != NULL)) {
1687 struct talloc_reference_handle *h;
/* single reference, parent is the null context: behave like unlink */
1689 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1690 /* in this case we do know which parent should
1691 get this pointer, as there is really only
1693 return talloc_unlink(null_context, ptr);
/* ambiguous: report every live reference and fail */
1696 talloc_log("ERROR: talloc_free with references at %s\n",
1699 for (h=tc->refs; h; h=h->next) {
1700 talloc_log("\treference at %s\n",
1706 return _talloc_free_internal(ptr, location);
/*
 * talloc's realloc. Handles: size 0 (== free), NULL ptr (== malloc),
 * refused cases (referenced pointers, whole pools), memlimit
 * accounting, and — the bulk of the code — reallocation of chunks that
 * live inside a talloc pool, with several in-place fast paths.
 * Left byte-identical (comments only): the pool paths depend on exact
 * statement order and this extract is missing lines.
 */
1712 A talloc version of realloc. The context argument is only used if
1715 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1717 struct talloc_chunk *tc;
1719 bool malloced = false;
1720 struct talloc_pool_hdr *pool_hdr = NULL;
1721 size_t old_size = 0;
1722 size_t new_size = 0;
1724 /* size zero is equivalent to free() */
1725 if (unlikely(size == 0)) {
1726 talloc_unlink(context, ptr);
1730 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1734 /* realloc(NULL) is equivalent to malloc() */
1736 return _talloc_named_const(context, size, name);
1739 tc = talloc_chunk_from_ptr(ptr);
1741 /* don't allow realloc on referenced pointers */
1742 if (unlikely(tc->refs)) {
1746 /* don't let anybody try to realloc a talloc_pool */
1747 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
/* growing under a memlimit: check the delta fits before doing work */
1751 if (tc->limit && (size > tc->size)) {
1752 if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1758 /* handle realloc inside a talloc_pool */
1759 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1760 pool_hdr = tc->pool;
1763 #if (ALWAYS_REALLOC == 0)
1764 /* don't shrink if we have less than 1k to gain */
1765 if (size < tc->size && tc->limit == NULL) {
1767 void *next_tc = tc_next_chunk(tc);
1768 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
/* shrinking the last chunk in the pool lets us give space back */
1770 if (next_tc == pool_hdr->end) {
1771 /* note: tc->size has changed, so this works */
1772 pool_hdr->end = tc_next_chunk(tc);
1775 } else if ((tc->size - size) < 1024) {
1777 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1778 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1779 * after each realloc call, which slows down
1780 * testing a lot :-(.
1782 * That is why we only mark memory as undefined here.
1784 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1786 /* do not shrink if we have less than 1k to gain */
1790 } else if (tc->size == size) {
1792 * do not change the pointer if it is exactly
1799 /* by resetting magic we catch users of the old memory */
1800 tc->flags |= TALLOC_FLAG_FREE;
/* ALWAYS_REALLOC stress mode: always copy into a new chunk */
1804 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1805 pool_hdr->object_count--;
1807 if (new_ptr == NULL) {
1808 new_ptr = malloc(TC_HDR_SIZE+size);
1814 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1815 TC_INVALIDATE_FULL_CHUNK(tc);
1818 /* We're doing malloc then free here, so record the difference. */
1819 old_size = tc->size;
1821 new_ptr = malloc(size + TC_HDR_SIZE);
1823 memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
/* normal (non-stress) pool-resident chunk: try in-place strategies */
1829 struct talloc_chunk *pool_tc;
1830 void *next_tc = tc_next_chunk(tc);
1831 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1832 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1833 size_t space_needed;
1835 unsigned int chunk_count = pool_hdr->object_count;
1837 pool_tc = talloc_chunk_from_pool(pool_hdr);
1838 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1842 if (chunk_count == 1) {
1844 * optimize for the case where 'tc' is the only
1845 * chunk in the pool.
1847 char *start = tc_pool_first_chunk(pool_hdr);
1848 space_needed = new_chunk_size;
1849 space_left = (char *)tc_pool_end(pool_hdr) - start;
1851 if (space_left >= space_needed) {
1852 size_t old_used = TC_HDR_SIZE + tc->size;
1853 size_t new_used = TC_HDR_SIZE + size;
1856 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1860 * start -> tc may have
1861 * been freed and thus been marked as
1862 * VALGRIND_MEM_NOACCESS. Set it to
1863 * VALGRIND_MEM_UNDEFINED so we can
1864 * copy into it without valgrind errors.
1865 * We can't just mark
1866 * new_ptr -> new_ptr + old_used
1867 * as this may overlap on top of tc,
1868 * (which is why we use memmove, not
1869 * memcpy below) hence the MIN.
1871 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1872 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
/* slide the lone chunk to the start of the pool (regions may overlap) */
1876 memmove(new_ptr, tc, old_used);
1878 tc = (struct talloc_chunk *)new_ptr;
1879 TC_UNDEFINE_GROW_CHUNK(tc, size);
1882 * first we do not align the pool pointer
1883 * because we want to invalidate the padding
1886 pool_hdr->end = new_used + (char *)new_ptr;
1887 tc_invalidate_pool(pool_hdr);
1889 /* now the aligned pointer */
1890 pool_hdr->end = new_chunk_size + (char *)new_ptr;
/* same aligned footprint: nothing to move */
1897 if (new_chunk_size == old_chunk_size) {
1898 TC_UNDEFINE_GROW_CHUNK(tc, size);
1899 tc->flags &= ~TALLOC_FLAG_FREE;
1904 if (next_tc == pool_hdr->end) {
1906 * optimize for the case where 'tc' is the last
1907 * chunk in the pool.
1909 space_needed = new_chunk_size - old_chunk_size;
1910 space_left = tc_pool_space_left(pool_hdr);
1912 if (space_left >= space_needed) {
1913 TC_UNDEFINE_GROW_CHUNK(tc, size);
1914 tc->flags &= ~TALLOC_FLAG_FREE;
1916 pool_hdr->end = tc_next_chunk(tc);
/* no in-place path worked: allocate elsewhere and copy */
1921 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1923 if (new_ptr == NULL) {
1924 new_ptr = malloc(TC_HDR_SIZE+size);
1930 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1932 _tc_free_poolmem(tc, __location__ "_talloc_realloc");
/* non-pool chunk: plain realloc of header+payload */
1936 /* We're doing realloc here, so record the difference. */
1937 old_size = tc->size;
1939 new_ptr = realloc(tc, size + TC_HDR_SIZE);
1943 if (unlikely(!new_ptr)) {
/* realloc failed: clear the FREE flag we set above and report failure */
1944 tc->flags &= ~TALLOC_FLAG_FREE;
1948 tc = (struct talloc_chunk *)new_ptr;
1949 tc->flags &= ~TALLOC_FLAG_FREE;
1951 tc->flags &= ~TALLOC_FLAG_POOLMEM;
/* the chunk may have moved: re-point every sibling/parent/child link */
1954 tc->parent->child = tc;
1957 tc->child->parent = tc;
1961 tc->prev->next = tc;
1964 tc->next->prev = tc;
/* settle memlimit accounting for the size delta */
1967 if (new_size > old_size) {
1968 talloc_memlimit_grow(tc->limit, new_size - old_size);
1969 } else if (new_size < old_size) {
1970 talloc_memlimit_shrink(tc->limit, old_size - new_size);
1974 _tc_set_name_const(tc, name);
1976 return TC_PTR_FROM_CHUNK(tc);
/*
 * talloc_steal() variant that also NULLs out the source pointer
 * (the *pptr = NULL assignment is outside this extract).
 */
1980 a wrapper around talloc_steal() for situations where you are moving a pointer
1981 between two structures, and want the old pointer to be set to NULL
1983 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
1985 const void **pptr = discard_const_p(const void *,_pptr);
1986 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
/*
 * Recursive accounting walk over a talloc subtree. 'type' selects what
 * is summed: payload bytes, block count, or memlimit-relevant bytes.
 * The same walk doubles as the memlimit re-linker: when old/new_limit
 * are given, every chunk whose limit was old_limit is switched to
 * new_limit. TALLOC_FLAG_LOOP guards against reference cycles.
 */
1991 enum talloc_mem_count_type {
1997 static inline size_t _talloc_total_mem_internal(const void *ptr,
1998 enum talloc_mem_count_type type,
1999 struct talloc_memlimit *old_limit,
2000 struct talloc_memlimit *new_limit)
2003 struct talloc_chunk *c, *tc;
2012 tc = talloc_chunk_from_ptr(ptr);
2014 if (old_limit || new_limit) {
2015 if (tc->limit && tc->limit->upper == old_limit) {
2016 tc->limit->upper = new_limit;
2020 /* optimize in the memlimits case */
/* a nested memlimit context already tracks its own subtree total */
2021 if (type == TOTAL_MEM_LIMIT &&
2022 tc->limit != NULL &&
2023 tc->limit != old_limit &&
2024 tc->limit->parent == tc) {
2025 return tc->limit->cur_size;
/* already visited via a reference: avoid infinite recursion */
2028 if (tc->flags & TALLOC_FLAG_LOOP) {
2032 tc->flags |= TALLOC_FLAG_LOOP;
2034 if (old_limit || new_limit) {
2035 if (old_limit == tc->limit) {
2036 tc->limit = new_limit;
2041 case TOTAL_MEM_SIZE:
/* reference handles are bookkeeping, not user memory */
2042 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2046 case TOTAL_MEM_BLOCKS:
2049 case TOTAL_MEM_LIMIT:
2050 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2052 * Don't count memory allocated from a pool
2053 * when calculating limits. Only count the
2056 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2057 if (tc->flags & TALLOC_FLAG_POOL) {
2059 * If this is a pool, the allocated
2060 * size is in the pool header, and
2061 * remember to add in the prefix
2064 struct talloc_pool_hdr *pool_hdr
2065 = talloc_pool_from_chunk(tc);
2066 total = pool_hdr->poolsize +
2070 total = tc->size + TC_HDR_SIZE;
/* recurse into every child and accumulate */
2076 for (c = tc->child; c; c = c->next) {
2077 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2078 old_limit, new_limit);
2081 tc->flags &= ~TALLOC_FLAG_LOOP;
2087 return the total size of a talloc pool (subtree)
2089 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2091 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2095 return the total number of blocks in a talloc pool (subtree)
2097 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2099 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
/*
 * Count the reference handles attached to ptr (the accumulation and
 * return statements fall outside this extract).
 */
2103 return the number of external references to a pointer
2105 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2107 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2108 struct talloc_reference_handle *h;
2111 for (h=tc->refs;h;h=h->next) {
/*
 * Depth-first walk of the talloc tree, invoking 'callback' for every
 * chunk. References are reported (is_ref = 1) but not descended into.
 * TALLOC_FLAG_LOOP prevents revisiting chunks reachable twice.
 */
2118 report on memory usage by all children of a pointer, giving a full tree view
2120 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2121 void (*callback)(const void *ptr,
2122 int depth, int max_depth,
2124 void *private_data),
2127 struct talloc_chunk *c, *tc;
2132 if (ptr == NULL) return;
2134 tc = talloc_chunk_from_ptr(ptr);
2136 if (tc->flags & TALLOC_FLAG_LOOP) {
2140 callback(ptr, depth, max_depth, 0, private_data);
/* max_depth < 0 means unlimited */
2142 if (max_depth >= 0 && depth >= max_depth) {
2146 tc->flags |= TALLOC_FLAG_LOOP;
2147 for (c=tc->child;c;c=c->next) {
2148 if (c->name == TALLOC_MAGIC_REFERENCE) {
2149 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2150 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2152 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2155 tc->flags &= ~TALLOC_FLAG_LOOP;
/*
 * Callback for talloc_report_depth_cb() that pretty-prints one chunk
 * to a FILE*: references, memlimit contexts, the report header
 * (depth 0), ordinary chunks, and — for chunks with content — a
 * printable/hex dump of the payload.
 */
2158 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2160 const char *name = __talloc_get_name(ptr);
2161 struct talloc_chunk *tc;
2162 FILE *f = (FILE *)_f;
2165 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2169 tc = talloc_chunk_from_ptr(ptr);
/* memlimit root: show its configured and current sizes */
2170 if (tc->limit && tc->limit->parent == tc) {
2171 fprintf(f, "%*s%-30s is a memlimit context"
2172 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2175 (unsigned long)tc->limit->max_size,
2176 (unsigned long)tc->limit->cur_size);
/* depth 0: overall report banner */
2180 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2181 (max_depth < 0 ? "full " :""), name,
2182 (unsigned long)talloc_total_size(ptr),
2183 (unsigned long)talloc_total_blocks(ptr));
2187 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2190 (unsigned long)talloc_total_size(ptr),
2191 (unsigned long)talloc_total_blocks(ptr),
2192 (int)talloc_reference_count(ptr), ptr);
2195 fprintf(f, "content: ");
2196 if (talloc_total_size(ptr)) {
2197 int tot = talloc_total_size(ptr);
2200 for (i = 0; i < tot; i++) {
/* printable ASCII as-is, everything else as ~hex */
2201 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2202 fprintf(f, "%c", ((char *)ptr)[i]);
2204 fprintf(f, "~%02x", ((char *)ptr)[i]);
2215 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2218 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2224 report on memory usage by all children of a pointer, giving a full tree view
2226 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2228 talloc_report_depth_file(ptr, 0, -1, f);
2232 report on memory usage by all children of a pointer
2234 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2236 talloc_report_depth_file(ptr, 0, 1, f);
2240 report on any memory hanging off the null context
2242 static void talloc_report_null(void)
2244 if (talloc_total_size(null_context) != 0) {
2245 talloc_report(null_context, stderr);
2250 report on any memory hanging off the null context
2252 static void talloc_report_null_full(void)
2254 if (talloc_total_size(null_context) != 0) {
2255 talloc_report_full(null_context, stderr);
2260 enable tracking of the NULL context
2262 _PUBLIC_ void talloc_enable_null_tracking(void)
2264 if (null_context == NULL) {
2265 null_context = _talloc_named_const(NULL, 0, "null_context");
2266 if (autofree_context != NULL) {
2267 talloc_reparent(NULL, null_context, autofree_context);
2273 enable tracking of the NULL context, not moving the autofree context
2274 into the NULL context. This is needed for the talloc testsuite
2276 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2278 if (null_context == NULL) {
2279 null_context = _talloc_named_const(NULL, 0, "null_context");
/*
 * Turn null tracking off again: detach every chunk currently hanging
 * off the null context (so they survive), then free the now-empty
 * null context itself.
 */
2284 disable tracking of the NULL context
2286 _PUBLIC_ void talloc_disable_null_tracking(void)
2288 if (null_context != NULL) {
2289 /* we have to move any children onto the real NULL
2291 struct talloc_chunk *tc, *tc2;
2292 tc = talloc_chunk_from_ptr(null_context);
/* sever parent/prev links from direct children */
2293 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2294 if (tc2->parent == tc) tc2->parent = NULL;
2295 if (tc2->prev == tc) tc2->prev = NULL;
/* and from siblings in the top-level list */
2297 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2298 if (tc2->parent == tc) tc2->parent = NULL;
2299 if (tc2->prev == tc) tc2->prev = NULL;
2304 talloc_free(null_context);
2305 null_context = NULL;
2309 enable leak reporting on exit
2311 _PUBLIC_ void talloc_enable_leak_report(void)
2313 talloc_enable_null_tracking();
2314 atexit(talloc_report_null);
2318 enable full leak reporting on exit
2320 _PUBLIC_ void talloc_enable_leak_report_full(void)
2322 talloc_enable_null_tracking();
2323 atexit(talloc_report_null_full);
/*
 * Allocate and zero-fill; the NULL-check guard around the memset and
 * the return are outside this extract.
 */
2327 talloc and zero memory.
2329 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2331 void *p = _talloc_named_const(ctx, size, name);
2334 memset(p, '\0', size);
/*
 * Allocate a named chunk and copy 'size' bytes from p into it; the
 * NULL-check guard and return are outside this extract.
 */
2341 memdup with a talloc.
2343 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2345 void *newp = _talloc_named_const(t, size, name);
2348 memcpy(newp, p, size);
/*
 * Shared backend for talloc_strdup/strndup: copy 'len' bytes, add a
 * terminator (outside this extract), and name the chunk after its own
 * contents so reports show the string.
 */
2354 static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2357 struct talloc_chunk *tc;
2359 ret = (char *)__talloc(t, len + 1, &tc);
2360 if (unlikely(!ret)) return NULL;
2362 memcpy(ret, p, len);
2365 _tc_set_name_const(tc, ret);
2370 strdup with a talloc
2372 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2374 if (unlikely(!p)) return NULL;
2375 return __talloc_strlendup(t, p, strlen(p));
2379 strndup with a talloc
2381 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2383 if (unlikely(!p)) return NULL;
2384 return __talloc_strlendup(t, p, strnlen(p, n));
/*
 * Shared backend for the *_append functions: grow s in place (realloc
 * may move it), copy 'alen' bytes of a after position 'slen', then
 * terminate (terminator line outside this extract) and rename the
 * chunk after its new contents.
 */
2387 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2388 const char *a, size_t alen)
2392 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2393 if (unlikely(!ret)) return NULL;
2395 /* append the string and the trailing \0 */
2396 memcpy(&ret[slen], a, alen);
2399 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
/*
 * Append a to the end of string s (by strlen). A NULL s degrades to
 * a plain strdup of a; the NULL-a guard is outside this extract.
 */
2404 * Appends at the end of the string.
2406 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2409 return talloc_strdup(NULL, a);
2416 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
/*
 * Append a at the end of s's talloc'ed BUFFER (talloc_get_size), not
 * at its first NUL — useful when s intentionally embeds NULs.
 * The slen adjustment inside the if-block is outside this extract.
 */
2420 * Appends at the end of the talloc'ed buffer,
2421 * not the end of the string.
2423 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2428 return talloc_strdup(NULL, a);
2435 slen = talloc_get_size(s);
2436 if (likely(slen > 0)) {
2440 return __talloc_strlendup_append(s, slen, a, strlen(a));
/*
 * Append at most n characters of a to the end of string s.
 * A NULL s degrades to talloc_strndup.
 */
2444 * Appends at the end of the string.
2446 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2449 return talloc_strndup(NULL, a, n);
2456 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
/*
 * Append at most n characters of a at the end of s's talloc'ed
 * buffer (not at the first NUL). The slen adjustment inside the
 * if-block is outside this extract.
 */
2460 * Appends at the end of the talloc'ed buffer,
2461 * not the end of the string.
2463 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2468 return talloc_strndup(NULL, a, n);
2475 slen = talloc_get_size(s);
2476 if (likely(slen > 0)) {
2480 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
/*
 * Portability shim: provide va_copy() on pre-C99 platforms, using
 * __va_copy where available, else plain assignment (valid where
 * va_list is a simple value type).
 */
2483 #ifndef HAVE_VA_COPY
2484 #ifdef HAVE___VA_COPY
2485 #define va_copy(dest, src) __va_copy(dest, src)
2487 #define va_copy(dest, src) (dest) = (src)
/*
 * vasprintf into a fresh talloc chunk. Measures with a first
 * vsnprintf into a small stack buffer (reusing the result when it
 * fits), then formats directly into the allocation otherwise. The
 * chunk is named after its own contents. Uses va_copy since ap is
 * consumed up to twice.
 */
2491 static struct talloc_chunk *_vasprintf_tc(const void *t,
2493 va_list ap) PRINTF_ATTRIBUTE(2,0);
2495 static struct talloc_chunk *_vasprintf_tc(const void *t,
2502 struct talloc_chunk *tc;
2505 /* this call looks strange, but it makes it work on older solaris boxes */
2507 len = vsnprintf(buf, sizeof(buf), fmt, ap2);
2509 if (unlikely(len < 0)) {
2513 ret = (char *)__talloc(t, len+1, &tc);
2514 if (unlikely(!ret)) return NULL;
/* short result: the probe buffer already holds the full string */
2516 if (len < sizeof(buf)) {
2517 memcpy(ret, buf, len+1);
2520 vsnprintf(ret, len+1, fmt, ap2);
2524 _tc_set_name_const(tc, ret);
/*
 * Public vasprintf wrapper; the NULL-chunk guard between these lines
 * is outside this extract.
 */
2528 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2530 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2534 return TC_PTR_FROM_CHUNK(tc);
/*
 * Variadic front-end for talloc_vasprintf(); va_start/va_end lines
 * are outside this extract.
 */
2539 Perform string formatting, and return a pointer to newly allocated
2540 memory holding the result, inside a memory pool.
2542 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2548 ret = talloc_vasprintf(t, fmt, ap);
/*
 * Backend for the vasprintf-append functions: measure the formatted
 * length with a 1-byte probe vsnprintf, grow s via talloc_realloc
 * (which may move it), then format into the tail. Renames the chunk
 * after its new contents.
 */
2553 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2554 const char *fmt, va_list ap)
2555 PRINTF_ATTRIBUTE(3,0);
2557 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2558 const char *fmt, va_list ap)
/* probe: &c is a 1-byte buffer, vsnprintf returns the needed length */
2565 alen = vsnprintf(&c, 1, fmt, ap2);
2569 /* Either the vsnprintf failed or the format resulted in
2570 * no characters being formatted. In the former case, we
2571 * ought to return NULL, in the latter we ought to return
2572 * the original string. Most current callers of this
2573 * function expect it to never return NULL.
2578 s = talloc_realloc(NULL, s, char, slen + alen + 1);
2579 if (!s) return NULL;
2582 vsnprintf(s + slen, alen + 1, fmt, ap2);
2585 _tc_set_name_const(talloc_chunk_from_ptr(s), s);
/*
 * Append formatted output at the end of string s (by strlen).
 * NULL s degrades to a fresh talloc_vasprintf.
 */
2590 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2591 * and return @p s, which may have moved. Good for gradually
2592 * accumulating output into a string buffer. Appends at the end
2595 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2598 return talloc_vasprintf(NULL, fmt, ap);
2601 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
/*
 * Append formatted output at the end of s's talloc'ed buffer
 * (talloc_get_size), not at the first NUL. The slen adjustment inside
 * the if-block is outside this extract.
 */
2605 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2606 * and return @p s, which may have moved. Always appends at the
2607 * end of the talloc'ed buffer, not the end of the string.
2609 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2614 return talloc_vasprintf(NULL, fmt, ap);
2617 slen = talloc_get_size(s);
2618 if (likely(slen > 0)) {
2622 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
/*
 * Variadic front-end for talloc_vasprintf_append(); the va_start /
 * va_end and return lines fall outside this extract.
 */
2626 Realloc @p s to append the formatted result of @p fmt and return @p
2627 s, which may have moved. Good for gradually accumulating output
2628 into a string buffer.
2630 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2635 s = talloc_vasprintf_append(s, fmt, ap);
/*
 * Variadic front-end for talloc_vasprintf_append_buffer(); appends at
 * the end of the talloc'ed buffer rather than at the first NUL.
 */
2641 Realloc @p s to append the formatted result of @p fmt and return @p
2642 s, which may have moved. Good for gradually accumulating output
2645 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2650 s = talloc_vasprintf_append_buffer(s, fmt, ap);
/*
 * Array allocation with an explicit el_size*count overflow guard
 * (the failure return inside the if-block is outside this extract).
 */
2656 alloc an array, checking for integer overflow in the array size
2658 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2660 if (count >= MAX_TALLOC_SIZE/el_size) {
2663 return _talloc_named_const(ctx, el_size * count, name);
/*
 * Zeroed array allocation with the same overflow guard as
 * _talloc_array().
 */
2667 alloc an zero array, checking for integer overflow in the array size
2669 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2671 if (count >= MAX_TALLOC_SIZE/el_size) {
2674 return _talloc_zero(ctx, el_size * count, name);
/*
 * Array realloc with the same overflow guard as _talloc_array().
 */
2678 realloc an array, checking for integer overflow in the array size
2680 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2682 if (count >= MAX_TALLOC_SIZE/el_size) {
2685 return _talloc_realloc(ctx, ptr, el_size * count, name);
2689 a function version of talloc_realloc(), so it can be passed as a function pointer
2690 to libraries that want a realloc function (a realloc function encapsulates
2691 all the basic capabilities of an allocation library, which is why this is useful)
2693 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2695 return _talloc_realloc(context, ptr, size, NULL);
/*
 * Autofree support: the destructor clears the global so a manual free
 * of the context is safe, and talloc_autofree() is the atexit() hook
 * that frees it at process exit.
 */
2699 static int talloc_autofree_destructor(void *ptr)
2701 autofree_context = NULL;
2705 static void talloc_autofree(void)
2707 talloc_free(autofree_context);
/*
 * Lazily create the singleton autofree context; freed automatically
 * at exit via atexit(talloc_autofree).
 */
2711 return a context which will be auto-freed on exit
2712 this is useful for reducing the noise in leak reports
2714 _PUBLIC_ void *talloc_autofree_context(void)
2716 if (autofree_context == NULL) {
2717 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
/* destructor resets the global if someone frees it early */
2718 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2719 atexit(talloc_autofree);
2721 return autofree_context;
/*
 * Payload size of a talloc pointer; the NULL-context early return and
 * the final 'return tc->size' are outside this extract.
 */
2724 _PUBLIC_ size_t talloc_get_size(const void *context)
2726 struct talloc_chunk *tc;
2728 if (context == NULL) {
2732 tc = talloc_chunk_from_ptr(context);
/*
 * Walk up the parent chain looking for a context with the given name.
 * The loop steps back through the sibling list to reach the real
 * parent link (talloc keeps parent only on the list head); the
 * continuation/termination lines are outside this extract.
 */
2738 find a parent of this context that has the given name, if any
2740 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2742 struct talloc_chunk *tc;
2744 if (context == NULL) {
2748 tc = talloc_chunk_from_ptr(context);
2750 if (tc->name && strcmp(tc->name, name) == 0) {
2751 return TC_PTR_FROM_CHUNK(tc);
/* rewind to list head, whose ->parent points at the real parent */
2753 while (tc && tc->prev) tc = tc->prev;
/*
 * Print the chain of ancestors of 'context' to the given FILE, one
 * name per line. Uses the same list-head rewind as
 * talloc_find_parent_byname() to follow parent links.
 */
2762 show the parentage of a context
2764 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2766 struct talloc_chunk *tc;
2768 if (context == NULL) {
2769 fprintf(file, "talloc no parents for NULL\n");
2773 tc = talloc_chunk_from_ptr(context);
2774 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2776 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2777 while (tc && tc->prev) tc = tc->prev;
/*
 * Ancestor test: walk up from 'context' (depth-limited to guard
 * against corrupted trees) and report whether 'ptr' appears on the
 * parent chain. The public wrapper fixes depth at TALLOC_MAX_DEPTH.
 */
2786 return 1 if ptr is a parent of context
2788 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2790 struct talloc_chunk *tc;
2792 if (context == NULL) {
2796 tc = talloc_chunk_from_ptr(context);
2801 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
/* rewind to the sibling-list head to find the parent link */
2802 while (tc && tc->prev) tc = tc->prev;
2812 return 1 if ptr is a parent of context
2814 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2816 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2820 return the total size of memory used by this context and all children
2822 static inline size_t _talloc_total_limit_size(const void *ptr,
2823 struct talloc_memlimit *old_limit,
2824 struct talloc_memlimit *new_limit)
2826 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2827 old_limit, new_limit);
/*
 * Check whether 'size' extra bytes fit under every limit on the
 * chain from 'limit' upward. max_size == 0 means "unlimited". The
 * false/true returns fall outside this extract.
 */
2830 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2832 struct talloc_memlimit *l;
2834 for (l = limit; l != NULL; l = l->upper) {
/* already at/over the cap, or the remaining headroom is too small */
2835 if (l->max_size != 0 &&
2836 ((l->max_size <= l->cur_size) ||
2837 (l->max_size - l->cur_size < size))) {
/*
 * Subtract a dying chunk's footprint from the memlimit chain.
 * Pool-resident chunks are skipped (the pool itself is what was
 * counted); pools additionally account for their TP_HDR_SIZE prefix.
 */
2846 Update memory limits when freeing a talloc_chunk.
2848 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2850 size_t limit_shrink_size;
2857 * Pool entries don't count. Only the pools
2858 * themselves are counted as part of the memory
2859 * limits. Note that this also takes care of
2860 * nested pools which have both flags
2861 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2863 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2868 * If we are part of a memory limited context hierarchy
2869 * we need to subtract the memory used from the counters
2872 limit_shrink_size = tc->size+TC_HDR_SIZE;
2875 * If we're deallocating a pool, take into
2876 * account the prefix size added for the pool.
2879 if (tc->flags & TALLOC_FLAG_POOL) {
2880 limit_shrink_size += TP_HDR_SIZE;
2883 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
/* freeing a memlimit root: its limit struct goes with it */
2885 if (tc->limit->parent == tc) {
/*
 * Add 'size' to cur_size on every limit up the chain; aborts on
 * size_t wrap-around, which would indicate corrupted accounting.
 */
2893 Increase memory limit accounting after a malloc/realloc.
2895 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2898 struct talloc_memlimit *l;
2900 for (l = limit; l != NULL; l = l->upper) {
2901 size_t new_cur_size = l->cur_size + size;
/* unsigned overflow check: sum smaller than an addend means wrap */
2902 if (new_cur_size < l->cur_size) {
2903 talloc_abort("logic error in talloc_memlimit_grow\n");
2906 l->cur_size = new_cur_size;
/*
 * Subtract 'size' from cur_size on every limit up the chain; aborts
 * if the counter would underflow (freeing more than was accounted).
 */
2911 Decrease memory limit accounting after a free/realloc.
2913 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2916 struct talloc_memlimit *l;
2918 for (l = limit; l != NULL; l = l->upper) {
2919 if (l->cur_size < size) {
2920 talloc_abort("logic error in talloc_memlimit_shrink\n")_
2923 l->cur_size = l->cur_size - size;
2927 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
2929 struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
2930 struct talloc_memlimit *orig_limit;
2931 struct talloc_memlimit *limit = NULL;
2933 if (tc->limit && tc->limit->parent == tc) {
2934 tc->limit->max_size = max_size;
2937 orig_limit = tc->limit;
2939 limit = malloc(sizeof(struct talloc_memlimit));
2940 if (limit == NULL) {
2944 limit->max_size = max_size;
2945 limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
2948 limit->upper = orig_limit;
2950 limit->upper = NULL;