2 Unix SMB/CIFS implementation.
4 trivial database library
6 Copyright (C) Andrew Tridgell 1999-2005
7 Copyright (C) Paul `Rusty' Russell 2000
8 Copyright (C) Jeremy Allison 2000-2003
10 ** NOTE! The following LGPL license applies to the tdb
11 ** library. This does NOT imply that all of Samba is released
14 This library is free software; you can redistribute it and/or
15 modify it under the terms of the GNU Lesser General Public
16 License as published by the Free Software Foundation; either
17 version 3 of the License, or (at your option) any later version.
19 This library is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 Lesser General Public License for more details.
24 You should have received a copy of the GNU Lesser General Public
25 License along with this library; if not, see <http://www.gnu.org/licenses/>.
28 #include "tdb_private.h"
30 /* 'right' merges can involve O(n^2) cost when combined with a
31 traverse, so they are disabled until we find a way to do them in O(1) time */
34 #define USE_RIGHT_MERGES 0
36 /* read a freelist record and check for simple errors */
/*
 * NOTE(review): the embedded original line numbers jump (39->42, 48->52,
 * 55->59), so interior lines (braces, returns, log arguments) were dropped
 * by the extraction; the statements below are incomplete as shown.
 *
 * Visible behavior: read sizeof(*rec) bytes at 'off' into *rec (converted
 * via DOCONV()), repair a record left with TDB_MAGIC by an interrupted
 * delete, treat any other non-free magic as corruption, and bounds-check
 * the freelist 'next' pointer with tdb_oob.
 */
37 int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct tdb_record *rec)
39 if (tdb->methods->tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1)
/* A used-record magic here is repairable: rewrite the record as free. */
42 if (rec->magic == TDB_MAGIC) {
43 /* this happens when an app is shut down while deleting a record - we should
44 not completely fail when this happens */
45 TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read non-free magic 0x%x at offset=%u - fixing\n",
47 rec->magic = TDB_FREE_MAGIC;
48 if (tdb_rec_write(tdb, off, rec) == -1)
/* Any other magic value is real corruption. */
52 if (rec->magic != TDB_FREE_MAGIC) {
53 /* Ensure ecode is set for log fn. */
54 tdb->ecode = TDB_ERR_CORRUPT;
55 TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read bad magic 0x%x at offset=%u\n",
/* Verify the chain's next offset points inside the mapped file. */
59 if (tdb->methods->tdb_oob(tdb, rec->next, sizeof(*rec), 0) != 0)
66 /* Remove an element from the freelist. Must have alloc lock. */
/* NOTE(review): interior lines were dropped by the extraction (embedded
   numbering gaps 73->76 and 78->81); the loop body and error return are
   incomplete as shown. */
67 static int remove_from_freelist(struct tdb_context *tdb, tdb_off_t off, tdb_off_t next)
69 tdb_off_t last_ptr, i;
71 /* read in the freelist top */
72 last_ptr = FREELIST_TOP;
/* Walk the singly linked freelist; when the entry equal to 'off' is found,
   splice it out by pointing the previous link at 'next'. */
73 while (tdb_ofs_read(tdb, last_ptr, &i) != -1 && i != 0) {
76 return tdb_ofs_write(tdb, last_ptr, &next);
78 /* Follow chain (next offset is at start of record) */
/* Fell off the end of the chain: 'off' was not on the freelist. */
81 tdb->ecode = TDB_ERR_CORRUPT;
82 TDB_LOG((tdb, TDB_DEBUG_FATAL,"remove_from_freelist: not on list at off=%u\n", off));
88 /* update a record tailer (must hold allocation lock) */
/* Writes the record's total size (header + data) into the tdb_off_t tailer
   word stored at the very end of the record; read_record_on_left uses that
   tailer to locate this record's header when merging leftwards. */
89 static int update_tailer(struct tdb_context *tdb, tdb_off_t offset,
90 const struct tdb_record *rec)
/* NOTE(review): the declaration of 'totalsize' and the value argument of
   tdb_ofs_write were dropped by the extraction (numbering gaps 90->94 and
   96->101). */
94 /* Offset of tailer from record header */
95 totalsize = sizeof(*rec) + rec->rec_len;
96 return tdb_ofs_write(tdb, offset + totalsize - sizeof(tdb_off_t),
101 * Read the record directly on the left.
102 * Fail if there is no record on the left.
/* NOTE(review): the comment opener and several interior lines (returns,
   closing braces, output-parameter stores) were dropped by the extraction;
   the embedded numbering gaps mark the missing code. */
104 static int read_record_on_left(struct tdb_context *tdb, tdb_off_t rec_ptr,
106 struct tdb_record *left_r)
110 struct tdb_record left_rec;
/* The left neighbour's tailer (its total size) sits in the tdb_off_t word
   immediately before this record's header. */
113 left_ptr = rec_ptr - sizeof(tdb_off_t);
/* Nothing can exist to the left of the hash-table area. */
115 if (left_ptr <= TDB_DATA_START(tdb->hash_size)) {
116 /* no record on the left */
120 /* Read in tailer and jump back to header */
121 ret = tdb_ofs_read(tdb, left_ptr, &left_size);
123 TDB_LOG((tdb, TDB_DEBUG_FATAL,
124 "tdb_free: left offset read failed at %u\n", left_ptr));
128 /* it could be uninitialised data */
129 if (left_size == 0 || left_size == TDB_PAD_U32) {
/* A size larger than our own offset would point before the file start. */
133 if (left_size > rec_ptr) {
137 left_ptr = rec_ptr - left_size;
/* Recheck the bound after jumping back by the tailer size. */
139 if (left_ptr < TDB_DATA_START(tdb->hash_size)) {
143 /* Now read in the left record */
144 ret = tdb->methods->tdb_read(tdb, left_ptr, &left_rec,
145 sizeof(left_rec), DOCONV());
147 TDB_LOG((tdb, TDB_DEBUG_FATAL,
148 "tdb_free: left read failed at %u (%u)\n",
149 left_ptr, left_size));
159 /* Add an element into the freelist. Merge adjacent records if
/* NOTE(review): the comment's closing and many interior lines (braces,
   gotos, returns, local declarations for 'r', 'left', 'l') were dropped by
   the extraction — numbering gaps such as 161->166 and 183->187 mark the
   missing code. Visible structure: take the allocation lock, write an
   initial tailer, try a right merge, then a left merge, else prepend the
   record to the freelist. */
161 int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
166 /* Allocation and tailer lock */
167 if (tdb_lock(tdb, -1, F_WRLCK) != 0)
170 /* set an initial tailer, so if we fail we don't leave a bogus record */
171 if (update_tailer(tdb, offset, rec) != 0) {
172 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed!\n"));
177 /* Look right first (I'm an Australian, dammit) */
/* Only attempt a right merge if a full record header fits between the end
   of this record and the end of the mapped file. */
178 if (offset + sizeof(*rec) + rec->rec_len + sizeof(*rec) <= tdb->map_size) {
179 tdb_off_t right = offset + sizeof(*rec) + rec->rec_len;
182 if (tdb->methods->tdb_read(tdb, right, &r, sizeof(r), DOCONV()) == -1) {
183 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: right read failed at %u\n", right));
187 /* If it's free, expand to include it. */
188 if (r.magic == TDB_FREE_MAGIC) {
/* Unlink the right neighbour from the freelist, then absorb its
   header and data into this record's length. */
189 if (remove_from_freelist(tdb, right, r.next) == -1) {
190 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: right free failed at %u\n", right));
193 rec->rec_len += sizeof(r) + r.rec_len;
194 if (update_tailer(tdb, offset, rec) == -1) {
195 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
/* Now try to merge into a free record on the left. */
203 if (read_record_on_left(tdb, offset, &left, &l) != 0) {
207 if (l.magic != TDB_FREE_MAGIC) {
211 /* It's free - expand to include it. */
213 /* we now merge the new record into the left record, rather than the other
214 way around. This makes the operation O(1) instead of O(n). This change
215 prevents traverse from being O(n^2) after a lot of deletes */
216 l.rec_len += sizeof(*rec) + rec->rec_len;
217 if (tdb_rec_write(tdb, left, &l) == -1) {
218 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_left failed at %u\n", left));
221 if (update_tailer(tdb, left, &l) == -1) {
222 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", left));
225 tdb_unlock(tdb, -1, F_WRLCK);
230 /* Now, prepend to free list */
231 rec->magic = TDB_FREE_MAGIC;
/* Link the (possibly right-merged) record in as the new freelist head. */
233 if (tdb_ofs_read(tdb, FREELIST_TOP, &rec->next) == -1 ||
234 tdb_rec_write(tdb, offset, rec) == -1 ||
235 tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) {
236 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free record write failed at offset=%u\n", offset));
240 /* And we're done. */
241 tdb_unlock(tdb, -1, F_WRLCK);
245 tdb_unlock(tdb, -1, F_WRLCK);
252 the core of tdb_allocate - called when we have decided which
253 free list entry to use
255 Note that we try to allocate by grabbing data from the end of an existing record,
256 not the beginning. This is so the left merge in a free is more likely to be
257 able to free up the record without fragmentation
/* NOTE(review): the comment delimiters above and several interior lines
   (returns, closing braces) were dropped by the extraction — numbering
   gaps such as 275->281 and 286->290 mark the missing code. */
259 static tdb_off_t tdb_allocate_ofs(struct tdb_context *tdb,
260 tdb_len_t length, tdb_off_t rec_ptr,
261 struct tdb_record *rec, tdb_off_t last_ptr)
/* Smallest leftover worth keeping as its own free record: a header, a
   tailer, plus 8 bytes of data. */
263 #define MIN_REC_SIZE (sizeof(struct tdb_record) + sizeof(tdb_off_t) + 8)
265 if (rec->rec_len < length + MIN_REC_SIZE) {
266 /* we have to grab the whole record */
268 /* unlink it from the previous record */
269 if (tdb_ofs_write(tdb, last_ptr, &rec->next) == -1) {
273 /* mark it not free */
274 rec->magic = TDB_MAGIC;
275 if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
281 /* we're going to just shorten the existing record */
282 rec->rec_len -= (length + sizeof(*rec));
283 if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
286 if (update_tailer(tdb, rec_ptr, rec) == -1) {
290 /* and setup the new record */
/* Carve the new record from the tail end of the shortened free record. */
291 rec_ptr += sizeof(*rec) + rec->rec_len;
293 memset(rec, '\0', sizeof(*rec));
294 rec->rec_len = length;
295 rec->magic = TDB_MAGIC;
297 if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
301 if (update_tailer(tdb, rec_ptr, rec) == -1) {
308 /* allocate some space from the free list. The offset returned points
309 to a unconnected tdb_record within the database with room for at
310 least length bytes of total data
312 0 is returned if the space could not be allocated
/* NOTE(review): comment delimiters and interior lines were dropped by the
   extraction. The second "tdb_off_t rec_ptr, last_ptr;" below appears to be
   a member of a 'bestfit' tracking struct whose surrounding declaration
   lines are missing — TODO confirm against the full source. */
314 static tdb_off_t tdb_allocate_from_freelist(
315 struct tdb_context *tdb, tdb_len_t length, struct tdb_record *rec)
317 tdb_off_t rec_ptr, last_ptr, newrec_ptr;
319 tdb_off_t rec_ptr, last_ptr;
322 float multiplier = 1.0;
324 /* over-allocate to reduce fragmentation */
327 /* Extra bytes required for tailer */
328 length += sizeof(tdb_off_t);
329 length = TDB_ALIGN(length, TDB_ALIGNMENT);
332 last_ptr = FREELIST_TOP;
334 /* read in the freelist top */
335 if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1)
339 bestfit.last_ptr = 0;
343 this is a best fit allocation strategy. Originally we used
344 a first fit strategy, but it suffered from massive fragmentation
345 issues when faced with a slowly increasing record size.
348 if (tdb_rec_free_read(tdb, rec_ptr, rec) == -1) {
/* Remember the smallest free record that is still big enough. */
352 if (rec->rec_len >= length) {
353 if (bestfit.rec_ptr == 0 ||
354 rec->rec_len < bestfit.rec_len) {
355 bestfit.rec_len = rec->rec_len;
356 bestfit.rec_ptr = rec_ptr;
357 bestfit.last_ptr = last_ptr;
361 /* move to the next record */
365 /* if we've found a record that is big enough, then
366 stop searching if its also not too big. The
367 definition of 'too big' changes as we scan
369 if (bestfit.rec_len > 0 &&
370 bestfit.rec_len < length * multiplier) {
374 /* this multiplier means we only extremely rarely
375 search more than 50 or so records. At 50 records we
376 accept records up to 11 times larger than what we
381 if (bestfit.rec_ptr != 0) {
/* Re-read the chosen record (the scan above reused *rec) and carve the
   allocation out of it. */
382 if (tdb_rec_free_read(tdb, bestfit.rec_ptr, rec) == -1) {
386 newrec_ptr = tdb_allocate_ofs(tdb, length, bestfit.rec_ptr,
387 rec, bestfit.last_ptr);
391 /* we didn't find enough space. See if we can expand the
392 database and if we can then try again */
393 if (tdb_expand(tdb, length + sizeof(*rec)) == 0)
/* Try to reuse a dead record of at least 'length' bytes from hash chain
   'hash'. On success the record is unlinked from its hash chain and its
   offset stored in *rec_ptr.
   NOTE(review): the declaration of 'last_ptr', the check of the
   tdb_find_dead result, and the surrounding braces were dropped by the
   extraction (numbering gaps 401->405 and 405->410). */
399 static bool tdb_alloc_dead(
400 struct tdb_context *tdb, int hash, tdb_len_t length,
401 tdb_off_t *rec_ptr, struct tdb_record *rec)
405 *rec_ptr = tdb_find_dead(tdb, hash, rec, length, &last_ptr);
410 * Unlink the record from the hash chain, it's about to be moved into
413 return (tdb_ofs_write(tdb, last_ptr, &rec->next) == 0);
417 * Chain "hash" is assumed to be locked
/* NOTE(review): comment delimiters, local declarations (i, list, ret,
   got_dead) and several returns were dropped by the extraction.
   Visible strategy: with no dead records configured, take the freelist
   lock blocking; otherwise first try non-blocking dead-record stealing
   and a non-blocking freelist allocation, falling back to the blocking
   path at the label below. */
420 tdb_off_t tdb_allocate(struct tdb_context *tdb, int hash, tdb_len_t length,
421 struct tdb_record *rec)
426 if (tdb->max_dead_records == 0) {
428 * No dead records to expect anywhere. Do the blocking
429 * freelist lock without trying to steal from others
431 goto blocking_freelist_allocate;
435 * The following loop tries to get the freelist lock nonblocking. If
436 * it gets the lock, allocate from there. If the freelist is busy,
437 * instead of waiting we try to steal dead records from other hash
440 * Be aware that we do nonblocking locks on the other hash chains as
441 * well and fail gracefully. This way we avoid deadlocks (we block two
442 * hash chains, something which is pretty bad normally)
445 for (i=0; i<tdb->hash_size; i++) {
/* Start with our own chain (i == 0), then scan the others. */
449 list = BUCKET(hash+i);
451 if (tdb_lock_nonblock(tdb, list, F_WRLCK) == 0) {
454 got_dead = tdb_alloc_dead(tdb, list, length, &ret, rec);
455 tdb_unlock(tdb, list, F_WRLCK);
462 if (tdb_lock_nonblock(tdb, -1, F_WRLCK) == 0) {
464 * Under the freelist lock take the chance to give
465 * back our dead records.
467 tdb_purge_dead(tdb, hash);
469 ret = tdb_allocate_from_freelist(tdb, length, rec);
470 tdb_unlock(tdb, -1, F_WRLCK);
475 blocking_freelist_allocate:
477 if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
480 ret = tdb_allocate_from_freelist(tdb, length, rec);
481 tdb_unlock(tdb, -1, F_WRLCK);
486 return the size of the freelist - used to decide if we should repack
/* NOTE(review): the comment delimiters, the counter/pointer declarations,
   the loop-body increment, and the return were dropped by the extraction
   (numbering gaps 488->493 and 498->502). Visible behavior: under a read
   lock on the freelist, walk the chain from FREELIST_TOP counting entries. */
488 _PUBLIC_ int tdb_freelist_size(struct tdb_context *tdb)
493 if (tdb_lock(tdb, -1, F_RDLCK) == -1) {
/* Each freelist record stores the next offset at its start, so reading
   through 'ptr' advances the walk; a 0 offset terminates the chain. */
498 while (tdb_ofs_read(tdb, ptr, &ptr) == 0 && ptr != 0) {
502 tdb_unlock(tdb, -1, F_RDLCK);