/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell         2000
   Copyright (C) Jeremy Allison               2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
28 #include "tdb_private.h"
/* 'right' merges can involve O(n^2) cost when combined with a
   traverse, so they are disabled until we find a way to do them in
   O(1) time.
*/
34 #define USE_RIGHT_MERGES 0
36 /* read a freelist record and check for simple errors */
37 int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct tdb_record *rec)
39 if (tdb->methods->tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1)
42 if (rec->magic == TDB_MAGIC) {
43 /* this happens when a app is showdown while deleting a record - we should
44 not completely fail when this happens */
45 TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read non-free magic 0x%x at offset=%u - fixing\n",
47 rec->magic = TDB_FREE_MAGIC;
48 if (tdb_rec_write(tdb, off, rec) == -1)
52 if (rec->magic != TDB_FREE_MAGIC) {
53 /* Ensure ecode is set for log fn. */
54 tdb->ecode = TDB_ERR_CORRUPT;
55 TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read bad magic 0x%x at offset=%u\n",
59 if (tdb->methods->tdb_oob(tdb, rec->next, sizeof(*rec), 0) != 0)
#if USE_RIGHT_MERGES
/*
 * Remove an element from the freelist.  Must hold the allocation lock.
 *
 * Walks the singly-linked freelist from FREELIST_TOP looking for "off";
 * when found, splices it out by pointing the predecessor at "next".
 * Returns 0 on success, -1 if "off" is not on the list (corruption).
 */
static int remove_from_freelist(struct tdb_context *tdb, tdb_off_t off, tdb_off_t next)
{
	tdb_off_t last_ptr, i;

	/* read in the freelist top */
	last_ptr = FREELIST_TOP;
	while (tdb_ofs_read(tdb, last_ptr, &i) != -1 && i != 0) {
		if (i == off) {
			/* We've found it! Unlink by rewriting the predecessor. */
			return tdb_ofs_write(tdb, last_ptr, &next);
		}
		/* Follow chain (next offset is at start of record) */
		last_ptr = i;
	}
	tdb->ecode = TDB_ERR_CORRUPT;
	TDB_LOG((tdb, TDB_DEBUG_FATAL,"remove_from_freelist: not on list at off=%u\n", off));
	return -1;
}
#endif
88 /* update a record tailer (must hold allocation lock) */
89 static int update_tailer(struct tdb_context *tdb, tdb_off_t offset,
90 const struct tdb_record *rec)
94 /* Offset of tailer from record header */
95 totalsize = sizeof(*rec) + rec->rec_len;
96 return tdb_ofs_write(tdb, offset + totalsize - sizeof(tdb_off_t),
101 * Read the record directly on the left.
102 * Fail if there is no record on the left.
104 static int read_record_on_left(struct tdb_context *tdb, tdb_off_t rec_ptr,
106 struct tdb_record *left_r)
110 struct tdb_record left_rec;
113 left_ptr = rec_ptr - sizeof(tdb_off_t);
115 if (left_ptr <= TDB_DATA_START(tdb->hash_size)) {
116 /* no record on the left */
120 /* Read in tailer and jump back to header */
121 ret = tdb_ofs_read(tdb, left_ptr, &left_size);
123 TDB_LOG((tdb, TDB_DEBUG_FATAL,
124 "tdb_free: left offset read failed at %u\n", left_ptr));
128 /* it could be uninitialised data */
129 if (left_size == 0 || left_size == TDB_PAD_U32) {
133 if (left_size > rec_ptr) {
137 left_ptr = rec_ptr - left_size;
139 if (left_ptr < TDB_DATA_START(tdb->hash_size)) {
143 /* Now read in the left record */
144 ret = tdb->methods->tdb_read(tdb, left_ptr, &left_rec,
145 sizeof(left_rec), DOCONV());
147 TDB_LOG((tdb, TDB_DEBUG_FATAL,
148 "tdb_free: left read failed at %u (%u)\n",
149 left_ptr, left_size));
159 /* Add an element into the freelist. Merge adjacent records if
161 int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
166 /* Allocation and tailer lock */
167 if (tdb_lock(tdb, -1, F_WRLCK) != 0)
170 /* set an initial tailer, so if we fail we don't leave a bogus record */
171 if (update_tailer(tdb, offset, rec) != 0) {
172 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed!\n"));
177 /* Look right first (I'm an Australian, dammit) */
178 if (offset + sizeof(*rec) + rec->rec_len + sizeof(*rec) <= tdb->map_size) {
179 tdb_off_t right = offset + sizeof(*rec) + rec->rec_len;
182 if (tdb->methods->tdb_read(tdb, right, &r, sizeof(r), DOCONV()) == -1) {
183 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: right read failed at %u\n", right));
187 /* If it's free, expand to include it. */
188 if (r.magic == TDB_FREE_MAGIC) {
189 if (remove_from_freelist(tdb, right, r.next) == -1) {
190 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: right free failed at %u\n", right));
193 rec->rec_len += sizeof(r) + r.rec_len;
194 if (update_tailer(tdb, offset, rec) == -1) {
195 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
203 if (read_record_on_left(tdb, offset, &left, &l) == 0) {
204 /* If it's free, expand to include it. */
205 if (l.magic == TDB_FREE_MAGIC) {
206 /* we now merge the new record into the left record, rather than the other
207 way around. This makes the operation O(1) instead of O(n). This change
208 prevents traverse from being O(n^2) after a lot of deletes */
209 l.rec_len += sizeof(*rec) + rec->rec_len;
210 if (tdb_rec_write(tdb, left, &l) == -1) {
211 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_left failed at %u\n", left));
214 if (update_tailer(tdb, left, &l) == -1) {
215 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
218 tdb_unlock(tdb, -1, F_WRLCK);
225 /* Now, prepend to free list */
226 rec->magic = TDB_FREE_MAGIC;
228 if (tdb_ofs_read(tdb, FREELIST_TOP, &rec->next) == -1 ||
229 tdb_rec_write(tdb, offset, rec) == -1 ||
230 tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) {
231 TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free record write failed at offset=%u\n", offset));
235 /* And we're done. */
236 tdb_unlock(tdb, -1, F_WRLCK);
240 tdb_unlock(tdb, -1, F_WRLCK);
247 the core of tdb_allocate - called when we have decided which
248 free list entry to use
250 Note that we try to allocate by grabbing data from the end of an existing record,
251 not the beginning. This is so the left merge in a free is more likely to be
252 able to free up the record without fragmentation
254 static tdb_off_t tdb_allocate_ofs(struct tdb_context *tdb,
255 tdb_len_t length, tdb_off_t rec_ptr,
256 struct tdb_record *rec, tdb_off_t last_ptr)
258 #define MIN_REC_SIZE (sizeof(struct tdb_record) + sizeof(tdb_off_t) + 8)
260 if (rec->rec_len < length + MIN_REC_SIZE) {
261 /* we have to grab the whole record */
263 /* unlink it from the previous record */
264 if (tdb_ofs_write(tdb, last_ptr, &rec->next) == -1) {
268 /* mark it not free */
269 rec->magic = TDB_MAGIC;
270 if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
276 /* we're going to just shorten the existing record */
277 rec->rec_len -= (length + sizeof(*rec));
278 if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
281 if (update_tailer(tdb, rec_ptr, rec) == -1) {
285 /* and setup the new record */
286 rec_ptr += sizeof(*rec) + rec->rec_len;
288 memset(rec, '\0', sizeof(*rec));
289 rec->rec_len = length;
290 rec->magic = TDB_MAGIC;
292 if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
296 if (update_tailer(tdb, rec_ptr, rec) == -1) {
303 /* allocate some space from the free list. The offset returned points
304 to a unconnected tdb_record within the database with room for at
305 least length bytes of total data
307 0 is returned if the space could not be allocated
309 static tdb_off_t tdb_allocate_from_freelist(
310 struct tdb_context *tdb, tdb_len_t length, struct tdb_record *rec)
312 tdb_off_t rec_ptr, last_ptr, newrec_ptr;
314 tdb_off_t rec_ptr, last_ptr;
317 float multiplier = 1.0;
319 /* over-allocate to reduce fragmentation */
322 /* Extra bytes required for tailer */
323 length += sizeof(tdb_off_t);
324 length = TDB_ALIGN(length, TDB_ALIGNMENT);
327 last_ptr = FREELIST_TOP;
329 /* read in the freelist top */
330 if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1)
334 bestfit.last_ptr = 0;
338 this is a best fit allocation strategy. Originally we used
339 a first fit strategy, but it suffered from massive fragmentation
340 issues when faced with a slowly increasing record size.
343 if (tdb_rec_free_read(tdb, rec_ptr, rec) == -1) {
347 if (rec->rec_len >= length) {
348 if (bestfit.rec_ptr == 0 ||
349 rec->rec_len < bestfit.rec_len) {
350 bestfit.rec_len = rec->rec_len;
351 bestfit.rec_ptr = rec_ptr;
352 bestfit.last_ptr = last_ptr;
356 /* move to the next record */
360 /* if we've found a record that is big enough, then
361 stop searching if its also not too big. The
362 definition of 'too big' changes as we scan
364 if (bestfit.rec_len > 0 &&
365 bestfit.rec_len < length * multiplier) {
369 /* this multiplier means we only extremely rarely
370 search more than 50 or so records. At 50 records we
371 accept records up to 11 times larger than what we
376 if (bestfit.rec_ptr != 0) {
377 if (tdb_rec_free_read(tdb, bestfit.rec_ptr, rec) == -1) {
381 newrec_ptr = tdb_allocate_ofs(tdb, length, bestfit.rec_ptr,
382 rec, bestfit.last_ptr);
386 /* we didn't find enough space. See if we can expand the
387 database and if we can then try again */
388 if (tdb_expand(tdb, length + sizeof(*rec)) == 0)
394 static bool tdb_alloc_dead(
395 struct tdb_context *tdb, int hash, tdb_len_t length,
396 tdb_off_t *rec_ptr, struct tdb_record *rec)
400 *rec_ptr = tdb_find_dead(tdb, hash, rec, length, &last_ptr);
405 * Unlink the record from the hash chain, it's about to be moved into
408 return (tdb_ofs_write(tdb, last_ptr, &rec->next) == 0);
412 * Chain "hash" is assumed to be locked
415 tdb_off_t tdb_allocate(struct tdb_context *tdb, int hash, tdb_len_t length,
416 struct tdb_record *rec)
421 if (tdb->max_dead_records == 0) {
423 * No dead records to expect anywhere. Do the blocking
424 * freelist lock without trying to steal from others
426 goto blocking_freelist_allocate;
430 * The following loop tries to get the freelist lock nonblocking. If
431 * it gets the lock, allocate from there. If the freelist is busy,
432 * instead of waiting we try to steal dead records from other hash
435 * Be aware that we do nonblocking locks on the other hash chains as
436 * well and fail gracefully. This way we avoid deadlocks (we block two
437 * hash chains, something which is pretty bad normally)
440 for (i=0; i<tdb->hash_size; i++) {
444 list = BUCKET(hash+i);
446 if (tdb_lock_nonblock(tdb, list, F_WRLCK) == 0) {
449 got_dead = tdb_alloc_dead(tdb, list, length, &ret, rec);
450 tdb_unlock(tdb, list, F_WRLCK);
457 if (tdb_lock_nonblock(tdb, -1, F_WRLCK) == 0) {
459 * Under the freelist lock take the chance to give
460 * back our dead records.
462 tdb_purge_dead(tdb, hash);
464 ret = tdb_allocate_from_freelist(tdb, length, rec);
465 tdb_unlock(tdb, -1, F_WRLCK);
470 blocking_freelist_allocate:
472 if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
475 ret = tdb_allocate_from_freelist(tdb, length, rec);
476 tdb_unlock(tdb, -1, F_WRLCK);
481 return the size of the freelist - used to decide if we should repack
483 _PUBLIC_ int tdb_freelist_size(struct tdb_context *tdb)
488 if (tdb_lock(tdb, -1, F_RDLCK) == -1) {
493 while (tdb_ofs_read(tdb, ptr, &ptr) == 0 && ptr != 0) {
497 tdb_unlock(tdb, -1, F_RDLCK);