1 /*
2    ctdb vacuuming events
3
4    Copyright (C) Ronnie Sahlberg  2009
5    Copyright (C) Michael Adam 2010-2013
6    Copyright (C) Stefan Metzmacher 2010-2011
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include "includes.h"
23 #include "tdb.h"
24 #include "system/network.h"
25 #include "system/filesys.h"
26 #include "system/dir.h"
27 #include "../include/ctdb_private.h"
28 #include "lib/tdb_wrap/tdb_wrap.h"
29 #include "lib/util/dlinklist.h"
31 #include "../common/rb_tree.h"
32
33 #define TIMELIMIT() timeval_current_ofs(10, 0)
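/* TIMELIMIT() is the timeout passed to the ctdb_ctrl_* calls issued during vacuuming (e.g. ctdb_ctrl_getnodemap()). */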
34
35 enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};
36
37 struct ctdb_vacuum_child_context {
38         struct ctdb_vacuum_child_context *next, *prev;
39         struct ctdb_vacuum_handle *vacuum_handle;
40         /* status pipe: the parent reads the child's result from fd[0] */
41         int fd[2];
42         pid_t child_pid;
43         enum vacuum_child_status status;
44         struct timeval start_time;
45 };
46
47 struct ctdb_vacuum_handle {
48         struct ctdb_db_context *ctdb_db;
49         struct ctdb_vacuum_child_context *child_ctx;
50         uint32_t fast_path_count;
51 };
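/*
 * Note: fast_path_count counts successful fast-path vacuuming runs
 * (it is bumped in vacuum_child_destructor()).  Once it reaches the
 * VacuumFastPathCount tunable, the next run is expected to include a
 * full database traverse; see ctdb_vacuum_traverse_db().
 */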
52
53
54 /*  a list of records to possibly delete */
55 struct vacuum_data {
56         struct ctdb_context *ctdb;
57         struct ctdb_db_context *ctdb_db;
58         struct tdb_context *dest_db;
59         trbt_tree_t *delete_list;
60         struct ctdb_marshall_buffer **vacuum_fetch_list;
61         struct timeval start;
62         bool traverse_error;
63         bool vacuum;
64         struct {
65                 struct {
66                         uint32_t added_to_vacuum_fetch_list;
67                         uint32_t added_to_delete_list;
68                         uint32_t deleted;
69                         uint32_t skipped;
70                         uint32_t error;
71                         uint32_t total;
72                 } delete_queue;
73                 struct {
74                         uint32_t scheduled;
75                         uint32_t skipped;
76                         uint32_t error;
77                         uint32_t total;
78                 } db_traverse;
79                 struct {
80                         uint32_t total;
81                         uint32_t remote_error;
82                         uint32_t local_error;
83                         uint32_t deleted;
84                         uint32_t skipped;
85                         uint32_t left;
86                 } delete_list;
87                 struct {
88                         uint32_t vacuumed;
89                         uint32_t copied;
90                 } repack;
91         } count;
92 };
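/*
 * Note: the nested "count" structure above collects per-run statistics.
 * The delete_queue, db_traverse and delete_list totals are checked for
 * consistency and logged at the end of each stage; see
 * ctdb_process_delete_queue() and ctdb_process_delete_list().
 */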
93
94 /* this structure contains the information for one record to be deleted */
95 struct delete_record_data {
96         struct ctdb_context *ctdb;
97         struct ctdb_db_context *ctdb_db;
98         struct ctdb_ltdb_header hdr;
99         TDB_DATA key;
100         uint8_t keydata[1]; /* variable-length key data, allocated past the end of the struct */
101 };
102
103 struct delete_records_list {
104         struct ctdb_marshall_buffer *records;
105         struct vacuum_data *vdata;
106 };
107
108 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
109                                            const struct ctdb_ltdb_header *hdr,
110                                            TDB_DATA key);
111
112 /**
113  * Store key and header in a tree, indexed by the key hash.
114  */
115 static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
116                                                struct ctdb_db_context *ctdb_db,
117                                                trbt_tree_t *tree,
118                                                const struct ctdb_ltdb_header *hdr,
119                                                TDB_DATA key)
120 {
121         struct delete_record_data *dd;
122         uint32_t hash;
123         size_t len;
124
125         len = offsetof(struct delete_record_data, keydata) + key.dsize;
126
127         dd = (struct delete_record_data *)talloc_size(tree, len);
128         if (dd == NULL) {
129                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
130                 return -1;
131         }
132         talloc_set_name_const(dd, "struct delete_record_data");
133
134         dd->ctdb      = ctdb;
135         dd->ctdb_db   = ctdb_db;
136         dd->key.dsize = key.dsize;
137         dd->key.dptr  = dd->keydata;
138         memcpy(dd->keydata, key.dptr, key.dsize);
139
140         dd->hdr = *hdr;
141
142         hash = ctdb_hash(&key);
143
144         trbt_insert32(tree, hash, dd);
145
146         return 0;
147 }
148
149 static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
150                                      struct ctdb_ltdb_header *hdr)
151 {
152         struct ctdb_context *ctdb = vdata->ctdb;
153         struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
154         uint32_t hash;
155         int ret;
156
157         hash = ctdb_hash(&key);
158
159         if (trbt_lookup32(vdata->delete_list, hash)) {
160                 DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
161                 return 0;
162         }
163
164         ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
165                                                   vdata->delete_list,
166                                                   hdr, key);
167         if (ret != 0) {
168                 return -1;
169         }
170
171         vdata->count.delete_list.total++;
172
173         return 0;
174 }
175
176 /**
177  * Add a record to the list of records to be sent
178  * to their lmaster with VACUUM_FETCH.
179  */
180 static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
181                                            TDB_DATA key)
182 {
183         struct ctdb_context *ctdb = vdata->ctdb;
184         uint32_t lmaster;
185         struct ctdb_marshall_buffer *vfl;
186
187         lmaster = ctdb_lmaster(ctdb, &key);
188
189         vfl = vdata->vacuum_fetch_list[lmaster];
190
191         vfl = ctdb_marshall_add(ctdb, vfl, vfl->db_id, ctdb->pnn,
192                                 key, NULL, tdb_null);
193         if (vfl == NULL) {
194                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
195                 vdata->traverse_error = true;
196                 return -1;
197         }
198
199         vdata->vacuum_fetch_list[lmaster] = vfl;
200
201         return 0;
202 }
203
204
205 static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
206                               struct timeval t, void *private_data);
207
208 static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
209 {
210         struct ctdb_ltdb_header *header =
211                 (struct ctdb_ltdb_header *)private_data;
212
213         if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
214                 return -1;
215         }
216
217         *header = *(struct ctdb_ltdb_header *)data.dptr;
218
219         return 0;
220 }
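/*
 * Note: vacuum_record_parser() is invoked via tdb_parse_record() while
 * the chainlock on the record is held.  A record whose data is exactly
 * one ctdb_ltdb_header (and nothing else) is an empty record, i.e. a
 * deletion candidate; the parser copies the header out so that callers
 * can re-check dmaster, RSN and flags before acting on the record.
 */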
221
222 /*
223  * traverse function for gathering the records that can be deleted
224  */
225 static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
226                            void *private_data)
227 {
228         struct vacuum_data *vdata = talloc_get_type(private_data,
229                                                     struct vacuum_data);
230         struct ctdb_context *ctdb = vdata->ctdb;
231         struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
232         uint32_t lmaster;
233         struct ctdb_ltdb_header *hdr;
234         int res = 0;
235
236         vdata->count.db_traverse.total++;
237
238         lmaster = ctdb_lmaster(ctdb, &key);
239         if (lmaster >= ctdb->num_nodes) {
240                 vdata->count.db_traverse.error++;
241                 DEBUG(DEBUG_CRIT, (__location__
242                                    " lmaster[%u] >= ctdb->num_nodes[%u] for key"
243                                    " with hash[%u]!\n",
244                                    (unsigned)lmaster,
245                                    (unsigned)ctdb->num_nodes,
246                                    (unsigned)ctdb_hash(&key)));
247                 return -1;
248         }
249
250         if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
251                 /* the record still carries data, so it is not an empty (deleted) record */
252                 vdata->count.db_traverse.skipped++;
253                 return 0;
254         }
255
256         hdr = (struct ctdb_ltdb_header *)data.dptr;
257
258         if (hdr->dmaster != ctdb->pnn) {
259                 vdata->count.db_traverse.skipped++;
260                 return 0;
261         }
262
263         /*
264          * Add the record to this process's delete_queue for processing
265          * in the subsequent traverse in the fast vacuum run.
266          */
267         res = insert_record_into_delete_queue(ctdb_db, hdr, key);
268         if (res != 0) {
269                 vdata->count.db_traverse.error++;
270         } else {
271                 vdata->count.db_traverse.scheduled++;
272         }
273
274         return 0;
275 }
276
277 /*
278  * traverse the tree of records to delete and marshall them into
279  * a blob
280  */
281 static int delete_marshall_traverse(void *param, void *data)
282 {
283         struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
284         struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
285         struct ctdb_marshall_buffer *m;
286
287         m = ctdb_marshall_add(recs, recs->records, recs->records->db_id,
288                               recs->records->db_id,
289                               dd->key, &dd->hdr, tdb_null);
290         if (m == NULL) {
291                 DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
292                 return -1;
293         }
294
295         recs->records = m;
296         return 0;
297 }
298
299 /**
300  * Variant of delete_marshall_traverse() that bumps the
301  * RSN of each traversed record in the database.
302  *
303  * This is needed to ensure that when rolling out our
304  * empty record copy before remote deletion, we as the
305  * record's dmaster keep a higher RSN than the non-dmaster
306  * nodes, which prevents old copies from resurrecting
307  * in recoveries.
308  */
309 static int delete_marshall_traverse_first(void *param, void *data)
310 {
311         struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
312         struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
313         struct ctdb_db_context *ctdb_db = dd->ctdb_db;
314         struct ctdb_context *ctdb = ctdb_db->ctdb;
315         struct ctdb_ltdb_header header;
316         uint32_t lmaster;
317         uint32_t hash = ctdb_hash(&(dd->key));
318         int res;
319
320         res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
321         if (res != 0) {
322                 DEBUG(DEBUG_ERR,
323                       (__location__ " Error getting chainlock on record with "
324                        "key hash [0x%08x] on database db[%s].\n",
325                        hash, ctdb_db->db_name));
326                 recs->vdata->count.delete_list.skipped++;
327                 recs->vdata->count.delete_list.left--;
328                 talloc_free(dd);
329                 return 0;
330         }
331
332         /*
333          * Verify that the record is still empty, its RSN has not
334          * changed and that we are still its lmaster and dmaster.
335          */
336
337         res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
338                                vacuum_record_parser, &header);
339         if (res != 0) {
340                 goto skip;
341         }
342
343         if (header.flags & CTDB_REC_RO_FLAGS) {
344                 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
345                                    "on database db[%s] has read-only flags. "
346                                    "skipping.\n",
347                                    hash, ctdb_db->db_name));
348                 goto skip;
349         }
350
351         if (header.dmaster != ctdb->pnn) {
352                 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
353                                    "on database db[%s] has been migrated away. "
354                                    "skipping.\n",
355                                    hash, ctdb_db->db_name));
356                 goto skip;
357         }
358
359         if (header.rsn != dd->hdr.rsn) {
360                 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
361                                    "on database db[%s] seems to have been "
362                                    "migrated away and back again (with empty "
363                                    "data). skipping.\n",
364                                    hash, ctdb_db->db_name));
365                 goto skip;
366         }
367
368         lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
369
370         if (lmaster != ctdb->pnn) {
371                 DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
372                                    "delete list (key hash [0x%08x], db[%s]). "
373                                    "Strange! skipping.\n",
374                                    hash, ctdb_db->db_name));
375                 goto skip;
376         }
377
378         /*
379          * Increment the record's RSN to ensure the dmaster (i.e. the current
380          * node) has the highest RSN of the record in the cluster.
381          * This is to prevent old record copies from resurrecting in recoveries
382          * if something should fail during the deletion process.
383          * Note that ctdb_ltdb_store_server() increments the RSN if called
384          * on the record's dmaster.
385          */
386
387         res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
388         if (res != 0) {
389                 DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
390                                   "key hash [0x%08x] on database db[%s].\n",
391                                   hash, ctdb_db->db_name));
392                 goto skip;
393         }
394
395         tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
396
397         goto done;
398
399 skip:
400         tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
401
402         recs->vdata->count.delete_list.skipped++;
403         recs->vdata->count.delete_list.left--;
404         talloc_free(dd);
405         dd = NULL;
406
407 done:
408         if (dd == NULL) {
409                 return 0;
410         }
411
412         return delete_marshall_traverse(param, data);
413 }
414
415 /**
416  * traverse function for the traversal of the delete_queue,
417  * the fast-path vacuuming list.
418  *
419  *  - If the record has been migrated off the node
420  *    or has been revived (filled with data) on the node,
421  *    then skip the record.
422  *
423  *  - If the current node is the record's lmaster and it is
424  *    a record that has never been migrated with data, then
425  *    delete the record from the local tdb.
426  *
427  *  - If the current node is the record's lmaster and it has
428  *    been migrated with data, then schedule it for the normal
429  *    vacuuming procedure (i.e. add it to the delete_list).
430  *
431  *  - If the current node is NOT the record's lmaster then
432  *    add it to the list of records that are to be sent to
433  *    the lmaster with the VACUUM_FETCH message.
434  */
435 static int delete_queue_traverse(void *param, void *data)
436 {
437         struct delete_record_data *dd =
438                 talloc_get_type(data, struct delete_record_data);
439         struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
440         struct ctdb_db_context *ctdb_db = dd->ctdb_db;
441         struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
442         int res;
443         struct ctdb_ltdb_header header;
444         uint32_t lmaster;
445         uint32_t hash = ctdb_hash(&(dd->key));
446
447         vdata->count.delete_queue.total++;
448
449         res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
450         if (res != 0) {
451                 DEBUG(DEBUG_ERR,
452                       (__location__ " Error getting chainlock on record with "
453                        "key hash [0x%08x] on database db[%s].\n",
454                        hash, ctdb_db->db_name));
455                 vdata->count.delete_queue.error++;
456                 return 0;
457         }
458
459         res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
460                                vacuum_record_parser, &header);
461         if (res != 0) {
462                 goto skipped;
463         }
464
465         if (header.dmaster != ctdb->pnn) {
466                 /* The record has been migrated off the node. Skip. */
467                 goto skipped;
468         }
469
470         if (header.rsn != dd->hdr.rsn) {
471                 /*
472                  * The record has been migrated off the node and back again.
473                  * But not requeued for deletion. Skip it.
474                  */
475                 goto skipped;
476         }
477
478         /*
479          * We are dmaster, and the record has no data, and it has
480          * not been migrated since it was queued for deletion.
481          *
482          * At this stage, the record could still have been revived locally
483          * and last been written with empty data. This can only be
484          * fixed with the addition of an active or delete flag. (TODO)
485          */
486
487         lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
488
489         if (lmaster != ctdb->pnn) {
490                 res = add_record_to_vacuum_fetch_list(vdata, dd->key);
491
492                 if (res != 0) {
493                         DEBUG(DEBUG_ERR,
494                               (__location__ " Error adding record to list "
495                                "of records to send to lmaster.\n"));
496                         vdata->count.delete_queue.error++;
497                 } else {
498                         vdata->count.delete_queue.added_to_vacuum_fetch_list++;
499                 }
500                 goto done;
501         }
502
503         /* use header->flags or dd->hdr.flags ?? */
504         if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
505                 res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);
506
507                 if (res != 0) {
508                         DEBUG(DEBUG_ERR,
509                               (__location__ " Error adding record to list "
510                                "of records for deletion on lmaster.\n"));
511                         vdata->count.delete_queue.error++;
512                 } else {
513                         vdata->count.delete_queue.added_to_delete_list++;
514                 }
515         } else {
516                 res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);
517
518                 if (res != 0) {
519                         DEBUG(DEBUG_ERR,
520                               (__location__ " Error deleting record with key "
521                                "hash [0x%08x] from local data base db[%s].\n",
522                                hash, ctdb_db->db_name));
523                         vdata->count.delete_queue.error++;
524                         goto done;
525                 }
526
527                 DEBUG(DEBUG_DEBUG,
528                       (__location__ " Deleted record with key hash "
529                        "[0x%08x] from local data base db[%s].\n",
530                        hash, ctdb_db->db_name));
531                 vdata->count.delete_queue.deleted++;
532         }
533
534         goto done;
535
536 skipped:
537         vdata->count.delete_queue.skipped++;
538
539 done:
540         tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
541
542         return 0;
543 }
544
545 /**
546  * Delete the records that we are lmaster and dmaster for and
547  * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
548  * control.
549  */
550 static int delete_record_traverse(void *param, void *data)
551 {
552         struct delete_record_data *dd =
553                 talloc_get_type(data, struct delete_record_data);
554         struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
555         struct ctdb_db_context *ctdb_db = dd->ctdb_db;
556         struct ctdb_context *ctdb = ctdb_db->ctdb;
557         int res;
558         struct ctdb_ltdb_header header;
559         uint32_t lmaster;
560         uint32_t hash = ctdb_hash(&(dd->key));
561
562         res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
563         if (res != 0) {
564                 DEBUG(DEBUG_ERR,
565                       (__location__ " Error getting chainlock on record with "
566                        "key hash [0x%08x] on database db[%s].\n",
567                        hash, ctdb_db->db_name));
568                 vdata->count.delete_list.local_error++;
569                 vdata->count.delete_list.left--;
570                 talloc_free(dd);
571                 return 0;
572         }
573
574         /*
575          * Verify that the record is still empty, its RSN has not
576          * changed and that we are still its lmaster and dmaster.
577          */
578
579         res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
580                                vacuum_record_parser, &header);
581         if (res != 0) {
582                 goto skip;
583         }
584
585         if (header.flags & CTDB_REC_RO_FLAGS) {
586                 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
587                                    "on database db[%s] has read-only flags. "
588                                    "skipping.\n",
589                                    hash, ctdb_db->db_name));
590                 goto skip;
591         }
592
593         if (header.dmaster != ctdb->pnn) {
594                 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
595                                    "on database db[%s] has been migrated away. "
596                                    "skipping.\n",
597                                    hash, ctdb_db->db_name));
598                 goto skip;
599         }
600
601         if (header.rsn != dd->hdr.rsn + 1) {
602                 /*
603                  * The record has been migrated off the node and back again.
604                  * But not requeued for deletion. Skip it.
605                  * (Note that the first marshall traverse has bumped the RSN
606                  *  on disk.)
607                  */
608                 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
609                                    "on database db[%s] seems to have been "
610                                    "migrated away and back again (with empty "
611                                    "data). skipping.\n",
612                                    hash, ctdb_db->db_name));
613                 goto skip;
614         }
615
616         lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
617
618         if (lmaster != ctdb->pnn) {
619                 DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
620                                    "delete list (key hash [0x%08x], db[%s]). "
621                                    "Strange! skipping.\n",
622                                    hash, ctdb_db->db_name));
623                 goto skip;
624         }
625
626         res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);
627
628         if (res != 0) {
629                 DEBUG(DEBUG_ERR,
630                       (__location__ " Error deleting record with key hash "
631                        "[0x%08x] from local data base db[%s].\n",
632                        hash, ctdb_db->db_name));
633                 vdata->count.delete_list.local_error++;
634                 goto done;
635         }
636
637         DEBUG(DEBUG_DEBUG,
638               (__location__ " Deleted record with key hash [0x%08x] from "
639                "local data base db[%s].\n", hash, ctdb_db->db_name));
640
641         vdata->count.delete_list.deleted++;
642         goto done;
643
644 skip:
645         vdata->count.delete_list.skipped++;
646
647 done:
648         tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
649
650         talloc_free(dd);
651         vdata->count.delete_list.left--;
652
653         return 0;
654 }
655
656 /**
657  * Traverse the delete_queue.
658  * Records are either deleted directly or added to
659  * the delete list or to the vacuum fetch lists for
660  * further processing.
661  */
662 static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
663                                       struct vacuum_data *vdata)
664 {
665         uint32_t sum;
666         int ret;
667
668         ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
669                                    delete_queue_traverse, vdata);
670
671         if (ret != 0) {
672                 DEBUG(DEBUG_ERR, (__location__ " Error traversing "
673                       "the delete queue.\n"));
674         }
675
676         sum = vdata->count.delete_queue.deleted
677             + vdata->count.delete_queue.skipped
678             + vdata->count.delete_queue.error
679             + vdata->count.delete_queue.added_to_delete_list
680             + vdata->count.delete_queue.added_to_vacuum_fetch_list;
681
682         if (vdata->count.delete_queue.total != sum) {
683                 DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
684                       "counts for db[%s]: total[%u] != sum[%u]\n",
685                       ctdb_db->db_name,
686                       (unsigned)vdata->count.delete_queue.total,
687                       (unsigned)sum));
688         }
689
690         if (vdata->count.delete_queue.total > 0) {
691                 DEBUG(DEBUG_INFO,
692                       (__location__
693                        " fast vacuuming delete_queue traverse statistics: "
694                        "db[%s] "
695                        "total[%u] "
696                        "del[%u] "
697                        "skp[%u] "
698                        "err[%u] "
699                        "adl[%u] "
700                        "avf[%u]\n",
701                        ctdb_db->db_name,
702                        (unsigned)vdata->count.delete_queue.total,
703                        (unsigned)vdata->count.delete_queue.deleted,
704                        (unsigned)vdata->count.delete_queue.skipped,
705                        (unsigned)vdata->count.delete_queue.error,
706                        (unsigned)vdata->count.delete_queue.added_to_delete_list,
707                        (unsigned)vdata->count.delete_queue.added_to_vacuum_fetch_list));
708         }
709
710         return;
711 }
712
713 /**
714  * Read-only traverse of the database, looking for records that
715  * are candidates for vacuuming.
716  *
717  * This is not done on each run but only on every
718  * VacuumFastPathCount'th run (a tunable).
719  */
720 static void ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
721                                     struct vacuum_data *vdata)
722 {
723         int ret;
724
725         ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
726         if (ret == -1 || vdata->traverse_error) {
727                 DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
728                                   "'%s'\n", ctdb_db->db_name));
729                 return;
730         }
731
732         if (vdata->count.db_traverse.total > 0) {
733                 DEBUG(DEBUG_INFO,
734                       (__location__
735                        " full vacuuming db traverse statistics: "
736                        "db[%s] "
737                        "total[%u] "
738                        "skp[%u] "
739                        "err[%u] "
740                        "sched[%u]\n",
741                        ctdb_db->db_name,
742                        (unsigned)vdata->count.db_traverse.total,
743                        (unsigned)vdata->count.db_traverse.skipped,
744                        (unsigned)vdata->count.db_traverse.error,
745                        (unsigned)vdata->count.db_traverse.scheduled));
746         }
747
748         return;
749 }
750
751 /**
752  * Process the vacuum fetch lists:
753  * For records for which we are not the lmaster, tell the lmaster to
754  * fetch the record.
755  */
756 static void ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
757                                             struct vacuum_data *vdata)
758 {
759         int i;
760         struct ctdb_context *ctdb = ctdb_db->ctdb;
761
762         for (i = 0; i < ctdb->num_nodes; i++) {
763                 TDB_DATA data;
764                 struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];
765
766                 if (ctdb->nodes[i]->pnn == ctdb->pnn) {
767                         continue;
768                 }
769
770                 if (vfl->count == 0) {
771                         continue;
772                 }
773
774                 DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
775                                    vfl->count, ctdb->nodes[i]->pnn,
776                                    ctdb_db->db_name));
777
778                 data = ctdb_marshall_finish(vfl);
779                 if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
780                                              CTDB_SRVID_VACUUM_FETCH,
781                                              data) != 0)
782                 {
783                         DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
784                                           "fetch message to %u\n",
785                                           ctdb->nodes[i]->pnn));
786                 }
787         }
788
789         return;
790 }
791
792 /**
793  * Process the delete list:
794  *
795  * This is the last step of vacuuming that consistently deletes
796  * those records that have been migrated with data and can hence
797  * not be deleted when leaving a node.
798  *
799  * In this step, the lmaster does the final deletion of those empty
800  * records that it is also dmaster for. It has usually received
801  * at least some of these records previously from the former dmasters
802  * with the vacuum fetch message.
803  *
804  * This last step is implemented as a 3-phase process to protect from
805  * races leading to data corruption:
806  *
807  *  1) Send the lmaster's copy to all other active nodes with the
808  *     RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
809  *  2) Send the records that could successfully be stored remotely
810  *     in step #1 to all active nodes with the TRY_DELETE_RECORDS
811  *     control. The remote nodes delete their local copies.
812  *  3) The lmaster locally deletes its copies of all records that
813  *     could successfully be deleted remotely in step #2.
814  */
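/*
 * Note on failures: if a remote node reports that it could not store
 * (step 1) or delete (step 2) an individual record, that record is
 * dropped from the delete list (counted as a remote_error) and its
 * empty local copy is left in place, so it can be picked up again by a
 * later vacuum run.  If a control call to a node fails as a whole, the
 * remaining processing of the delete list is aborted.
 */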
815 static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
816                                      struct vacuum_data *vdata)
817 {
818         int ret, i;
819         struct ctdb_context *ctdb = ctdb_db->ctdb;
820         struct delete_records_list *recs;
821         TDB_DATA indata;
822         struct ctdb_node_map *nodemap;
823         uint32_t *active_nodes;
824         int num_active_nodes;
825         TALLOC_CTX *tmp_ctx;
826         uint32_t sum;
827
828         if (vdata->count.delete_list.total == 0) {
829                 return;
830         }
831
832         tmp_ctx = talloc_new(vdata);
833         if (tmp_ctx == NULL) {
834                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
835                 return;
836         }
837
838         vdata->count.delete_list.left = vdata->count.delete_list.total;
839
840         /*
841          * get the list of currently active nodes
842          */
843
844         ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
845                                    CTDB_CURRENT_NODE,
846                                    tmp_ctx,
847                                    &nodemap);
848         if (ret != 0) {
849                 DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
850                 goto done;
851         }
852
853         active_nodes = list_of_active_nodes(ctdb, nodemap,
854                                             nodemap, /* talloc context */
855                                             false /* include self */);
856         /* yuck! ;-) */
857         num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);
858
859         /*
860          * Now delete the records on all active nodes in a three-phase process:
861          * 1) send all active remote nodes the current empty copy with this
862          *    node as DMASTER
863          * 2) if all nodes could store the new copy,
864          *    tell all the active remote nodes to delete their copies
865          * 3) if all remote nodes deleted their record copy, delete it locally
866          */
867
868         /*
869          * Step 1:
870          * Send currently empty record copy to all active nodes for storing.
871          */
872
873         recs = talloc_zero(tmp_ctx, struct delete_records_list);
874         if (recs == NULL) {
875                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
876                 goto done;
877         }
878         recs->records = (struct ctdb_marshall_buffer *)
879                 talloc_zero_size(recs,
880                                  offsetof(struct ctdb_marshall_buffer, data));
881         if (recs->records == NULL) {
882                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
883                 goto done;
884         }
885         recs->records->db_id = ctdb_db->db_id;
886         recs->vdata = vdata;
887
888         /*
889          * traverse the tree of all records we want to delete and
890          * create a blob we can send to the other nodes.
891          *
892          * We call delete_marshall_traverse_first() to bump the
893          * records' RSNs in the database, to ensure we (as dmaster)
894          * keep the highest RSN of the records in the cluster.
895          */
896         ret = trbt_traversearray32(vdata->delete_list, 1,
897                                    delete_marshall_traverse_first, recs);
898         if (ret != 0) {
899                 DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
900                       "delete list for first marshalling.\n"));
901                 goto done;
902         }
903
904         indata = ctdb_marshall_finish(recs->records);
905
906         for (i = 0; i < num_active_nodes; i++) {
907                 struct ctdb_marshall_buffer *records;
908                 struct ctdb_rec_data *rec;
909                 int32_t res;
910                 TDB_DATA outdata;
911
912                 ret = ctdb_control(ctdb, active_nodes[i], 0,
913                                 CTDB_CONTROL_RECEIVE_RECORDS, 0,
914                                 indata, recs, &outdata, &res,
915                                 NULL, NULL);
916                 if (ret != 0 || res != 0) {
917                         DEBUG(DEBUG_ERR, ("Error storing record copies on "
918                                           "node %u: ret[%d] res[%d]\n",
919                                           active_nodes[i], ret, res));
920                         goto done;
921                 }
922
923                 /*
924                  * outdata contains the list of records coming back
925                  * from the node: These are the records that the
926                  * remote node could not store. We remove these from
927                  * the list to process further.
928                  */
929                 records = (struct ctdb_marshall_buffer *)outdata.dptr;
930                 rec = (struct ctdb_rec_data *)&records->data[0];
931                 while (records->count-- > 1) {
932                         TDB_DATA reckey, recdata;
933                         struct ctdb_ltdb_header *rechdr;
934                         struct delete_record_data *dd;
935
936                         reckey.dptr = &rec->data[0];
937                         reckey.dsize = rec->keylen;
938                         recdata.dptr = &rec->data[reckey.dsize];
939                         recdata.dsize = rec->datalen;
940
941                         if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
942                                 DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
943                                 goto done;
944                         }
945                         rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
946                         recdata.dptr += sizeof(*rechdr);
947                         recdata.dsize -= sizeof(*rechdr);
948
949                         dd = (struct delete_record_data *)trbt_lookup32(
950                                         vdata->delete_list,
951                                         ctdb_hash(&reckey));
952                         if (dd != NULL) {
953                                 /*
954                                  * The other node could not store the record
955                                  * copy and it is the first node that failed.
956                                  * So we should remove it from the tree and
957                                  * update statistics.
958                                  */
959                                 talloc_free(dd);
960                                 vdata->count.delete_list.remote_error++;
961                                 vdata->count.delete_list.left--;
962                         } else {
963                                 DEBUG(DEBUG_ERR, (__location__ " Failed to "
964                                       "find record with hash 0x%08x coming "
965                                       "back from RECEIVE_RECORDS "
966                                       "control in delete list.\n",
967                                       ctdb_hash(&reckey)));
968                                 vdata->count.delete_list.local_error++;
969                                 vdata->count.delete_list.left--;
970                         }
971
972                         rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec); /* advance to the next packed record */
973                 }
974         }
975
976         if (vdata->count.delete_list.left == 0) {
977                 goto success;
978         }
979
980         /*
981          * Step 2:
982          * Send the remaining records to all active nodes for deletion.
983          *
984          * The lmaster's (i.e. our) copies of these records have been stored
985          * successfully on the other nodes.
986          */
987
988         /*
989          * Create a marshall blob from the remaining list of records to delete.
990          */
991
992         talloc_free(recs->records);
993
994         recs->records = (struct ctdb_marshall_buffer *)
995                 talloc_zero_size(recs,
996                                  offsetof(struct ctdb_marshall_buffer, data));
997         if (recs->records == NULL) {
998                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
999                 goto done;
1000         }
1001         recs->records->db_id = ctdb_db->db_id;
1002
1003         ret = trbt_traversearray32(vdata->delete_list, 1,
1004                                    delete_marshall_traverse, recs);
1005         if (ret != 0) {
1006                 DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
1007                       "delete list for second marshalling.\n"));
1008                 goto done;
1009         }
1010
1011         indata = ctdb_marshall_finish(recs->records);
1012
1013         for (i = 0; i < num_active_nodes; i++) {
1014                 struct ctdb_marshall_buffer *records;
1015                 struct ctdb_rec_data *rec;
1016                 int32_t res;
1017                 TDB_DATA outdata;
1018
1019                 ret = ctdb_control(ctdb, active_nodes[i], 0,
1020                                 CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
1021                                 indata, recs, &outdata, &res,
1022                                 NULL, NULL);
1023                 if (ret != 0 || res != 0) {
1024                         DEBUG(DEBUG_ERR, ("Failed to delete records on "
1025                                           "node %u: ret[%d] res[%d]\n",
1026                                           active_nodes[i], ret, res));
1027                         goto done;
1028                 }
1029
1030                 /*
1031                  * outdata contains the list of records coming back
1032                  * from the node: These are the records that the
1033                  * remote node could not delete. We remove these from
1034                  * the list to delete locally.
1035                  */
1036                 records = (struct ctdb_marshall_buffer *)outdata.dptr;
1037                 rec = (struct ctdb_rec_data *)&records->data[0];
1038                 while (records->count-- > 1) {
1039                         TDB_DATA reckey, recdata;
1040                         struct ctdb_ltdb_header *rechdr;
1041                         struct delete_record_data *dd;
1042
1043                         reckey.dptr = &rec->data[0];
1044                         reckey.dsize = rec->keylen;
1045                         recdata.dptr = &rec->data[reckey.dsize];
1046                         recdata.dsize = rec->datalen;
1047
1048                         if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
1049                                 DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
1050                                 goto done;
1051                         }
1052                         rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
1053                         recdata.dptr += sizeof(*rechdr);
1054                         recdata.dsize -= sizeof(*rechdr);
1055
1056                         dd = (struct delete_record_data *)trbt_lookup32(
1057                                         vdata->delete_list,
1058                                         ctdb_hash(&reckey));
1059                         if (dd != NULL) {
1060                                 /*
1061                                  * The other node could not delete the
1062                                  * record and it is the first node that
1063                                  * failed. So we should remove it from
1064                                  * the tree and update statistics.
1065                                  */
1066                                 talloc_free(dd);
1067                                 vdata->count.delete_list.remote_error++;
1068                                 vdata->count.delete_list.left--;
1069                         } else {
1070                                 DEBUG(DEBUG_ERR, (__location__ " Failed to "
1071                                       "find record with hash 0x%08x coming "
1072                                       "back from TRY_DELETE_RECORDS "
1073                                       "control in delete list.\n",
1074                                       ctdb_hash(&reckey)));
1075                                 vdata->count.delete_list.local_error++;
1076                                 vdata->count.delete_list.left--;
1077                         }
1078
1079                         rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec); /* advance to the next packed record */
1080                 }
1081         }
1082
1083         if (vdata->count.delete_list.left == 0) {
1084                 goto success;
1085         }
1086
1087         /*
1088          * Step 3:
1089          * Delete the remaining records locally.
1090          *
1091          * These records have successfully been deleted on all
1092          * active remote nodes.
1093          */
1094
1095         ret = trbt_traversearray32(vdata->delete_list, 1,
1096                                    delete_record_traverse, vdata);
1097         if (ret != 0) {
1098                 DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
1099                       "delete list for deletion.\n"));
1100         }
1101
1102 success:
1103
1104         if (vdata->count.delete_list.left != 0) {
1105                 DEBUG(DEBUG_ERR, (__location__ " Vacuum db[%s] error: "
1106                       "there are %u records left for deletion after "
1107                       "processing delete list\n",
1108                       ctdb_db->db_name,
1109                       (unsigned)vdata->count.delete_list.left));
1110         }
1111
1112         sum = vdata->count.delete_list.deleted
1113             + vdata->count.delete_list.skipped
1114             + vdata->count.delete_list.remote_error
1115             + vdata->count.delete_list.local_error
1116             + vdata->count.delete_list.left;
1117
1118         if (vdata->count.delete_list.total != sum) {
1119                 DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
1120                       "delete list counts for db[%s]: total[%u] != sum[%u]\n",
1121                       ctdb_db->db_name,
1122                       (unsigned)vdata->count.delete_list.total,
1123                       (unsigned)sum));
1124         }
1125
1126         if (vdata->count.delete_list.total > 0) {
1127                 DEBUG(DEBUG_INFO,
1128                       (__location__
1129                        " vacuum delete list statistics: "
1130                        "db[%s] "
1131                        "total[%u] "
1132                        "del[%u] "
1133                        "skip[%u] "
1134                        "rem.err[%u] "
1135                        "loc.err[%u] "
1136                        "left[%u]\n",
1137                        ctdb_db->db_name,
1138                        (unsigned)vdata->count.delete_list.total,
1139                        (unsigned)vdata->count.delete_list.deleted,
1140                        (unsigned)vdata->count.delete_list.skipped,
1141                        (unsigned)vdata->count.delete_list.remote_error,
1142                        (unsigned)vdata->count.delete_list.local_error,
1143                        (unsigned)vdata->count.delete_list.left));
1144         }
1145
1146 done:
1147         talloc_free(tmp_ctx);
1148
1149         return;
1150 }
1151
1152 /**
1153  * initialize the vacuum_data
1154  */
1155 static struct vacuum_data *ctdb_vacuum_init_vacuum_data(
1156                                         struct ctdb_db_context *ctdb_db,
1157                                         TALLOC_CTX *mem_ctx)
1158 {
1159         int i;
1160         struct ctdb_context *ctdb = ctdb_db->ctdb;
1161         struct vacuum_data *vdata;
1162
1163         vdata = talloc_zero(mem_ctx, struct vacuum_data);
1164         if (vdata == NULL) {
1165                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1166                 return NULL;
1167         }
1168
1169         vdata->ctdb = ctdb_db->ctdb;
1170         vdata->ctdb_db = ctdb_db;
1171         vdata->delete_list = trbt_create(vdata, 0);
1172         if (vdata->delete_list == NULL) {
1173                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1174                 goto fail;
1175         }
1176
1177         vdata->start = timeval_current();
1178
1179         vdata->count.delete_queue.added_to_delete_list = 0;
1180         vdata->count.delete_queue.added_to_vacuum_fetch_list = 0;
1181         vdata->count.delete_queue.deleted = 0;
1182         vdata->count.delete_queue.skipped = 0;
1183         vdata->count.delete_queue.error = 0;
1184         vdata->count.delete_queue.total = 0;
1185         vdata->count.db_traverse.scheduled = 0;
1186         vdata->count.db_traverse.skipped = 0;
1187         vdata->count.db_traverse.error = 0;
1188         vdata->count.db_traverse.total = 0;
1189         vdata->count.delete_list.total = 0;
1190         vdata->count.delete_list.left = 0;
1191         vdata->count.delete_list.remote_error = 0;
1192         vdata->count.delete_list.local_error = 0;
1193         vdata->count.delete_list.skipped = 0;
1194         vdata->count.delete_list.deleted = 0;
1195
1196         /* the list needs to be of length num_nodes */
1197         vdata->vacuum_fetch_list = talloc_zero_array(vdata,
1198                                                 struct ctdb_marshall_buffer *,
1199                                                 ctdb->num_nodes);
1200         if (vdata->vacuum_fetch_list == NULL) {
1201                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1202                 goto fail;
1203         }
1204         for (i = 0; i < ctdb->num_nodes; i++) {
1205                 vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
1206                         talloc_zero_size(vdata->vacuum_fetch_list,
1207                                          offsetof(struct ctdb_marshall_buffer, data));
1208                 if (vdata->vacuum_fetch_list[i] == NULL) {
1209                         DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1210                         talloc_free(vdata);
1211                         return NULL;
1212                 }
1213                 vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
1214         }
1215
1216         return vdata;
1217
1218 fail:
1219         talloc_free(vdata);
1220         return NULL;
1221 }
1222
1223 /**
1224  * Vacuum a DB:
1225  *  - Always do the fast vacuuming run, which traverses
1226  *    the in-memory delete queue: these records have been
1227  *    scheduled for deletion.
1228  *  - Only if explicitly requested, the database is traversed
1229  *    in order to use the traditional heuristics on empty records
1230  *    to trigger deletion.
1231  *    This is done only every VacuumFastPathCount'th vacuuming run.
1232  *
1233  * The traverse runs fill two lists:
1234  *
1235  * - The delete_list:
1236  *   This is the list of empty records the current
1237  *   node is lmaster and dmaster for. These records are later
1238  *   deleted first on other nodes and then locally.
1239  *
1240  *   The fast vacuuming run has a short cut for those records
1241  *   that have never been migrated with data: these records
1242  *   are immediately deleted locally, since they have left
1243  *   no trace on other nodes.
1244  *
1245  * - The vacuum_fetch lists
1246  *   (one for each other lmaster node):
1247  *   The records in this list are sent for deletion to
1248  *   their lmaster in a bulk VACUUM_FETCH message.
1249  *
1250  *   The lmaster then migrates all these records to itself
1251  *   so that they can be vacuumed there.
1252  *
1253  * This executes in the child context.
1254  */
1255 static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
1256                           bool full_vacuum_run)
1257 {
1258         struct ctdb_context *ctdb = ctdb_db->ctdb;
1259         int ret, pnn;
1260         struct vacuum_data *vdata;
1261         TALLOC_CTX *tmp_ctx;
1262
1263         DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
1264                            "%s db_id[0x%08x]\n",
1265                            full_vacuum_run ? "full" : "fast",
1266                            ctdb_db->db_name, ctdb_db->db_id));
1267
1268         ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
1269         if (ret != 0) {
1270                 DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
1271                 return ret;
1272         }
1273
1274         pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
1275         if (pnn == -1) {
1276                 DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
1277                 return -1;
1278         }
1279
1280         ctdb->pnn = pnn;
1281
1282         tmp_ctx = talloc_new(ctdb_db);
1283         if (tmp_ctx == NULL) {
1284                 DEBUG(DEBUG_ERR, ("Out of memory!\n"));
1285                 return -1;
1286         }
1287
1288         vdata = ctdb_vacuum_init_vacuum_data(ctdb_db, tmp_ctx);
1289         if (vdata == NULL) {
1290                 talloc_free(tmp_ctx);
1291                 return -1;
1292         }
1293
1294         if (full_vacuum_run) {
1295                 ctdb_vacuum_traverse_db(ctdb_db, vdata);
1296         }
1297
1298         ctdb_process_delete_queue(ctdb_db, vdata);
1299
1300         ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);
1301
1302         ctdb_process_delete_list(ctdb_db, vdata);
1303
1304         talloc_free(tmp_ctx);
1305
1306         /* this ensures we run our event queue */
1307         ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
1308
1309         return 0;
1310 }
1311
1312 /*
1313  * repack and vacuum a db
1314  * called from the child context
1315  */
1316 static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
1317                                      bool full_vacuum_run)
1318 {
1319         uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
1320         const char *name = ctdb_db->db_name;
1321         int freelist_size = 0;
1322         int ret;
1323
1324         if (ctdb_vacuum_db(ctdb_db, full_vacuum_run) != 0) {
1325                 DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
1326         }
1327
1328         freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
1329         if (freelist_size == -1) {
1330                 DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
1331                 return -1;
1332         }
1333
1334         /*
1335          * decide if a repack is necessary
1336          */
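        /* Note: a repack limit of 0 disables repacking altogether. */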
1337         if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
1338         {
1339                 return 0;
1340         }
1341
1342         DEBUG(DEBUG_INFO, ("Repacking %s with %u freelist entries\n",
1343                            name, freelist_size));
1344
1345         ret = tdb_repack(ctdb_db->ltdb->tdb);
1346         if (ret != 0) {
1347                 DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
1348                 return -1;
1349         }
1350
1351         return 0;
1352 }
1353
1354 static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
1355 {
1356         uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;
1357
1358         return interval;
1359 }
1360
1361 static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
1362 {
1363         double l = timeval_elapsed(&child_ctx->start_time);
1364         struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
1365         struct ctdb_context *ctdb = ctdb_db->ctdb;
1366
1367         CTDB_UPDATE_DB_LATENCY(ctdb_db, "vacuum", vacuum.latency, l);
1368         DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));
1369
1370         if (child_ctx->child_pid != -1) {
1371                 ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
1372         } else {
1373                 /* Bump the number of successful fast-path runs. */
1374                 child_ctx->vacuum_handle->fast_path_count++;
1375         }
1376
1377         DLIST_REMOVE(ctdb->vacuumers, child_ctx);
1378
1379         event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
1380                         timeval_current_ofs(get_vacuum_interval(ctdb_db), 0), 
1381                         ctdb_vacuum_event, child_ctx->vacuum_handle);
1382
1383         return 0;
1384 }
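/*
 * Note: the destructor above runs both when the child completed normally
 * (vacuum_child_handler() has reset child_pid to -1) and when the child
 * context is freed early, e.g. from vacuum_child_timeout(), in which case
 * the still-running child is killed.  Either way, the next vacuuming
 * event for this database is rescheduled.
 */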
1385
1386 /*
1387  * this event is generated when a vacuum child process times out
1388  */
1389 static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
1390                                          struct timeval t, void *private_data)
1391 {
1392         struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1393
1394         DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));
1395
1396         child_ctx->status = VACUUM_TIMEOUT;
1397
1398         talloc_free(child_ctx);
1399 }
1400
1401
1402 /*
1403  * this event is generated when a vacuum child process has completed
1404  */
1405 static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
1406                              uint16_t flags, void *private_data)
1407 {
1408         struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1409         char c = 0;
1410         int ret;
1411
1412         DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
1413         child_ctx->child_pid = -1;
1414
1415         ret = sys_read(child_ctx->fd[0], &c, 1);
1416         if (ret != 1 || c != 0) {
1417                 child_ctx->status = VACUUM_ERROR;
1418                 DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
1419         } else {
1420                 child_ctx->status = VACUUM_OK;
1421         }
1422
1423         talloc_free(child_ctx);
1424 }
1425
1426 /*
1427  * this event is called every time we need to start a new vacuum process
1428  */
1429 static void
1430 ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
1431                                struct timeval t, void *private_data)
1432 {
1433         struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
1434         struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
1435         struct ctdb_context *ctdb = ctdb_db->ctdb;
1436         struct ctdb_vacuum_child_context *child_ctx;
1437         struct tevent_fd *fde;
1438         int ret;
1439
1440         /* we don't vacuum if we are in recovery mode, or the db is frozen */
1441         if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
1442             ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
1443                 DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
1444                                    ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
1445                                    : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
1446                                    ? "freeze pending"
1447                                    : "frozen"));
1448                 event_add_timed(ctdb->ev, vacuum_handle,
1449                         timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1450                         ctdb_vacuum_event, vacuum_handle);
1451                 return;
1452         }
1453
1454         /* Do not allow multiple vacuuming child processes to be active at the
1455          * same time.  If there is a vacuuming child process active, delay
1456          * the new vacuuming event to stagger vacuuming events.
1457          */
1458         if (ctdb->vacuumers != NULL) {
1459                 event_add_timed(ctdb->ev, vacuum_handle,
1460                                 timeval_current_ofs(0, 500*1000),
1461                                 ctdb_vacuum_event, vacuum_handle);
1462                 return;
1463         }
1464
1465         child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
1466         if (child_ctx == NULL) {
1467                 DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
1468                 ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
1469         }
1470
1471
1472         ret = pipe(child_ctx->fd);
1473         if (ret != 0) {
1474                 talloc_free(child_ctx);
1475                 DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
1476                 event_add_timed(ctdb->ev, vacuum_handle,
1477                         timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1478                         ctdb_vacuum_event, vacuum_handle);
1479                 return;
1480         }
1481
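        /*
         * The fast-path counter wraps back to 0 once it exceeds the
         * vacuum_fast_path_count tunable; the child treats a value of 0
         * as the trigger for a full vacuum run.
         */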
1482         if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
1483                 vacuum_handle->fast_path_count = 0;
1484         }
1485
1486         child_ctx->child_pid = ctdb_fork(ctdb);
1487         if (child_ctx->child_pid == (pid_t)-1) {
1488                 close(child_ctx->fd[0]);
1489                 close(child_ctx->fd[1]);
1490                 talloc_free(child_ctx);
1491                 DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
1492                 event_add_timed(ctdb->ev, vacuum_handle,
1493                         timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1494                         ctdb_vacuum_event, vacuum_handle);
1495                 return;
1496         }
1497
1498
1499         if (child_ctx->child_pid == 0) {
1500                 char cc = 0;
1501                 bool full_vacuum_run = false;
1502                 close(child_ctx->fd[0]);
1503
1504                 DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
1505                 ctdb_set_process_name("ctdb_vacuum");
1506                 if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
1507                         DEBUG(DEBUG_CRIT, (__location__ " ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
1508                         _exit(1);
1509                 }
1510
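                /*
                 * Do a full (slow-path) vacuum run whenever the fast-path
                 * counter has just wrapped to 0 and fast-path vacuuming is
                 * enabled (vacuum_fast_path_count > 0).
                 */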
1511                 if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
1512                     (vacuum_handle->fast_path_count == 0))
1513                 {
1514                         full_vacuum_run = true;
1515                 }
1516                 cc = ctdb_vacuum_and_repack_db(ctdb_db, full_vacuum_run);
1517
1518                 sys_write(child_ctx->fd[1], &cc, 1);
1519                 _exit(0);
1520         }
1521
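        /*
         * Parent: keep the read end of the pipe to collect the child's
         * status byte and close the write end.
         */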
1522         set_close_on_exec(child_ctx->fd[0]);
1523         close(child_ctx->fd[1]);
1524
1525         child_ctx->status = VACUUM_RUNNING;
1526         child_ctx->start_time = timeval_current();
1527
1528         DLIST_ADD(ctdb->vacuumers, child_ctx);
1529         talloc_set_destructor(child_ctx, vacuum_child_destructor);
1530
1531         /*
1532          * Clear the fastpath vacuuming list in the parent.
1533          */
1534         talloc_free(ctdb_db->delete_queue);
1535         ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
1536         if (ctdb_db->delete_queue == NULL) {
1537                 /* fatal here? ... */
1538                 ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
1539                                  "in parent context. Shutting down\n");
1540         }
1541
1542         event_add_timed(ctdb->ev, child_ctx,
1543                 timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
1544                 vacuum_child_timeout, child_ctx);
1545
1546         DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));
1547
1548         fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
1549                            EVENT_FD_READ, vacuum_child_handler, child_ctx);
1550         tevent_fd_set_auto_close(fde);
1551
1552         vacuum_handle->child_ctx = child_ctx;
1553         child_ctx->vacuum_handle = vacuum_handle;
1554 }
1555
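/*
 * Abort all running vacuum child processes.  Freeing each context
 * invokes vacuum_child_destructor, which kills the child and removes
 * it from the list of vacuumers.
 */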
1556 void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
1557 {
1558         /* Simply free them all. */
1559         while (ctdb->vacuumers) {
1560                 DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
1561                            ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
1562                            (int)ctdb->vacuumers->child_pid));
1563                 /* vacuum_child_destructor kills it and removes it from the list */
1564                 talloc_free(ctdb->vacuumers);
1565         }
1566 }
1567
1568 /* this function initializes the vacuuming context for a database
1569  * and starts the vacuuming events
1570  */
1571 int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
1572 {
1573         if (ctdb_db->persistent != 0) {
1574                 DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
1575                 return 0;
1576         }
1577
1578         ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
1579         CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);
1580
1581         ctdb_db->vacuum_handle->ctdb_db         = ctdb_db;
1582         ctdb_db->vacuum_handle->fast_path_count = 0;
1583
1584         event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle, 
1585                         timeval_current_ofs(get_vacuum_interval(ctdb_db), 0), 
1586                         ctdb_vacuum_event, ctdb_db->vacuum_handle);
1587
1588         return 0;
1589 }
1590
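/*
 * Remove a record from the database's fast-path delete queue, if
 * present.  The queue is indexed by key hash, so the full key is
 * compared to guard against hash collisions.
 */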
1591 static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
1592                                             const struct ctdb_ltdb_header *hdr,
1593                                             const TDB_DATA key)
1594 {
1595         struct delete_record_data *kd;
1596         uint32_t hash;
1597
1598         hash = (uint32_t)ctdb_hash(&key);
1599
1600         DEBUG(DEBUG_DEBUG, (__location__
1601                             " remove_record_from_delete_queue: "
1602                             "db[%s] "
1603                             "db_id[0x%08x] "
1604                             "key_hash[0x%08x] "
1605                             "lmaster[%u] "
1606                             "migrated_with_data[%s]\n",
1607                              ctdb_db->db_name, ctdb_db->db_id,
1608                              hash,
1609                              ctdb_lmaster(ctdb_db->ctdb, &key),
1610                              hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1611
1612         kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1613         if (kd == NULL) {
1614                 DEBUG(DEBUG_DEBUG, (__location__
1615                                     " remove_record_from_delete_queue: "
1616                                     "record not in queue (hash[0x%08x]).\n",
1617                                     hash));
1618                 return;
1619         }
1620
1621         if ((kd->key.dsize != key.dsize) ||
1622             (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1623         {
1624                 DEBUG(DEBUG_DEBUG, (__location__
1625                                     " remove_record_from_delete_queue: "
1626                                     "hash collision for key with hash[0x%08x] "
1627                                     "in db[%s] - skipping\n",
1628                                     hash, ctdb_db->db_name));
1629                 return;
1630         }
1631
1632         DEBUG(DEBUG_DEBUG, (__location__
1633                             " remove_record_from_delete_queue: "
1634                             "removing key with hash[0x%08x]\n",
1635                              hash));
1636
1637         talloc_free(kd);
1638
1639         return;
1640 }
1641
1642 /**
1643  * Insert a record into the ctdb_db context's delete queue,
1644  * handling hash collisions.
1645  */
1646 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
1647                                            const struct ctdb_ltdb_header *hdr,
1648                                            TDB_DATA key)
1649 {
1650         struct delete_record_data *kd;
1651         uint32_t hash;
1652         int ret;
1653
1654         hash = (uint32_t)ctdb_hash(&key);
1655
1656         DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
1657                            "db_id[0x%08x] "
1658                            "key_hash[0x%08x] "
1659                            "lmaster[%u] "
1660                            "migrated_with_data[%s]\n",
1661                             ctdb_db->db_name, ctdb_db->db_id,
1662                             hash,
1663                             ctdb_lmaster(ctdb_db->ctdb, &key),
1664                             hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1665
1666         kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1667         if (kd != NULL) {
1668                 if ((kd->key.dsize != key.dsize) ||
1669                     (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1670                 {
1671                         DEBUG(DEBUG_INFO,
1672                               (__location__ " schedule for deletion: "
1673                                "hash collision for key hash [0x%08x]. "
1674                                "Skipping the record.\n", hash));
1675                         return 0;
1676                 } else {
1677                         DEBUG(DEBUG_DEBUG,
1678                               (__location__ " schedule for deletion: "
1679                                "updating entry for key with hash [0x%08x].\n",
1680                                hash));
1681                 }
1682         }
1683
1684         ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
1685                                                   ctdb_db->delete_queue,
1686                                                   hdr, key);
1687         if (ret != 0) {
1688                 DEBUG(DEBUG_INFO,
1689                       (__location__ " schedule for deletion: error "
1690                        "inserting key with hash [0x%08x] into delete queue\n",
1691                        hash));
1692                 return -1;
1693         }
1694
1695         return 0;
1696 }
1697
1698 /**
1699  * Schedule a record for deletion.
1700  * Called from the parent context.
1701  */
1702 int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
1703                                            TDB_DATA indata)
1704 {
1705         struct ctdb_control_schedule_for_deletion *dd;
1706         struct ctdb_db_context *ctdb_db;
1707         int ret;
1708         TDB_DATA key;
1709
1710         dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;
1711
1712         ctdb_db = find_ctdb_db(ctdb, dd->db_id);
1713         if (ctdb_db == NULL) {
1714                 DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
1715                                   dd->db_id));
1716                 return -1;
1717         }
1718
1719         key.dsize = dd->keylen;
1720         key.dptr = dd->key;
1721
1722         ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);
1723
1724         return ret;
1725 }
1726
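/*
 * Schedule a record for deletion from an arbitrary process context:
 * in the main daemon the record is queued directly, otherwise a
 * SCHEDULE_FOR_DELETION control is sent to the daemon.
 */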
1727 int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
1728                                          const struct ctdb_ltdb_header *hdr,
1729                                          TDB_DATA key)
1730 {
1731         int ret;
1732         struct ctdb_control_schedule_for_deletion *dd;
1733         TDB_DATA indata;
1734         int32_t status;
1735
1736         if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
1737                 /* main daemon: queue the record directly */
1738                 ret = insert_record_into_delete_queue(ctdb_db, hdr, key);
1739
1740                 return ret;
1741         }
1742
1743         /* If we don't have a connection to the daemon we cannot send a
1744            control, e.g. when called from the child process of an
1745            update_record control.
1746         */
1747         if (!ctdb_db->ctdb->can_send_controls) {
1748                 return -1;
1749         }
1750
1751
1752         /* child process: send the main daemon a control */
1753         indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
1754         indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
1755         if (indata.dptr == NULL) {
1756                 DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
1757                 return -1;
1758         }
1759         dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
1760         dd->db_id = ctdb_db->db_id;
1761         dd->hdr = *hdr;
1762         dd->keylen = key.dsize;
1763         memcpy(dd->key, key.dptr, key.dsize);
1764
1765         ret = ctdb_control(ctdb_db->ctdb,
1766                            CTDB_CURRENT_NODE,
1767                            ctdb_db->db_id,
1768                            CTDB_CONTROL_SCHEDULE_FOR_DELETION,
1769                            CTDB_CTRL_FLAG_NOREPLY, /* flags */
1770                            indata,
1771                            NULL, /* mem_ctx */
1772                            NULL, /* outdata */
1773                            &status,
1774                            NULL, /* timeout : NULL == wait forever */
1775                            NULL); /* error message */
1776
1777         talloc_free(indata.dptr);
1778
1779         if (ret != 0 || status != 0) {
1780                 DEBUG(DEBUG_ERR, (__location__ " Error sending "
1781                                   "SCHEDULE_FOR_DELETION "
1782                                   "control.\n"));
1783                 if (status != 0) {
1784                         ret = -1;
1785                 }
1786         }
1787
1788         return ret;
1789 }
1790
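/*
 * Remove a record from the delete queue.  This is a no-op outside of
 * the main daemon, since only the daemon owns the delete queue.
 */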
1791 void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
1792                                          const struct ctdb_ltdb_header *hdr,
1793                                          const TDB_DATA key)
1794 {
1795         if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
1796                 /*
1797                  * Only remove the record from the delete queue if called
1798                  * in the main daemon.
1799                  */
1800                 return;
1801         }
1802
1803         remove_record_from_delete_queue(ctdb_db, hdr, key);
1804
1805         return;
1806 }