// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

void bch2_btree_node_io_unlock(struct btree *b)
{
        EBUG_ON(!btree_node_write_in_flight(b));

        clear_btree_node_write_in_flight_inner(b);
        clear_btree_node_write_in_flight(b);
        wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
        bch2_assert_btree_nodes_not_locked();

        wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
                            TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
        wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
        wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
        bch2_assert_btree_nodes_not_locked();

        wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
        bch2_assert_btree_nodes_not_locked();

        wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
                           struct bkey_packed *start,
                           struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        struct bkey_packed *k, *p;

        if (start == end)
                return;

        for (p = start, k = bkey_p_next(start);
             k != end;
             p = k, k = bkey_p_next(k)) {
                struct bkey l = bkey_unpack_key(b, p);
                struct bkey r = bkey_unpack_key(b, k);

                BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
        }
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
        struct bkey_packed *k;

        for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
                              bool used_mempool, void *p)
{
        if (used_mempool)
                mempool_free(p, &c->btree_bounce_pool);
        else
                kvfree(p);
}

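/*
 * Bounce buffers for sorting/compacting: try an opportunistic kvmalloc()
 * first (GFP_NOWAIT, so it can't recurse into reclaim), falling back to the
 * preallocated mempool. Both happen under memalloc_nofs_save(), since this
 * runs in the IO path.
 */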
static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
                                bool *used_mempool)
{
        unsigned flags = memalloc_nofs_save();
        void *p;

        BUG_ON(size > c->opts.btree_node_size);

        *used_mempool = false;
        p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
        if (!p) {
                *used_mempool = true;
                p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
        }
        memalloc_nofs_restore(flags);
        return p;
}

static void sort_bkey_ptrs(const struct btree *bt,
                           struct bkey_packed **ptrs, unsigned nr)
{
        unsigned n = nr, a = nr / 2, b, c, d;

        if (!a)
                return;

        /* Heap sort: see lib/sort.c: */
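        /*
         * Bottom-up heapsort: while @a counts down we're heapifying, then we
         * repeatedly pull the max off the front. Sifting down follows the
         * path of the larger child all the way to a leaf, then backtracks up
         * to where the displaced element actually belongs - fewer
         * comparisons than the textbook sift-down.
         */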
        while (1) {
                if (a)
                        a--;
                else if (--n)
                        swap(ptrs[0], ptrs[n]);
                else
                        break;

                for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
                        b = bch2_bkey_cmp_packed(bt,
                                            ptrs[c],
                                            ptrs[d]) >= 0 ? c : d;
                if (d == n)
                        b = c;

                while (b != a &&
                       bch2_bkey_cmp_packed(bt,
                                       ptrs[a],
                                       ptrs[b]) >= 0)
                        b = (b - 1) / 2;
                c = b;
                while (b != a) {
                        b = (b - 1) / 2;
                        swap(ptrs[b], ptrs[c]);
                }
        }
}

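/*
 * Sort the whiteouts queued up in the unwritten portion of the node: build
 * an array of pointers to the keys in the tail of a bounce buffer, heap sort
 * the pointers, copy the keys out in order into the front of the bounce
 * buffer, then copy the result back into place.
 */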
static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
        struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
        bool used_mempool = false;
        size_t bytes = b->whiteout_u64s * sizeof(u64);

        if (!b->whiteout_u64s)
                return;

        new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

        ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

        for (k = unwritten_whiteouts_start(b);
             k != unwritten_whiteouts_end(b);
             k = bkey_p_next(k))
                *--ptrs = k;

        sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

        k = new_whiteouts;

        while (ptrs != ptrs_end) {
                bkey_p_copy(k, *ptrs);
                k = bkey_p_next(k);
                ptrs++;
        }

        verify_no_dups(b, new_whiteouts,
                       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

        memcpy_u64s(unwritten_whiteouts_start(b),
                    new_whiteouts, b->whiteout_u64s);

        btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
                                bool compacting, enum compact_mode mode)
{
        if (!bset_dead_u64s(b, t))
                return false;

        switch (mode) {
        case COMPACT_LAZY:
                return should_compact_bset_lazy(b, t) ||
                        (compacting && !bset_written(b, bset(b, t)));
        case COMPACT_ALL:
                return true;
        default:
                BUG();
        }
}

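/*
 * Compact the node in place by dropping deleted keys (whiteouts); unwritten
 * bsets are also slid down to close any gaps left behind. Returns true if
 * keys were dropped or bsets moved, i.e. if iterators into this node were
 * invalidated.
 */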
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
        struct bset_tree *t;
        bool ret = false;

        for_each_bset(b, t) {
                struct bset *i = bset(b, t);
                struct bkey_packed *k, *n, *out, *start, *end;
                struct btree_node_entry *src = NULL, *dst = NULL;

                if (t != b->set && !bset_written(b, i)) {
                        src = container_of(i, struct btree_node_entry, keys);
                        dst = max(write_block(b),
                                  (void *) btree_bkey_last(b, t - 1));
                }

                if (src != dst)
                        ret = true;

                if (!should_compact_bset(b, t, ret, mode)) {
                        if (src != dst) {
                                memmove(dst, src, sizeof(*src) +
                                        le16_to_cpu(src->keys.u64s) *
                                        sizeof(u64));
                                i = &dst->keys;
                                set_btree_bset(b, t, i);
                        }
                        continue;
                }

                start   = btree_bkey_first(b, t);
                end     = btree_bkey_last(b, t);

                if (src != dst) {
                        memmove(dst, src, sizeof(*src));
                        i = &dst->keys;
                        set_btree_bset(b, t, i);
                }

                out = i->start;

                for (k = start; k != end; k = n) {
                        n = bkey_p_next(k);

                        if (!bkey_deleted(k)) {
                                bkey_p_copy(out, k);
                                out = bkey_p_next(out);
                        } else {
                                BUG_ON(k->needs_whiteout);
                        }
                }

                i->u64s = cpu_to_le16((u64 *) out - i->_data);
                set_btree_bset_end(b, t);
                bch2_bset_set_no_aux_tree(b, t);
                ret = true;
        }

        bch2_verify_btree_nr_keys(b);

        bch2_btree_build_aux_trees(b);

        return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
                            enum compact_mode mode)
{
        return bch2_drop_whiteouts(b, mode);
}

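/*
 * Merge sort bsets [start_idx, end_idx) into a single bset, via a bounce
 * buffer: when sorting the entire node the bounce buffer is node sized, so
 * we can simply swap buffers at the end instead of copying back.
 */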
static void btree_node_sort(struct bch_fs *c, struct btree *b,
                            unsigned start_idx,
                            unsigned end_idx,
                            bool filter_whiteouts)
{
        struct btree_node *out;
        struct sort_iter_stack sort_iter;
        struct bset_tree *t;
        struct bset *start_bset = bset(b, &b->set[start_idx]);
        bool used_mempool = false;
        u64 start_time, seq = 0;
        unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
        bool sorting_entire_node = start_idx == 0 &&
                end_idx == b->nsets;

        sort_iter_stack_init(&sort_iter, b);

        for (t = b->set + start_idx;
             t < b->set + end_idx;
             t++) {
                u64s += le16_to_cpu(bset(b, t)->u64s);
                sort_iter_add(&sort_iter.iter,
                              btree_bkey_first(b, t),
                              btree_bkey_last(b, t));
        }

        bytes = sorting_entire_node
                ? btree_buf_bytes(b)
                : __vstruct_bytes(struct btree_node, u64s);

        out = btree_bounce_alloc(c, bytes, &used_mempool);

        start_time = local_clock();

        u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);

        out->keys.u64s = cpu_to_le16(u64s);

        BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

        if (sorting_entire_node)
                bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
                                       start_time);

        /* Make sure we preserve bset journal_seq: */
        for (t = b->set + start_idx; t < b->set + end_idx; t++)
                seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
        start_bset->journal_seq = cpu_to_le64(seq);

        if (sorting_entire_node) {
                u64s = le16_to_cpu(out->keys.u64s);

                BUG_ON(bytes != btree_buf_bytes(b));

                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
                 */
                *out = *b->data;
                out->keys.u64s = cpu_to_le16(u64s);
                swap(out, b->data);
                set_btree_bset(b, b->set, &b->data->keys);
        } else {
                start_bset->u64s = out->keys.u64s;
                memcpy_u64s(start_bset->start,
                            out->keys.start,
                            le16_to_cpu(out->keys.u64s));
        }

        for (i = start_idx + 1; i < end_idx; i++)
                b->nr.bset_u64s[start_idx] +=
                        b->nr.bset_u64s[i];

        b->nsets -= shift;

        for (i = start_idx + 1; i < b->nsets; i++) {
                b->nr.bset_u64s[i]      = b->nr.bset_u64s[i + shift];
                b->set[i]               = b->set[i + shift];
        }

        for (i = b->nsets; i < MAX_BSETS; i++)
                b->nr.bset_u64s[i] = 0;

        set_btree_bset_end(b, &b->set[start_idx]);
        bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

        btree_bounce_free(c, bytes, used_mempool, out);

        bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
                         struct btree *dst,
                         struct btree *src)
{
        struct btree_nr_keys nr;
        struct btree_node_iter src_iter;
        u64 start_time = local_clock();

        BUG_ON(dst->nsets != 1);

        bch2_bset_set_no_aux_tree(dst, dst->set);

        bch2_btree_node_iter_init_from_start(&src_iter, src);

        nr = bch2_sort_repack(btree_bset_first(dst),
                        src, &src_iter,
                        &dst->format,
                        true);

        bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
                               start_time);

        set_btree_bset_end(dst, dst->set);

        dst->nr.live_u64s       += nr.live_u64s;
        dst->nr.bset_u64s[0]    += nr.bset_u64s[0];
        dst->nr.packed_keys     += nr.packed_keys;
        dst->nr.unpacked_keys   += nr.unpacked_keys;

        bch2_verify_btree_nr_keys(dst);
}

/*
 * We're about to add another bset to the btree node, so if there are
 * currently too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
        unsigned unwritten_idx;
        bool ret = false;

        for (unwritten_idx = 0;
             unwritten_idx < b->nsets;
             unwritten_idx++)
                if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
                        break;

        if (b->nsets - unwritten_idx > 1) {
                btree_node_sort(c, b, unwritten_idx,
                                b->nsets, false);
                ret = true;
        }

        if (unwritten_idx > 1) {
                btree_node_sort(c, b, 0, unwritten_idx, false);
                ret = true;
        }

        return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
        struct bset_tree *t;

        for_each_bset(b, t)
                bch2_bset_build_aux_tree(b, t,
                                !bset_written(b, bset(b, t)) &&
                                t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean
 * (computed in log space: the log of a geometric mean is the arithmetic mean
 * of the logs, hence averaging the two ilog2 values below):
 */
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
        unsigned mid_u64s_bits =
                (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

        return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new
 * bset if @b doesn't already have one.
 *
 * If we sorted (i.e. invalidated iterators), the iterators are reinitialized
 * via bch2_trans_node_reinit_iter().
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
        struct bch_fs *c = trans->c;
        struct btree_node_entry *bne;
        bool reinit_iter = false;

        EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
        BUG_ON(bset_written(b, bset(b, &b->set[1])));
        BUG_ON(btree_node_just_written(b));

        if (b->nsets == MAX_BSETS &&
            !btree_node_write_in_flight(b) &&
            should_compact_all(c, b)) {
                bch2_btree_node_write(c, b, SIX_LOCK_write,
                                      BTREE_WRITE_init_next_bset);
                reinit_iter = true;
        }

        if (b->nsets == MAX_BSETS &&
            btree_node_compact(c, b))
                reinit_iter = true;

        BUG_ON(b->nsets >= MAX_BSETS);

        bne = want_new_bset(c, b);
        if (bne)
                bch2_bset_init_next(b, bne);

        bch2_btree_build_aux_trees(b);

        if (reinit_iter)
                bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
                          struct bch_dev *ca,
                          struct btree *b, struct bset *i,
                          unsigned offset, int write)
{
        prt_printf(out, bch2_log_msg(c, "%s"),
                   write == READ
                   ? "error validating btree node "
                   : "corrupt btree node before write ");
        if (ca)
                prt_printf(out, "on %s ", ca->name);
        prt_printf(out, "at btree ");
        bch2_btree_pos_to_text(out, c, b);

        prt_printf(out, "\n  node offset %u/%u",
                   b->written, btree_ptr_sectors_written(&b->key));
        if (i)
                prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
        prt_str(out, ": ");
}

__printf(9, 10)
static int __btree_err(int ret,
                       struct bch_fs *c,
                       struct bch_dev *ca,
                       struct btree *b,
                       struct bset *i,
                       int write,
                       bool have_retry,
                       enum bch_sb_error_id err_type,
                       const char *fmt, ...)
{
        struct printbuf out = PRINTBUF;
        va_list args;

        btree_err_msg(&out, c, ca, b, i, b->written, write);

        va_start(args, fmt);
        prt_vprintf(&out, fmt, args);
        va_end(args);

        if (write == WRITE) {
                bch2_print_string_as_lines(KERN_ERR, out.buf);
                ret = c->opts.errors == BCH_ON_ERROR_continue
                        ? 0
                        : -BCH_ERR_fsck_errors_not_fixed;
                goto out;
        }

        if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
                ret = -BCH_ERR_btree_node_read_err_fixable;
        if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
                ret = -BCH_ERR_btree_node_read_err_bad_node;

        if (ret != -BCH_ERR_btree_node_read_err_fixable)
                bch2_sb_error_count(c, err_type);

        switch (ret) {
        case -BCH_ERR_btree_node_read_err_fixable:
                ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
                if (ret != -BCH_ERR_fsck_fix &&
                    ret != -BCH_ERR_fsck_ignore)
                        goto fsck_err;
                ret = -BCH_ERR_fsck_fix;
                break;
        case -BCH_ERR_btree_node_read_err_want_retry:
        case -BCH_ERR_btree_node_read_err_must_retry:
                bch2_print_string_as_lines(KERN_ERR, out.buf);
                break;
        case -BCH_ERR_btree_node_read_err_bad_node:
                bch2_print_string_as_lines(KERN_ERR, out.buf);
                ret = bch2_topology_error(c);
                break;
        case -BCH_ERR_btree_node_read_err_incompatible:
                bch2_print_string_as_lines(KERN_ERR, out.buf);
                ret = -BCH_ERR_fsck_errors_not_fixed;
                break;
        default:
                BUG();
        }
out:
fsck_err:
        printbuf_exit(&out);
        return ret;
}

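/*
 * btree_err()/btree_err_on() wrap __btree_err(), relying on write, have_retry,
 * ret, saw_error and an fsck_err label existing in the enclosing function: if
 * the error wasn't fixed (__btree_err() returned something other than
 * -BCH_ERR_fsck_fix), stash the error in ret and jump to fsck_err; otherwise
 * record that we saw, and fixed, an error.
 */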
#define btree_err(type, c, ca, b, i, _err_type, msg, ...)               \
({                                                                      \
        int _ret = __btree_err(type, c, ca, b, i, write, have_retry,    \
                               BCH_FSCK_ERR_##_err_type,                \
                               msg, ##__VA_ARGS__);                     \
                                                                        \
        if (_ret != -BCH_ERR_fsck_fix) {                                \
                ret = _ret;                                             \
                goto fsck_err;                                          \
        }                                                               \
                                                                        \
        *saw_error = true;                                              \
})

#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
        struct bset_tree *t;

        for_each_bset(b, t) {
                struct bset *i = bset(b, t);
                struct bkey_packed *k;

                for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                        if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
                                break;

                if (k != i->start) {
                        unsigned shift = (u64 *) k - (u64 *) i->start;

                        memmove_u64s_down(i->start, k,
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
                        set_btree_bset_end(b, t);
                }

                for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                        if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
                                break;

                if (k != vstruct_last(i)) {
                        i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
                        set_btree_bset_end(b, t);
                }
        }

        /*
         * Always rebuild search trees: eytzinger search tree nodes directly
         * depend on the values of min/max key:
         */
        bch2_bset_set_no_aux_tree(b, b->set);
        bch2_btree_build_aux_trees(b);

        struct bkey_s_c k;
        struct bkey unpacked;
        struct btree_node_iter iter;
        for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
                BUG_ON(bpos_lt(k.k->p, b->data->min_key));
                BUG_ON(bpos_gt(k.k->p, b->data->max_key));
        }
}

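/*
 * Validate a single bset's header against the containing btree node and the
 * superblock: version checks, sector offset/size checks, and - for the first
 * bset only - checks that we actually read the btree node the key pointed at
 * (sequence number, btree id, level, min/max keys, key format).
 */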
static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                         struct btree *b, struct bset *i,
                         unsigned offset, unsigned sectors,
                         int write, bool have_retry, bool *saw_error)
{
        unsigned version = le16_to_cpu(i->version);
        struct printbuf buf1 = PRINTBUF;
        struct printbuf buf2 = PRINTBUF;
        int ret = 0;

        btree_err_on(!bch2_version_compatible(version),
                     -BCH_ERR_btree_node_read_err_incompatible,
                     c, ca, b, i,
                     btree_node_unsupported_version,
                     "unsupported bset version %u.%u",
                     BCH_VERSION_MAJOR(version),
                     BCH_VERSION_MINOR(version));

        if (btree_err_on(version < c->sb.version_min,
                         -BCH_ERR_btree_node_read_err_fixable,
                         c, NULL, b, i,
                         btree_node_bset_older_than_sb_min,
                         "bset version %u older than superblock version_min %u",
                         version, c->sb.version_min)) {
                mutex_lock(&c->sb_lock);
                c->disk_sb.sb->version_min = cpu_to_le16(version);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        }

        if (btree_err_on(BCH_VERSION_MAJOR(version) >
                         BCH_VERSION_MAJOR(c->sb.version),
                         -BCH_ERR_btree_node_read_err_fixable,
                         c, NULL, b, i,
                         btree_node_bset_newer_than_sb,
                         "bset version %u newer than superblock version %u",
                         version, c->sb.version)) {
                mutex_lock(&c->sb_lock);
                c->disk_sb.sb->version = cpu_to_le16(version);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        }

        btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
                     -BCH_ERR_btree_node_read_err_incompatible,
                     c, ca, b, i,
                     btree_node_unsupported_version,
                     "BSET_SEPARATE_WHITEOUTS no longer supported");

        if (btree_err_on(offset + sectors > btree_sectors(c),
                         -BCH_ERR_btree_node_read_err_fixable,
                         c, ca, b, i,
                         bset_past_end_of_btree_node,
                         "bset past end of btree node")) {
                i->u64s = 0;
                ret = 0;
                goto out;
        }

        btree_err_on(offset && !i->u64s,
                     -BCH_ERR_btree_node_read_err_fixable,
                     c, ca, b, i,
                     bset_empty,
                     "empty bset");

        btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
                     -BCH_ERR_btree_node_read_err_want_retry,
                     c, ca, b, i,
                     bset_wrong_sector_offset,
                     "bset at wrong sector offset");

        if (!offset) {
                struct btree_node *bn =
                        container_of(i, struct btree_node, keys);
                /* These indicate that we read the wrong btree node: */

                if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                        struct bch_btree_ptr_v2 *bp =
                                &bkey_i_to_btree_ptr_v2(&b->key)->v;

                        /* XXX endianness */
                        btree_err_on(bp->seq != bn->keys.seq,
                                     -BCH_ERR_btree_node_read_err_must_retry,
                                     c, ca, b, NULL,
                                     bset_bad_seq,
                                     "incorrect sequence number (wrong btree node)");
                }

                btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, i,
                             btree_node_bad_btree,
                             "incorrect btree id");

                btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, i,
                             btree_node_bad_level,
                             "incorrect level");

                if (!write)
                        compat_btree_node(b->c.level, b->c.btree_id, version,
                                          BSET_BIG_ENDIAN(i), write, bn);

                if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                        struct bch_btree_ptr_v2 *bp =
                                &bkey_i_to_btree_ptr_v2(&b->key)->v;

                        if (BTREE_PTR_RANGE_UPDATED(bp)) {
                                b->data->min_key = bp->min_key;
                                b->data->max_key = b->key.k.p;
                        }

                        btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
                                     -BCH_ERR_btree_node_read_err_must_retry,
                                     c, ca, b, NULL,
                                     btree_node_bad_min_key,
                                     "incorrect min_key: got %s should be %s",
                                     (printbuf_reset(&buf1),
                                      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
                                     (printbuf_reset(&buf2),
                                      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
                }

                btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, i,
                             btree_node_bad_max_key,
                             "incorrect max key %s",
                             (printbuf_reset(&buf1),
                              bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

                if (write)
                        compat_btree_node(b->c.level, b->c.btree_id, version,
                                          BSET_BIG_ENDIAN(i), write, bn);

                btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
                             -BCH_ERR_btree_node_read_err_bad_node,
                             c, ca, b, i,
                             btree_node_bad_format,
                             "invalid bkey format: %s\n  %s", buf1.buf,
                             (printbuf_reset(&buf2),
                              bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
                printbuf_reset(&buf1);

                compat_bformat(b->c.level, b->c.btree_id, version,
                               BSET_BIG_ENDIAN(i), write,
                               &bn->format);
        }
out:
fsck_err:
        printbuf_exit(&buf2);
        printbuf_exit(&buf1);
        return ret;
}

static int bset_key_invalid(struct bch_fs *c, struct btree *b,
                            struct bkey_s_c k,
                            bool updated_range, int rw,
                            struct printbuf *err)
{
        return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
                (!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
                (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}

static bool __bkey_valid(struct bch_fs *c, struct btree *b,
                         struct bset *i, struct bkey_packed *k)
{
        if (bkey_p_next(k) > vstruct_last(i))
                return false;

        if (k->format > KEY_FORMAT_CURRENT)
                return false;

        if (k->u64s < bkeyp_key_u64s(&b->format, k))
                return false;

        struct printbuf buf = PRINTBUF;
        struct bkey tmp;
        struct bkey_s u = __bkey_disassemble(b, k, &tmp);
        /*
         * __bch2_bkey_invalid() returns an error code (nonzero) when the key
         * is bad - invert it to answer "is this key valid":
         */
        bool ret = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf);
        printbuf_exit(&buf);
        return !ret;
}

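/*
 * Walk and validate every key in the bset: check that each key fits within
 * the bset and has a sane format and size, unpack it and run full bkey
 * validation, and check ordering against the previous key. Bad keys are
 * dropped; if a key's size field can't be trusted we scan forward one u64 at
 * a time for the next thing that looks like a valid key.
 */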
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                         struct bset *i, int write,
                         bool have_retry, bool *saw_error)
{
        unsigned version = le16_to_cpu(i->version);
        struct bkey_packed *k, *prev = NULL;
        struct printbuf buf = PRINTBUF;
        bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
                BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
        int ret = 0;

        for (k = i->start;
             k != vstruct_last(i);) {
                struct bkey_s u;
                struct bkey tmp;
                unsigned next_good_key;

                if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
                                 -BCH_ERR_btree_node_read_err_fixable,
                                 c, NULL, b, i,
                                 btree_node_bkey_past_bset_end,
                                 "key extends past end of bset")) {
                        i->u64s = cpu_to_le16((u64 *) k - i->_data);
                        break;
                }

                if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
                                 -BCH_ERR_btree_node_read_err_fixable,
                                 c, NULL, b, i,
                                 btree_node_bkey_bad_format,
                                 "invalid bkey format %u", k->format))
                        goto drop_this_key;

                if (btree_err_on(k->u64s < bkeyp_key_u64s(&b->format, k),
                                 -BCH_ERR_btree_node_read_err_fixable,
                                 c, NULL, b, i,
                                 btree_node_bkey_bad_u64s,
                                 "k->u64s too small (%u < %u)", k->u64s, bkeyp_key_u64s(&b->format, k)))
                        goto drop_this_key;

                if (!write)
                        bch2_bkey_compat(b->c.level, b->c.btree_id, version,
                                    BSET_BIG_ENDIAN(i), write,
                                    &b->format, k);

                u = __bkey_disassemble(b, k, &tmp);

                printbuf_reset(&buf);
                if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
                        printbuf_reset(&buf);
                        bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
                        prt_printf(&buf, "\n  ");
                        bch2_bkey_val_to_text(&buf, c, u.s_c);

                        btree_err(-BCH_ERR_btree_node_read_err_fixable,
                                  c, NULL, b, i,
                                  btree_node_bad_bkey,
                                  "invalid bkey: %s", buf.buf);
                        goto drop_this_key;
                }

                if (write)
                        bch2_bkey_compat(b->c.level, b->c.btree_id, version,
                                    BSET_BIG_ENDIAN(i), write,
                                    &b->format, k);

                if (prev && bkey_iter_cmp(b, prev, k) > 0) {
                        struct bkey up = bkey_unpack_key(b, prev);

                        printbuf_reset(&buf);
                        prt_printf(&buf, "keys out of order: ");
                        bch2_bkey_to_text(&buf, &up);
                        prt_printf(&buf, " > ");
                        bch2_bkey_to_text(&buf, u.k);

                        if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
                                      c, NULL, b, i,
                                      btree_node_bkey_out_of_order,
                                      "%s", buf.buf))
                                goto drop_this_key;
                }

                prev = k;
                k = bkey_p_next(k);
                continue;
drop_this_key:
                next_good_key = k->u64s;

                if (!next_good_key ||
                    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
                     version >= bcachefs_metadata_version_snapshot)) {
                        /*
                         * only do scanning if bch2_bkey_compat() has nothing to
                         * do
                         */

                        if (!__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
                                for (next_good_key = 1;
                                     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
                                     next_good_key++)
                                        if (__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
                                                goto got_good_key;

                        }

                        /*
                         * didn't find a good key, have to truncate the rest of
                         * the bset
                         */
                        next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
                }
got_good_key:
                le16_add_cpu(&i->u64s, -next_good_key);
                /*
                 * We may be dropping more than k->u64s if the size field was
                 * bad, so move up from k + next_good_key, not bkey_p_next(k):
                 */
                memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
        }
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

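/*
 * The main btree node read path: verify checksums and decrypt each bset,
 * validate headers and keys, then merge all the bsets (skipping those whose
 * journal seq was blacklisted) into a single sorted bset. Returns nonzero if
 * the caller should retry the read from a different replica.
 */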
int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                              struct btree *b, bool have_retry, bool *saw_error)
{
        struct btree_node_entry *bne;
        struct sort_iter *iter;
        struct btree_node *sorted;
        struct bkey_packed *k;
        struct bset *i;
        bool used_mempool, blacklisted;
        bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
                BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
        unsigned u64s;
        unsigned ptr_written = btree_ptr_sectors_written(&b->key);
        struct printbuf buf = PRINTBUF;
        int ret = 0, retry_read = 0, write = READ;
        u64 start_time = local_clock();

        b->version_ondisk = U16_MAX;
        /* We might get called multiple times on read retry: */
        b->written = 0;

        iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
        sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

        if (bch2_meta_read_fault("btree"))
                btree_err(-BCH_ERR_btree_node_read_err_must_retry,
                          c, ca, b, NULL,
                          btree_node_fault_injected,
                          "dynamic fault");

        btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
                     -BCH_ERR_btree_node_read_err_must_retry,
                     c, ca, b, NULL,
                     btree_node_bad_magic,
                     "bad magic: want %llx, got %llx",
                     bset_magic(c), le64_to_cpu(b->data->magic));

        if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                struct bch_btree_ptr_v2 *bp =
                        &bkey_i_to_btree_ptr_v2(&b->key)->v;

                bch2_bpos_to_text(&buf, b->data->min_key);
                prt_str(&buf, "-");
                bch2_bpos_to_text(&buf, b->data->max_key);

                btree_err_on(b->data->keys.seq != bp->seq,
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, NULL,
                             btree_node_bad_seq,
                             "got wrong btree node (want %llx got %llx)\n"
                             "got btree %s level %llu pos %s",
                             bp->seq, b->data->keys.seq,
                             bch2_btree_id_str(BTREE_NODE_ID(b->data)),
                             BTREE_NODE_LEVEL(b->data),
                             buf.buf);
        } else {
                btree_err_on(!b->data->keys.seq,
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, NULL,
                             btree_node_bad_seq,
                             "bad btree header: seq 0");
        }

        while (b->written < (ptr_written ?: btree_sectors(c))) {
                unsigned sectors;
                struct nonce nonce;
                bool first = !b->written;
                bool csum_bad;

                if (!b->written) {
                        i = &b->data->keys;

                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, i,
                                     bset_unknown_csum,
                                     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

                        nonce = btree_nonce(i, b->written << 9);

                        struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
                        csum_bad = bch2_crc_cmp(b->data->csum, csum);
                        if (csum_bad)
                                bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

                        btree_err_on(csum_bad,
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, i,
                                     bset_bad_csum,
                                     "%s",
                                     (printbuf_reset(&buf),
                                      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
                                      buf.buf));

                        ret = bset_encrypt(c, i, b->written << 9);
                        if (bch2_fs_fatal_err_on(ret, c,
                                        "error decrypting btree node: %i", ret))
                                goto fsck_err;

                        btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
                                     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
                                     -BCH_ERR_btree_node_read_err_incompatible,
                                     c, NULL, b, NULL,
                                     btree_node_unsupported_version,
                                     "btree node does not have NEW_EXTENT_OVERWRITE set");

                        sectors = vstruct_sectors(b->data, c->block_bits);
                } else {
                        bne = write_block(b);
                        i = &bne->keys;

                        if (i->seq != b->data->keys.seq)
                                break;

                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, i,
                                     bset_unknown_csum,
                                     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

                        nonce = btree_nonce(i, b->written << 9);
                        struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
                        csum_bad = bch2_crc_cmp(bne->csum, csum);
                        if (csum_bad)
                                bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

                        btree_err_on(csum_bad,
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, i,
                                     bset_bad_csum,
                                     "%s",
                                     (printbuf_reset(&buf),
                                      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
                                      buf.buf));

                        ret = bset_encrypt(c, i, b->written << 9);
                        if (bch2_fs_fatal_err_on(ret, c,
                                        "error decrypting btree node: %i\n", ret))
                                goto fsck_err;

                        sectors = vstruct_sectors(bne, c->block_bits);
                }

                b->version_ondisk = min(b->version_ondisk,
                                        le16_to_cpu(i->version));

                ret = validate_bset(c, ca, b, i, b->written, sectors,
                                    READ, have_retry, saw_error);
                if (ret)
                        goto fsck_err;

                if (!b->written)
                        btree_node_set_format(b, b->data->format);

                ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
                if (ret)
                        goto fsck_err;

                SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

                blacklisted = bch2_journal_seq_is_blacklisted(c,
                                        le64_to_cpu(i->journal_seq),
                                        true);

                btree_err_on(blacklisted && first,
                             -BCH_ERR_btree_node_read_err_fixable,
                             c, ca, b, i,
                             bset_blacklisted_journal_seq,
                             "first btree node bset has blacklisted journal seq (%llu)",
                             le64_to_cpu(i->journal_seq));

                btree_err_on(blacklisted && ptr_written,
                             -BCH_ERR_btree_node_read_err_fixable,
                             c, ca, b, i,
                             first_bset_blacklisted_journal_seq,
                             "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
                             le64_to_cpu(i->journal_seq),
                             b->written, b->written + sectors, ptr_written);

                b->written += sectors;

                if (blacklisted && !first)
                        continue;

                sort_iter_add(iter,
                              vstruct_idx(i, 0),
                              vstruct_last(i));
        }

        if (ptr_written) {
                btree_err_on(b->written < ptr_written,
                             -BCH_ERR_btree_node_read_err_want_retry,
                             c, ca, b, NULL,
                             btree_node_data_missing,
                             "btree node data missing: expected %u sectors, found %u",
                             ptr_written, b->written);
        } else {
                for (bne = write_block(b);
                     bset_byte_offset(b, bne) < btree_buf_bytes(b);
                     bne = (void *) bne + block_bytes(c))
                        btree_err_on(bne->keys.seq == b->data->keys.seq &&
                                     !bch2_journal_seq_is_blacklisted(c,
                                                                      le64_to_cpu(bne->keys.journal_seq),
                                                                      true),
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, NULL,
                                     btree_node_bset_after_end,
                                     "found bset signature after last bset");
        }

        sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
        sorted->keys.u64s = 0;

        set_btree_bset(b, b->set, &b->data->keys);

        b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);

        u64s = le16_to_cpu(sorted->keys.u64s);
        *sorted = *b->data;
        sorted->keys.u64s = cpu_to_le16(u64s);
        swap(sorted, b->data);
        set_btree_bset(b, b->set, &b->data->keys);
        b->nsets = 1;

        BUG_ON(b->nr.live_u64s != u64s);

        btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);

        if (updated_range)
                bch2_btree_node_drop_keys_outside_node(b);

        i = &b->data->keys;
        for (k = i->start; k != vstruct_last(i);) {
                struct bkey tmp;
                struct bkey_s u = __bkey_disassemble(b, k, &tmp);

                printbuf_reset(&buf);

                if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
                    (bch2_inject_invalid_keys &&
                     !bversion_cmp(u.k->version, MAX_VERSION))) {
                        printbuf_reset(&buf);

                        prt_printf(&buf, "invalid bkey: ");
                        bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
                        prt_printf(&buf, "\n  ");
                        bch2_bkey_val_to_text(&buf, c, u.s_c);

                        btree_err(-BCH_ERR_btree_node_read_err_fixable,
                                  c, NULL, b, i,
                                  btree_node_bad_bkey,
                                  "%s", buf.buf);

                        btree_keys_account_key_drop(&b->nr, 0, k);

                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                        memmove_u64s_down(k, bkey_p_next(k),
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        set_btree_bset_end(b, b->set);
                        continue;
                }

                if (u.k->type == KEY_TYPE_btree_ptr_v2) {
                        struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

                        bp.v->mem_ptr = 0;
                }

                k = bkey_p_next(k);
        }

        bch2_bset_build_aux_tree(b, b->set, false);

        set_needs_whiteout(btree_bset_first(b), true);

        btree_node_reset_sib_u64s(b);

        bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
                struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);

                if (ca2->mi.state != BCH_MEMBER_STATE_rw)
                        set_btree_node_need_rewrite(b);
        }

        if (!ptr_written)
                set_btree_node_need_rewrite(b);
out:
        mempool_free(iter, &c->fill_iter);
        printbuf_exit(&buf);
        bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
        return retry_read;
fsck_err:
        if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
            ret == -BCH_ERR_btree_node_read_err_must_retry)
                retry_read = 1;
        else
                set_btree_node_read_error(b);
        goto out;
}

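/*
 * Read completion runs here: validate what we read, and if that fails keep
 * retrying from whatever replicas haven't failed yet, until we either get a
 * good read or run out of replicas to try. If we saw (and fixed) errors, the
 * node gets rewritten in the background.
 */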
1273 static void btree_node_read_work(struct work_struct *work)
1274 {
1275         struct btree_read_bio *rb =
1276                 container_of(work, struct btree_read_bio, work);
1277         struct bch_fs *c        = rb->c;
1278         struct btree *b         = rb->b;
1279         struct bch_dev *ca      = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1280         struct bio *bio         = &rb->bio;
1281         struct bch_io_failures failed = { .nr = 0 };
1282         struct printbuf buf = PRINTBUF;
1283         bool saw_error = false;
1284         bool retry = false;
1285         bool can_retry;
1286
1287         goto start;
1288         while (1) {
1289                 retry = true;
1290                 bch_info(c, "retrying read");
1291                 ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1292                 rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1293                 bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1294                 bio->bi_iter.bi_sector  = rb->pick.ptr.offset;
1295                 bio->bi_iter.bi_size    = btree_buf_bytes(b);
1296
1297                 if (rb->have_ioref) {
1298                         bio_set_dev(bio, ca->disk_sb.bdev);
1299                         submit_bio_wait(bio);
1300                 } else {
1301                         bio->bi_status = BLK_STS_REMOVED;
1302                 }
1303 start:
1304                 printbuf_reset(&buf);
1305                 bch2_btree_pos_to_text(&buf, c, b);
1306                 bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
1307                                    "btree read error %s for %s",
1308                                    bch2_blk_status_to_str(bio->bi_status), buf.buf);
1309                 if (rb->have_ioref)
1310                         percpu_ref_put(&ca->io_ref);
1311                 rb->have_ioref = false;
1312
1313                 bch2_mark_io_failure(&failed, &rb->pick);
1314
1315                 can_retry = bch2_bkey_pick_read_device(c,
1316                                 bkey_i_to_s_c(&b->key),
1317                                 &failed, &rb->pick) > 0;
1318
1319                 if (!bio->bi_status &&
1320                     !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1321                         if (retry)
1322                                 bch_info(c, "retry success");
1323                         break;
1324                 }
1325
1326                 saw_error = true;
1327
1328                 if (!can_retry) {
1329                         set_btree_node_read_error(b);
1330                         break;
1331                 }
1332         }
1333
1334         bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1335                                rb->start_time);
1336         bio_put(&rb->bio);
1337
1338         if (saw_error && !btree_node_read_error(b)) {
1339                 printbuf_reset(&buf);
1340                 bch2_bpos_to_text(&buf, b->key.k.p);
1341                 bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
1342                          __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
1343
1344                 bch2_btree_node_rewrite_async(c, b);
1345         }
1346
1347         printbuf_exit(&buf);
1348         clear_btree_node_read_in_flight(b);
1349         wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1350 }
1351
1352 static void btree_node_read_endio(struct bio *bio)
1353 {
1354         struct btree_read_bio *rb =
1355                 container_of(bio, struct btree_read_bio, bio);
1356         struct bch_fs *c        = rb->c;
1357
1358         if (rb->have_ioref) {
1359                 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1360
1361                 bch2_latency_acct(ca, rb->start_time, READ);
1362         }
1363
1364         queue_work(c->io_complete_wq, &rb->work);
1365 }
1366
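/* State for reading and comparing every replica of a btree node: */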
1367 struct btree_node_read_all {
1368         struct closure          cl;
1369         struct bch_fs           *c;
1370         struct btree            *b;
1371         unsigned                nr;
1372         void                    *buf[BCH_REPLICAS_MAX];
1373         struct bio              *bio[BCH_REPLICAS_MAX];
1374         blk_status_t            err[BCH_REPLICAS_MAX];
1375 };
1376
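/*
 * Returns the number of sectors of contiguous bsets with a matching sequence
 * number, i.e. how much of this node has been written - or 0 if the node
 * magic is invalid:
 */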
1377 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1378 {
1379         struct btree_node *bn = data;
1380         struct btree_node_entry *bne;
1381         unsigned offset = 0;
1382
1383         if (le64_to_cpu(bn->magic) != bset_magic(c))
1384                 return 0;
1385
1386         while (offset < btree_sectors(c)) {
1387                 if (!offset) {
1388                         offset += vstruct_sectors(bn, c->block_bits);
1389                 } else {
1390                         bne = data + (offset << 9);
1391                         if (bne->keys.seq != bn->keys.seq)
1392                                 break;
1393                         offset += vstruct_sectors(bne, c->block_bits);
1394                 }
1395         }
1396
1397         return offset;
1398 }
1399
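/*
 * Scan past the end of the contiguously written bsets for stray bset
 * signatures with a matching sequence number:
 */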
1400 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1401 {
1402         struct btree_node *bn = data;
1403         struct btree_node_entry *bne;
1404
1405         if (!offset)
1406                 return false;
1407
1408         while (offset < btree_sectors(c)) {
1409                 bne = data + (offset << 9);
1410                 if (bne->keys.seq == bn->keys.seq)
1411                         return true;
1412                 offset++;
1413         }
1414
1415         return false;
1417 }
1418
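/*
 * All replica reads have completed: pick the best replica (valid magic and
 * sequence number, most sectors written), flag mismatches between replicas
 * as fixable errors and, on mismatch, log a map of each replica's bsets
 * before running the normal read path on the best copy:
 */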
1419 static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
1420 {
1421         closure_type(ra, struct btree_node_read_all, cl);
1422         struct bch_fs *c = ra->c;
1423         struct btree *b = ra->b;
1424         struct printbuf buf = PRINTBUF;
1425         bool dump_bset_maps = false;
1426         bool have_retry = false;
1427         int ret = 0, best = -1, write = READ;
1428         unsigned i, written = 0, written2 = 0;
1429         __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1430                 ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1431         bool _saw_error = false, *saw_error = &_saw_error;
1432
1433         for (i = 0; i < ra->nr; i++) {
1434                 struct btree_node *bn = ra->buf[i];
1435
1436                 if (ra->err[i])
1437                         continue;
1438
1439                 if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1440                     (seq && seq != bn->keys.seq))
1441                         continue;
1442
1443                 if (best < 0) {
1444                         best = i;
1445                         written = btree_node_sectors_written(c, bn);
1446                         continue;
1447                 }
1448
1449                 written2 = btree_node_sectors_written(c, ra->buf[i]);
1450                 if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
1451                                  c, NULL, b, NULL,
1452                                  btree_node_replicas_sectors_written_mismatch,
1453                                  "btree node sectors written mismatch: %u != %u",
1454                                  written, written2) ||
1455                     btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1456                                  -BCH_ERR_btree_node_read_err_fixable,
1457                                  c, NULL, b, NULL,
1458                                  btree_node_bset_after_end,
1459                                  "found bset signature after last bset") ||
1460                     btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1461                                  -BCH_ERR_btree_node_read_err_fixable,
1462                                  c, NULL, b, NULL,
1463                                  btree_node_replicas_data_mismatch,
1464                                  "btree node replicas content mismatch"))
1465                         dump_bset_maps = true;
1466
1467                 if (written2 > written) {
1468                         written = written2;
1469                         best = i;
1470                 }
1471         }
1472 fsck_err:
1473         if (dump_bset_maps) {
1474                 for (i = 0; i < ra->nr; i++) {
1475                         struct btree_node *bn = ra->buf[i];
1476                         struct btree_node_entry *bne = NULL;
1477                         unsigned offset = 0, sectors;
1478                         bool gap = false;
1479
1480                         if (ra->err[i])
1481                                 continue;
1482
1483                         printbuf_reset(&buf);
1484
1485                         while (offset < btree_sectors(c)) {
1486                                 if (!offset) {
1487                                         sectors = vstruct_sectors(bn, c->block_bits);
1488                                 } else {
1489                                         bne = ra->buf[i] + (offset << 9);
1490                                         if (bne->keys.seq != bn->keys.seq)
1491                                                 break;
1492                                         sectors = vstruct_sectors(bne, c->block_bits);
1493                                 }
1494
1495                                 prt_printf(&buf, " %u-%u", offset, offset + sectors);
1496                                 if (bne && bch2_journal_seq_is_blacklisted(c,
1497                                                         le64_to_cpu(bne->keys.journal_seq), false))
1498                                         prt_printf(&buf, "*");
1499                                 offset += sectors;
1500                         }
1501
1502                         while (offset < btree_sectors(c)) {
1503                                 bne = ra->buf[i] + (offset << 9);
1504                                 if (bne->keys.seq == bn->keys.seq) {
1505                                         if (!gap)
1506                                                 prt_printf(&buf, " GAP");
1507                                         gap = true;
1508
1509                                         sectors = vstruct_sectors(bne, c->block_bits);
1510                                         prt_printf(&buf, " %u-%u", offset, offset + sectors);
1511                                         if (bch2_journal_seq_is_blacklisted(c,
1512                                                         le64_to_cpu(bne->keys.journal_seq), false))
1513                                                 prt_printf(&buf, "*");
1514                                 }
1515                                 offset++;
1516                         }
1517
1518                         bch_err(c, "replica %u:%s", i, buf.buf);
1519                 }
1520         }
1521
1522         if (best >= 0) {
1523                 memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
1524                 ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1525         } else {
1526                 ret = -1;
1527         }
1528
1529         if (ret)
1530                 set_btree_node_read_error(b);
1531         else if (*saw_error)
1532                 bch2_btree_node_rewrite_async(c, b);
1533
1534         for (i = 0; i < ra->nr; i++) {
1535                 mempool_free(ra->buf[i], &c->btree_bounce_pool);
1536                 bio_put(ra->bio[i]);
1537         }
1538
1539         closure_debug_destroy(&ra->cl);
1540         kfree(ra);
1541         printbuf_exit(&buf);
1542
1543         clear_btree_node_read_in_flight(b);
1544         wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1545 }
1546
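/* Per-replica bio completion: record the status, drop our closure ref: */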
1547 static void btree_node_read_all_replicas_endio(struct bio *bio)
1548 {
1549         struct btree_read_bio *rb =
1550                 container_of(bio, struct btree_read_bio, bio);
1551         struct bch_fs *c        = rb->c;
1552         struct btree_node_read_all *ra = rb->ra;
1553
1554         if (rb->have_ioref) {
1555                 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1556
1557                 bch2_latency_acct(ca, rb->start_time, READ);
1558         }
1559
1560         ra->err[rb->idx] = bio->bi_status;
1561         closure_put(&ra->cl);
1562 }
1563
1564 /*
1565  * XXX This allocates multiple times from the same mempools, and can deadlock
1566  * under sufficient memory pressure (but is only a debug path)
1567  */
1568 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1569 {
1570         struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1571         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1572         const union bch_extent_entry *entry;
1573         struct extent_ptr_decoded pick;
1574         struct btree_node_read_all *ra;
1575         unsigned i;
1576
1577         ra = kzalloc(sizeof(*ra), GFP_NOFS);
1578         if (!ra)
1579                 return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1580
1581         closure_init(&ra->cl, NULL);
1582         ra->c   = c;
1583         ra->b   = b;
1584         ra->nr  = bch2_bkey_nr_ptrs(k);
1585
1586         for (i = 0; i < ra->nr; i++) {
1587                 ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1588                 ra->bio[i] = bio_alloc_bioset(NULL,
1589                                               buf_pages(ra->buf[i], btree_buf_bytes(b)),
1590                                               REQ_OP_READ|REQ_SYNC|REQ_META,
1591                                               GFP_NOFS,
1592                                               &c->btree_bio);
1593         }
1594
1595         i = 0;
1596         bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1597                 struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1598                 struct btree_read_bio *rb =
1599                         container_of(ra->bio[i], struct btree_read_bio, bio);
1600                 rb->c                   = c;
1601                 rb->b                   = b;
1602                 rb->ra                  = ra;
1603                 rb->start_time          = local_clock();
1604                 rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1605                 rb->idx                 = i;
1606                 rb->pick                = pick;
1607                 rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1608                 rb->bio.bi_end_io       = btree_node_read_all_replicas_endio;
1609                 bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
1610
1611                 if (rb->have_ioref) {
1612                         this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1613                                      bio_sectors(&rb->bio));
1614                         bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1615
1616                         closure_get(&ra->cl);
1617                         submit_bio(&rb->bio);
1618                 } else {
1619                         ra->err[i] = BLK_STS_REMOVED;
1620                 }
1621
1622                 i++;
1623         }
1624
1625         if (sync) {
1626                 closure_sync(&ra->cl);
1627                 btree_node_read_all_replicas_done(&ra->cl.work);
1628         } else {
1629                 continue_at(&ra->cl, btree_node_read_all_replicas_done,
1630                             c->io_complete_wq);
1631         }
1632
1633         return 0;
1634 }
1635
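/*
 * Main read path: pick the best device to read from and issue the read,
 * either synchronously or with completion running out of the endio handler;
 * if no device is available, mark the node as failed:
 */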
1636 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
1637                           bool sync)
1638 {
1639         struct bch_fs *c = trans->c;
1640         struct extent_ptr_decoded pick;
1641         struct btree_read_bio *rb;
1642         struct bch_dev *ca;
1643         struct bio *bio;
1644         int ret;
1645
1646         trace_and_count(c, btree_node_read, trans, b);
1647
1648         if (bch2_verify_all_btree_replicas &&
1649             !btree_node_read_all_replicas(c, b, sync))
1650                 return;
1651
1652         ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1653                                          NULL, &pick);
1654
1655         if (ret <= 0) {
1656                 struct printbuf buf = PRINTBUF;
1657
1658                 prt_str(&buf, "btree node read error: no device to read from\n at ");
1659                 bch2_btree_pos_to_text(&buf, c, b);
1660                 bch_err(c, "%s", buf.buf);
1661
1662                 if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1663                     c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1664                         bch2_fatal_error(c);
1665
1666                 set_btree_node_read_error(b);
1667                 clear_btree_node_read_in_flight(b);
1668                 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1669                 printbuf_exit(&buf);
1670                 return;
1671         }
1672
1673         ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1674
1675         bio = bio_alloc_bioset(NULL,
1676                                buf_pages(b->data, btree_buf_bytes(b)),
1677                                REQ_OP_READ|REQ_SYNC|REQ_META,
1678                                GFP_NOFS,
1679                                &c->btree_bio);
1680         rb = container_of(bio, struct btree_read_bio, bio);
1681         rb->c                   = c;
1682         rb->b                   = b;
1683         rb->ra                  = NULL;
1684         rb->start_time          = local_clock();
1685         rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1686         rb->pick                = pick;
1687         INIT_WORK(&rb->work, btree_node_read_work);
1688         bio->bi_iter.bi_sector  = pick.ptr.offset;
1689         bio->bi_end_io          = btree_node_read_endio;
1690         bch2_bio_map(bio, b->data, btree_buf_bytes(b));
1691
1692         if (rb->have_ioref) {
1693                 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1694                              bio_sectors(bio));
1695                 bio_set_dev(bio, ca->disk_sb.bdev);
1696
1697                 if (sync) {
1698                         submit_bio_wait(bio);
1699                         bch2_latency_acct(ca, rb->start_time, READ);
1700                         btree_node_read_work(&rb->work);
1701                 } else {
1702                         submit_bio(bio);
1703                 }
1704         } else {
1705                 bio->bi_status = BLK_STS_REMOVED;
1706
1707                 if (sync)
1708                         btree_node_read_work(&rb->work);
1709                 else
1710                         queue_work(c->io_complete_wq, &rb->work);
1711         }
1712 }
1713
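/*
 * Read a btree root: allocate a node (cannibalizing another if the cache is
 * full), read it synchronously, and on success set it as the in-memory root
 * for this btree:
 */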
1714 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1715                                   const struct bkey_i *k, unsigned level)
1716 {
1717         struct bch_fs *c = trans->c;
1718         struct closure cl;
1719         struct btree *b;
1720         int ret;
1721
1722         closure_init_stack(&cl);
1723
1724         do {
1725                 ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1726                 closure_sync(&cl);
1727         } while (ret);
1728
1729         b = bch2_btree_node_mem_alloc(trans, level != 0);
1730         bch2_btree_cache_cannibalize_unlock(trans);
1731
1732         BUG_ON(IS_ERR(b));
1733
1734         bkey_copy(&b->key, k);
1735         BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1736
1737         set_btree_node_read_in_flight(b);
1738
1739         bch2_btree_node_read(trans, b, true);
1740
1741         if (btree_node_read_error(b)) {
1742                 bch2_btree_node_hash_remove(&c->btree_cache, b);
1743
1744                 mutex_lock(&c->btree_cache.lock);
1745                 list_move(&b->list, &c->btree_cache.freeable);
1746                 mutex_unlock(&c->btree_cache.lock);
1747
1748                 ret = -BCH_ERR_btree_node_read_error;
1749                 goto err;
1750         }
1751
1752         bch2_btree_set_root_for_read(c, b);
1753 err:
1754         six_unlock_write(&b->c.lock);
1755         six_unlock_intent(&b->c.lock);
1756
1757         return ret;
1758 }
1759
1760 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1761                         const struct bkey_i *k, unsigned level)
1762 {
1763         return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1764 }
1765
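/*
 * A write completed: atomically clear the low bit of will_make_reachable,
 * dropping the pending btree_update's closure ref if it was set, then
 * release this write's journal pin:
 */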
1766 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1767                                       struct btree_write *w)
1768 {
1769         unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1770
1771         do {
1772                 old = new = v;
1773                 if (!(old & 1))
1774                         break;
1775
1776                 new &= ~1UL;
1777         } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1778
1779         if (old & 1)
1780                 closure_put(&((struct btree_update *) new)->cl);
1781
1782         bch2_journal_pin_drop(&c->journal, &w->journal);
1783 }
1784
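/*
 * Atomically update the node's flags after a write: if the node was
 * redirtied and nothing blocks another write, immediately restart the write
 * with the recorded write type; otherwise clear write_in_flight and wake
 * waiters:
 */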
1785 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
1786 {
1787         struct btree_write *w = btree_prev_write(b);
1788         unsigned long old, new, v;
1789         unsigned type = 0;
1790
1791         bch2_btree_complete_write(c, b, w);
1792
1793         v = READ_ONCE(b->flags);
1794         do {
1795                 old = new = v;
1796
1797                 if ((old & (1U << BTREE_NODE_dirty)) &&
1798                     (old & (1U << BTREE_NODE_need_write)) &&
1799                     !(old & (1U << BTREE_NODE_never_write)) &&
1800                     !(old & (1U << BTREE_NODE_write_blocked)) &&
1801                     !(old & (1U << BTREE_NODE_will_make_reachable))) {
1802                         new &= ~(1U << BTREE_NODE_dirty);
1803                         new &= ~(1U << BTREE_NODE_need_write);
1804                         new |=  (1U << BTREE_NODE_write_in_flight);
1805                         new |=  (1U << BTREE_NODE_write_in_flight_inner);
1806                         new |=  (1U << BTREE_NODE_just_written);
1807                         new ^=  (1U << BTREE_NODE_write_idx);
1808
1809                         type = new & BTREE_WRITE_TYPE_MASK;
1810                         new &= ~BTREE_WRITE_TYPE_MASK;
1811                 } else {
1812                         new &= ~(1U << BTREE_NODE_write_in_flight);
1813                         new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1814                 }
1815         } while ((v = cmpxchg(&b->flags, old, new)) != old);
1816
1817         if (new & (1U << BTREE_NODE_write_in_flight))
1818                 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1819         else
1820                 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
1821 }
1822
1823 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1824 {
1825         struct btree_trans *trans = bch2_trans_get(c);
1826
1827         btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
1828         __btree_node_write_done(c, b);
1829         six_unlock_read(&b->c.lock);
1830
1831         bch2_trans_put(trans);
1832 }
1833
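/*
 * Write completion work, run from process context: free the bounce buffer,
 * drop pointers to devices where the write failed and, for all but a node's
 * first write, write the updated key back in place. If every replica failed,
 * that's fatal:
 */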
1834 static void btree_node_write_work(struct work_struct *work)
1835 {
1836         struct btree_write_bio *wbio =
1837                 container_of(work, struct btree_write_bio, work);
1838         struct bch_fs *c        = wbio->wbio.c;
1839         struct btree *b         = wbio->wbio.bio.bi_private;
1840         struct bch_extent_ptr *ptr;
1841         int ret = 0;
1842
1843         btree_bounce_free(c,
1844                 wbio->data_bytes,
1845                 wbio->wbio.used_mempool,
1846                 wbio->data);
1847
1848         bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
1849                 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1850
1851         if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
1852                 ret = -BCH_ERR_btree_node_write_all_failed;
1853                 goto err;
1854         }
1855
1856         /*
1857          * For all but a node's first write, write the node's key back in
1858          * place (pointers to failed devices were already dropped above):
1859          */
1860         if (!wbio->wbio.first_btree_write) {
1861                 ret = bch2_trans_do(c, NULL, NULL, 0,
1862                         bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
1863                                         BCH_WATERMARK_reclaim|
1864                                         BCH_TRANS_COMMIT_journal_reclaim|
1865                                         BCH_TRANS_COMMIT_no_enospc|
1866                                         BCH_TRANS_COMMIT_no_check_rw,
1867                                         !wbio->wbio.failed.nr));
1868                 if (ret)
1869                         goto err;
1870         }
1871 out:
1872         bio_put(&wbio->wbio.bio);
1873         btree_node_write_done(c, b);
1874         return;
1875 err:
1876         set_btree_node_noevict(b);
1877         if (!bch2_err_matches(ret, EROFS))
1878                 bch2_fs_fatal_error(c, "fatal error writing btree node: %s", bch2_err_str(ret));
1879         goto out;
1880 }
1881
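/*
 * Bio completion for one device's part of a btree node write: record any
 * error against the original bio's failed-device list; when the last split
 * completes, hand off to btree_node_write_work():
 */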
1882 static void btree_node_write_endio(struct bio *bio)
1883 {
1884         struct bch_write_bio *wbio      = to_wbio(bio);
1885         struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
1886         struct bch_write_bio *orig      = parent ?: wbio;
1887         struct btree_write_bio *wb      = container_of(orig, struct btree_write_bio, wbio);
1888         struct bch_fs *c                = wbio->c;
1889         struct btree *b                 = wbio->bio.bi_private;
1890         struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);
1891         unsigned long flags;
1892
1893         if (wbio->have_ioref)
1894                 bch2_latency_acct(ca, wbio->submit_time, WRITE);
1895
1896         if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
1897                                "btree write error: %s",
1898                                bch2_blk_status_to_str(bio->bi_status)) ||
1899             bch2_meta_write_fault("btree")) {
1900                 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1901                 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1902                 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1903         }
1904
1905         if (wbio->have_ioref)
1906                 percpu_ref_put(&ca->io_ref);
1907
1908         if (parent) {
1909                 bio_put(bio);
1910                 bio_endio(&parent->bio);
1911                 return;
1912         }
1913
1914         clear_btree_node_write_in_flight_inner(b);
1915         wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
1916         INIT_WORK(&wb->work, btree_node_write_work);
1917         queue_work(c->btree_io_complete_wq, &wb->work);
1918 }
1919
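/*
 * Validate a bset (and the node's key) before it goes to disk; a failure
 * here is a bug, so the filesystem is also flagged inconsistent:
 */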
1920 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1921                                    struct bset *i, unsigned sectors)
1922 {
1923         struct printbuf buf = PRINTBUF;
1924         bool saw_error;
1925         int ret;
1926
1927         ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
1928                                 BKEY_TYPE_btree, WRITE, &buf);
1929
1930         if (ret)
1931                 bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
1932         printbuf_exit(&buf);
1933         if (ret)
1934                 return ret;
1935
1936         ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
1937                 validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
1938         if (ret) {
1939                 bch2_inconsistent_error(c);
1940                 dump_stack();
1941         }
1942
1943         return ret;
1944 }
1945
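/*
 * Submit the actual write: pointer offsets are adjusted by sector_offset,
 * since all writes after a node's first are to an offset within the node:
 */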
1946 static void btree_write_submit(struct work_struct *work)
1947 {
1948         struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1949         BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1950
1951         bkey_copy(&tmp.k, &wbio->key);
1952
1953         bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
1954                 ptr->offset += wbio->sector_offset;
1955
1956         bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
1957                                   &tmp.k, false);
1958 }
1959
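/*
 * Start a btree node write - may be called with only a read lock held; see
 * the comment below on how the dirty bit serializes racing writers:
 */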
1960 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
1961 {
1962         struct btree_write_bio *wbio;
1963         struct bset_tree *t;
1964         struct bset *i;
1965         struct btree_node *bn = NULL;
1966         struct btree_node_entry *bne = NULL;
1967         struct sort_iter_stack sort_iter;
1968         struct nonce nonce;
1969         unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1970         u64 seq = 0;
1971         bool used_mempool;
1972         unsigned long old, new;
1973         bool validate_before_checksum = false;
1974         enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
1975         void *data;
1976         int ret;
1977
1978         if (flags & BTREE_WRITE_ALREADY_STARTED)
1979                 goto do_write;
1980
1981         /*
1982          * We may only have a read lock on the btree node - the dirty bit is our
1983          * "lock" against racing with other threads that may be trying to start
1984          * a write, we do a write iff we clear the dirty bit. Since setting the
1985          * dirty bit requires a write lock, we can't race with other threads
1986          * redirtying it:
1987          */
1988         do {
1989                 old = new = READ_ONCE(b->flags);
1990
1991                 if (!(old & (1 << BTREE_NODE_dirty)))
1992                         return;
1993
1994                 if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
1995                     !(old & (1 << BTREE_NODE_need_write)))
1996                         return;
1997
1998                 if (old &
1999                     ((1 << BTREE_NODE_never_write)|
2000                      (1 << BTREE_NODE_write_blocked)))
2001                         return;
2002
2003                 if (b->written &&
2004                     (old & (1 << BTREE_NODE_will_make_reachable)))
2005                         return;
2006
2007                 if (old & (1 << BTREE_NODE_write_in_flight))
2008                         return;
2009
2010                 if (flags & BTREE_WRITE_ONLY_IF_NEED)
2011                         type = new & BTREE_WRITE_TYPE_MASK;
2012                 new &= ~BTREE_WRITE_TYPE_MASK;
2013
2014                 new &= ~(1 << BTREE_NODE_dirty);
2015                 new &= ~(1 << BTREE_NODE_need_write);
2016                 new |=  (1 << BTREE_NODE_write_in_flight);
2017                 new |=  (1 << BTREE_NODE_write_in_flight_inner);
2018                 new |=  (1 << BTREE_NODE_just_written);
2019                 new ^=  (1 << BTREE_NODE_write_idx);
2020         } while (cmpxchg_acquire(&b->flags, old, new) != old);
2021
2022         if (new & (1U << BTREE_NODE_need_write))
2023                 return;
2024 do_write:
2025         BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
2026
2027         atomic_dec(&c->btree_cache.dirty);
2028
2029         BUG_ON(btree_node_fake(b));
2030         BUG_ON((b->will_make_reachable != 0) != !b->written);
2031
2032         BUG_ON(b->written >= btree_sectors(c));
2033         BUG_ON(b->written & (block_sectors(c) - 1));
2034         BUG_ON(bset_written(b, btree_bset_last(b)));
2035         BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
2036         BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
2037
2038         bch2_sort_whiteouts(c, b);
2039
2040         sort_iter_stack_init(&sort_iter, b);
2041
2042         bytes = !b->written
2043                 ? sizeof(struct btree_node)
2044                 : sizeof(struct btree_node_entry);
2045
2046         bytes += b->whiteout_u64s * sizeof(u64);
2047
2048         for_each_bset(b, t) {
2049                 i = bset(b, t);
2050
2051                 if (bset_written(b, i))
2052                         continue;
2053
2054                 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
2055                 sort_iter_add(&sort_iter.iter,
2056                               btree_bkey_first(b, t),
2057                               btree_bkey_last(b, t));
2058                 seq = max(seq, le64_to_cpu(i->journal_seq));
2059         }
2060
2061         BUG_ON(b->written && !seq);
2062
2063         /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2064         bytes += 8;
2065
2066         /* buffer must be a multiple of the block size */
2067         bytes = round_up(bytes, block_bytes(c));
2068
2069         data = btree_bounce_alloc(c, bytes, &used_mempool);
2070
2071         if (!b->written) {
2072                 bn = data;
2073                 *bn = *b->data;
2074                 i = &bn->keys;
2075         } else {
2076                 bne = data;
2077                 bne->keys = b->data->keys;
2078                 i = &bne->keys;
2079         }
2080
2081         i->journal_seq  = cpu_to_le64(seq);
2082         i->u64s         = 0;
2083
2084         sort_iter_add(&sort_iter.iter,
2085                       unwritten_whiteouts_start(b),
2086                       unwritten_whiteouts_end(b));
2087         SET_BSET_SEPARATE_WHITEOUTS(i, false);
2088
2089         b->whiteout_u64s = 0;
2090
2091         u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
2092         le16_add_cpu(&i->u64s, u64s);
2093
2094         BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2095
2096         set_needs_whiteout(i, false);
2097
2098         /* do we have data to write? */
2099         if (b->written && !i->u64s)
2100                 goto nowrite;
2101
2102         bytes_to_write = vstruct_end(i) - data;
2103         sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2104
2105         if (!b->written &&
2106             b->key.k.type == KEY_TYPE_btree_ptr_v2)
2107                 BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
2108
2109         memset(data + bytes_to_write, 0,
2110                (sectors_to_write << 9) - bytes_to_write);
2111
2112         BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2113         BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2114         BUG_ON(i->seq != b->data->keys.seq);
2115
2116         i->version = cpu_to_le16(c->sb.version);
2117         SET_BSET_OFFSET(i, b->written);
2118         SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2119
2120         if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2121                 validate_before_checksum = true;
2122
2123         /* validate_bset will be modifying: */
2124         if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2125                 validate_before_checksum = true;
2126
2127         /* if we're going to be encrypting, check metadata validity first: */
2128         if (validate_before_checksum &&
2129             validate_bset_for_write(c, b, i, sectors_to_write))
2130                 goto err;
2131
2132         ret = bset_encrypt(c, i, b->written << 9);
2133         if (bch2_fs_fatal_err_on(ret, c,
2134                         "error encrypting btree node: %i\n", ret))
2135                 goto err;
2136
2137         nonce = btree_nonce(i, b->written << 9);
2138
2139         if (bn)
2140                 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2141         else
2142                 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2143
2144         /* if we're not encrypting, check metadata after checksumming: */
2145         if (!validate_before_checksum &&
2146             validate_bset_for_write(c, b, i, sectors_to_write))
2147                 goto err;
2148
2149         /*
2150          * We handle btree write errors by immediately halting the journal -
2151          * after we've done that, we can't issue any subsequent btree writes
2152          * because they might have pointers to new nodes that failed to write.
2153          *
2154          * Furthermore, there's no point in doing any more btree writes because
2155          * with the journal stopped, we're never going to update the journal to
2156          * reflect that those writes were done and the data flushed from the
2157          * journal:
2158          *
2159          * Also on journal error, the pending write may have updates that were
2160          * never journalled (interior nodes, see btree_update_nodes_written()) -
2161          * it's critical that we don't do the write in that case otherwise we
2162          * will have updates visible that weren't in the journal:
2163          *
2164          * Make sure to update b->written so bch2_btree_init_next() doesn't
2165          * break:
2166          */
2167         if (bch2_journal_error(&c->journal) ||
2168             c->opts.nochanges)
2169                 goto err;
2170
2171         trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2172
2173         wbio = container_of(bio_alloc_bioset(NULL,
2174                                 buf_pages(data, sectors_to_write << 9),
2175                                 REQ_OP_WRITE|REQ_META,
2176                                 GFP_NOFS,
2177                                 &c->btree_bio),
2178                             struct btree_write_bio, wbio.bio);
2179         wbio_init(&wbio->wbio.bio);
2180         wbio->data                      = data;
2181         wbio->data_bytes                = bytes;
2182         wbio->sector_offset             = b->written;
2183         wbio->wbio.c                    = c;
2184         wbio->wbio.used_mempool         = used_mempool;
2185         wbio->wbio.first_btree_write    = !b->written;
2186         wbio->wbio.bio.bi_end_io        = btree_node_write_endio;
2187         wbio->wbio.bio.bi_private       = b;
2188
2189         bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2190
2191         bkey_copy(&wbio->key, &b->key);
2192
2193         b->written += sectors_to_write;
2194
2195         if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2196                 bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2197                         cpu_to_le16(b->written);
2198
2199         atomic64_inc(&c->btree_write_stats[type].nr);
2200         atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2201
2202         INIT_WORK(&wbio->work, btree_write_submit);
2203         queue_work(c->io_complete_wq, &wbio->work);
2204         return;
2205 err:
2206         set_btree_node_noevict(b);
2207         b->written += sectors_to_write;
2208 nowrite:
2209         btree_bounce_free(c, bytes, used_mempool, data);
2210         __btree_node_write_done(c, b);
2211 }
2212
2213 /*
2214  * Work that must be done with write lock held:
2215  */
2216 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2217 {
2218         bool invalidated_iter = false;
2219         struct btree_node_entry *bne;
2220         struct bset_tree *t;
2221
2222         if (!btree_node_just_written(b))
2223                 return false;
2224
2225         BUG_ON(b->whiteout_u64s);
2226
2227         clear_btree_node_just_written(b);
2228
2229         /*
2230          * Note: immediately after write, bset_written() doesn't work - the
2231          * amount of data we had to write after compaction might have been
2232          * smaller than the offset of the last bset.
2233          *
2234          * However, we know that all bsets have been written here, as long as
2235          * we're still holding the write lock:
2236          */
2237
2238         /*
2239          * XXX: decide if we really want to unconditionally sort down to a
2240          * single bset:
2241          */
2242         if (b->nsets > 1) {
2243                 btree_node_sort(c, b, 0, b->nsets, true);
2244                 invalidated_iter = true;
2245         } else {
2246                 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2247         }
2248
2249         for_each_bset(b, t)
2250                 set_needs_whiteout(bset(b, t), true);
2251
2252         bch2_btree_verify(c, b);
2253
2254         /*
2255          * If later we don't unconditionally sort down to a single bset, we have
2256          * to ensure this is still true:
2257          */
2258         BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2259
2260         bne = want_new_bset(c, b);
2261         if (bne)
2262                 bch2_bset_init_next(b, bne);
2263
2264         bch2_btree_build_aux_trees(b);
2265
2266         return invalidated_iter;
2267 }
2268
2269 /*
2270  * Use this one if the node is intent locked:
2271  */
2272 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2273                            enum six_lock_type lock_type_held,
2274                            unsigned flags)
2275 {
2276         if (lock_type_held == SIX_LOCK_intent ||
2277             (lock_type_held == SIX_LOCK_read &&
2278              six_lock_tryupgrade(&b->c.lock))) {
2279                 __bch2_btree_node_write(c, b, flags);
2280
2281                 /* don't cycle lock unnecessarily: */
2282                 if (btree_node_just_written(b) &&
2283                     six_trylock_write(&b->c.lock)) {
2284                         bch2_btree_post_write_cleanup(c, b);
2285                         six_unlock_write(&b->c.lock);
2286                 }
2287
2288                 if (lock_type_held == SIX_LOCK_read)
2289                         six_lock_downgrade(&b->c.lock);
2290         } else {
2291                 __bch2_btree_node_write(c, b, flags);
2292                 if (lock_type_held == SIX_LOCK_write &&
2293                     btree_node_just_written(b))
2294                         bch2_btree_post_write_cleanup(c, b);
2295         }
2296 }
2297
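/*
 * Wait for all in-flight reads or writes (per @flag) to complete, restarting
 * the hash table walk each time we wait (the RCU read lock is dropped while
 * waiting). Returns true if we waited at all:
 */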
2298 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2299 {
2300         struct bucket_table *tbl;
2301         struct rhash_head *pos;
2302         struct btree *b;
2303         unsigned i;
2304         bool ret = false;
2305 restart:
2306         rcu_read_lock();
2307         for_each_cached_btree(b, c, tbl, i, pos)
2308                 if (test_bit(flag, &b->flags)) {
2309                         rcu_read_unlock();
2310                         wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2311                         ret = true;
2312                         goto restart;
2313                 }
2314         rcu_read_unlock();
2315
2316         return ret;
2317 }
2318
2319 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2320 {
2321         return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2322 }
2323
2324 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2325 {
2326         return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2327 }
2328
2329 static const char * const bch2_btree_write_types[] = {
2330 #define x(t, n) [n] = #t,
2331         BCH_BTREE_WRITE_TYPES()
2332         NULL
2333 };
2334
2335 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2336 {
2337         printbuf_tabstop_push(out, 20);
2338         printbuf_tabstop_push(out, 10);
2339
2340         prt_tab(out);
2341         prt_str(out, "nr");
2342         prt_tab(out);
2343         prt_str(out, "size");
2344         prt_newline(out);
2345
2346         for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2347                 u64 nr          = atomic64_read(&c->btree_write_stats[i].nr);
2348                 u64 bytes       = atomic64_read(&c->btree_write_stats[i].bytes);
2349
2350                 prt_printf(out, "%s:", bch2_btree_write_types[i]);
2351                 prt_tab(out);
2352                 prt_u64(out, nr);
2353                 prt_tab(out);
2354                 prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2355                 prt_newline(out);
2356         }
2357 }