fs/bcachefs/io_write.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extent_update.h"
#include "inode.h"
#include "io_write.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"

#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
                                       u64 now, int rw)
{
        u64 latency_capable =
                ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
        /* ideally we'd be taking into account the device's variance here: */
        u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
        s64 latency_over = io_latency - latency_threshold;

        if (latency_threshold && latency_over > 0) {
                /*
                 * bump up congested by approximately latency_over * 4 /
                 * latency_threshold - we don't need much accuracy here so don't
                 * bother with the divide:
                 */
                if (atomic_read(&ca->congested) < CONGESTED_MAX)
                        atomic_add(latency_over >>
                                   max_t(int, ilog2(latency_threshold) - 2, 0),
                                   &ca->congested);

                ca->congested_last = now;
        } else if (atomic_read(&ca->congested) > 0) {
                atomic_dec(&ca->congested);
        }
}
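/*
 * Illustrative arithmetic (not from the original source): with
 * latency_threshold = 1024 in the clock's units, ilog2(latency_threshold)
 * - 2 = 8, so an IO that finishes 2048 units over threshold adds
 * 2048 >> 8 = 8 to ->congested - the same result as
 * latency_over * 4 / latency_threshold, without a 64-bit divide in the
 * IO completion path.
 */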

void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
        atomic64_t *latency = &ca->cur_latency[rw];
        u64 now = local_clock();
        u64 io_latency = time_after64(now, submit_time)
                ? now - submit_time
                : 0;
        u64 old, new, v = atomic64_read(latency);

        do {
                old = v;

                /*
                 * If the io latency was reasonably close to the current
                 * latency, skip doing the update and atomic operation - most of
                 * the time:
                 */
                if (abs((int) (old - io_latency)) < (old >> 1) &&
                    now & ~(~0U << 5))
                        break;

                new = ewma_add(old, io_latency, 5);
        } while ((v = atomic64_cmpxchg(latency, old, new)) != old);

        bch2_congested_acct(ca, io_latency, now, rw);

        __bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
}
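/*
 * Worked example (illustrative, assuming ewma_add(old, val, 5) computes
 * old + (val - old) / 2^5, the usual power-of-two EWMA): old = 1000,
 * io_latency = 2000 gives new ~= 1031, so a single slow IO only nudges
 * the running estimate. The early break skips the cmpxchg when the new
 * sample is within 50% of the current estimate, unless the low 5 bits
 * of @now are zero - so the estimate is still refreshed occasionally
 * even when latency is stable.
 */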

#endif

/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
        struct bvec_iter_all iter;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, bio, iter)
                if (bv->bv_page != ZERO_PAGE(0))
                        mempool_free(bv->bv_page, &c->bio_bounce_pages);
        bio->bi_vcnt = 0;
}

static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
        struct page *page;

        if (likely(!*using_mempool)) {
                page = alloc_page(GFP_NOFS);
                if (unlikely(!page)) {
                        mutex_lock(&c->bio_bounce_pages_lock);
                        *using_mempool = true;
                        goto pool_alloc;
                }
        } else {
pool_alloc:
                page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
        }

        return page;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
                               size_t size)
{
        bool using_mempool = false;

        while (size) {
                struct page *page = __bio_alloc_page_pool(c, &using_mempool);
                unsigned len = min_t(size_t, PAGE_SIZE, size);

                BUG_ON(!bio_add_page(bio, page, len, 0));
                size -= len;
        }

        if (using_mempool)
                mutex_unlock(&c->bio_bounce_pages_lock);
}
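/*
 * Typical usage (illustrative sketch, not from this file):
 *
 *	bch2_bio_alloc_pages_pool(c, bio, size);	// build bounce buffer
 *	// ... submit IO, wait for completion ...
 *	bch2_bio_free_pages_pool(c, bio);		// return the pages
 *
 * bio_bounce_pages_lock is only taken after the first alloc_page()
 * failure, so the common, memory-plentiful path stays lock-free.
 */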

/* Extent update path: */

int bch2_sum_sector_overwrites(struct btree_trans *trans,
                               struct btree_iter *extent_iter,
                               struct bkey_i *new,
                               bool *usage_increasing,
                               s64 *i_sectors_delta,
                               s64 *disk_sectors_delta)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c old;
        unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
        bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
        int ret = 0;

        *usage_increasing       = false;
        *i_sectors_delta        = 0;
        *disk_sectors_delta     = 0;

        bch2_trans_copy_iter(&iter, extent_iter);

        for_each_btree_key_upto_continue_norestart(iter,
                                new->k.p, BTREE_ITER_SLOTS, old, ret) {
                s64 sectors = min(new->k.p.offset, old.k->p.offset) -
                        max(bkey_start_offset(&new->k),
                            bkey_start_offset(old.k));

                *i_sectors_delta += sectors *
                        (bkey_extent_is_allocation(&new->k) -
                         bkey_extent_is_allocation(old.k));

                *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
                *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
                        ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
                        : 0;

                if (!*usage_increasing &&
                    (new->k.p.snapshot != old.k->p.snapshot ||
                     new_replicas > bch2_bkey_replicas(c, old) ||
                     (!new_compressed && bch2_bkey_sectors_compressed(old))))
                        *usage_increasing = true;

                if (bkey_ge(old.k->p, new->k.p))
                        break;
        }

        bch2_trans_iter_exit(trans, &iter);
        return ret;
}
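/*
 * Worked example (illustrative): writing a 2-replica extent over
 * sectors 0..8 where 0..4 was already allocated with 2 replicas and
 * 4..8 was a hole:
 *
 *	i_sectors_delta    = 4        (only the hole is newly allocated)
 *	disk_sectors_delta = 8*2 - 4*2 = 8
 *
 * *usage_increasing is also set, since the hole's replica count (0) is
 * below the new key's.
 */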

static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
                                                    struct btree_iter *extent_iter,
                                                    u64 new_i_size,
                                                    s64 i_sectors_delta)
{
        /*
         * Crazy performance optimization:
         * Every extent update needs to also update the inode: the inode trigger
         * will set bi->journal_seq to the journal sequence number of this
         * transaction - for fsync.
         *
         * But if that's the only reason we're updating the inode (we're not
         * updating bi_size or bi_sectors), then we don't need the inode update
         * to be journalled - if we crash, the bi_journal_seq update will be
         * lost, but that's fine.
         */
        unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;

        struct btree_iter iter;
        struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
                              SPOS(0,
                                   extent_iter->pos.inode,
                                   extent_iter->snapshot),
                              BTREE_ITER_CACHED);
        int ret = bkey_err(k);
        if (unlikely(ret))
                return ret;

        /*
         * varint_decode_fast(), in the inode .invalid method, reads up to 7
         * bytes past the end of the buffer:
         */
        struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8);
        ret = PTR_ERR_OR_ZERO(k_mut);
        if (unlikely(ret))
                goto err;

        bkey_reassemble(k_mut, k);

        if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) {
                k_mut = bch2_inode_to_v3(trans, k_mut);
                ret = PTR_ERR_OR_ZERO(k_mut);
                if (unlikely(ret))
                        goto err;
        }

        struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut);

        if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
            new_i_size > le64_to_cpu(inode->v.bi_size)) {
                inode->v.bi_size = cpu_to_le64(new_i_size);
                inode_update_flags = 0;
        }

        if (i_sectors_delta) {
                le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
                inode_update_flags = 0;
        }

        if (inode->k.p.snapshot != iter.snapshot) {
                inode->k.p.snapshot = iter.snapshot;
                inode_update_flags = 0;
        }

        ret = bch2_trans_update(trans, &iter, &inode->k_i,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
                                inode_update_flags);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}
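/*
 * Summary of the optimization above (illustrative): the inode update is
 * journalled only when something besides bi_journal_seq changed -
 * bi_size grew, bi_sectors moved, or the key was rewritten in a new
 * snapshot. A bare bi_journal_seq bump goes in with
 * BTREE_UPDATE_NOJOURNAL; losing it across a crash is harmless because
 * it only tells fsync which journal entry needs flushing.
 */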

int bch2_extent_update(struct btree_trans *trans,
                       subvol_inum inum,
                       struct btree_iter *iter,
                       struct bkey_i *k,
                       struct disk_reservation *disk_res,
                       u64 new_i_size,
                       s64 *i_sectors_delta_total,
                       bool check_enospc)
{
        struct bpos next_pos;
        bool usage_increasing;
        s64 i_sectors_delta = 0, disk_sectors_delta = 0;
        int ret;

        /*
         * This traverses the iterator without changing iter->path->pos to
         * search_key() (which is pos + 1 for extents): we want a path
         * already traversed at iter->pos, because bch2_trans_extent_update()
         * will use it to attempt extent merging
         */
        ret = __bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;

        ret = bch2_extent_trim_atomic(trans, iter, k);
        if (ret)
                return ret;

        next_pos = k->k.p;

        ret = bch2_sum_sector_overwrites(trans, iter, k,
                        &usage_increasing,
                        &i_sectors_delta,
                        &disk_sectors_delta);
        if (ret)
                return ret;

        if (disk_res &&
            disk_sectors_delta > (s64) disk_res->sectors) {
                ret = bch2_disk_reservation_add(trans->c, disk_res,
                                        disk_sectors_delta - disk_res->sectors,
                                        !check_enospc || !usage_increasing
                                        ? BCH_DISK_RESERVATION_NOFAIL : 0);
                if (ret)
                        return ret;
        }

        /*
         * Note:
         * We always have to do an inode update - even when i_size/i_sectors
         * aren't changing - for fsync to work properly; fsync relies on
         * inode->bi_journal_seq which is updated by the trigger code:
         */
        ret =   bch2_extent_update_i_size_sectors(trans, iter,
                                                  min(k->k.p.offset << 9, new_i_size),
                                                  i_sectors_delta) ?:
                bch2_trans_update(trans, iter, k, 0) ?:
                bch2_trans_commit(trans, disk_res, NULL,
                                BCH_TRANS_COMMIT_no_check_rw|
                                BCH_TRANS_COMMIT_no_enospc);
        if (unlikely(ret))
                return ret;

        if (i_sectors_delta_total)
                *i_sectors_delta_total += i_sectors_delta;
        bch2_btree_iter_set_pos(iter, next_pos);
        return 0;
}

static int bch2_write_index_default(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct bkey_buf sk;
        struct keylist *keys = &op->insert_keys;
        struct bkey_i *k = bch2_keylist_front(keys);
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        subvol_inum inum = {
                .subvol = op->subvol,
                .inum   = k->k.p.inode,
        };
        int ret;

        BUG_ON(!inum.subvol);

        bch2_bkey_buf_init(&sk);

        do {
                bch2_trans_begin(trans);

                k = bch2_keylist_front(keys);
                bch2_bkey_buf_copy(&sk, c, k);

                ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
                                                  &sk.k->k.p.snapshot);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
                if (ret)
                        break;

                bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                                     bkey_start_pos(&sk.k->k),
                                     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

                ret =   bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
                        bch2_extent_update(trans, inum, &iter, sk.k,
                                        &op->res,
                                        op->new_i_size, &op->i_sectors_delta,
                                        op->flags & BCH_WRITE_CHECK_ENOSPC);
                bch2_trans_iter_exit(trans, &iter);

                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
                if (ret)
                        break;

                if (bkey_ge(iter.pos, k->k.p))
                        bch2_keylist_pop_front(&op->insert_keys);
                else
                        bch2_cut_front(iter.pos, k);
        } while (!bch2_keylist_empty(keys));

        bch2_trans_put(trans);
        bch2_bkey_buf_exit(&sk, c);

        return ret;
}

/* Writes */

void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
                               enum bch_data_type type,
                               const struct bkey_i *k,
                               bool nocow)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
        struct bch_write_bio *n;

        BUG_ON(c->opts.nochanges);

        bkey_for_each_ptr(ptrs, ptr) {
                BUG_ON(!bch2_dev_exists2(c, ptr->dev));

                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

                if (to_entry(ptr + 1) < ptrs.end) {
                        n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
                                                GFP_NOFS, &ca->replica_set));

                        n->bio.bi_end_io        = wbio->bio.bi_end_io;
                        n->bio.bi_private       = wbio->bio.bi_private;
                        n->parent               = wbio;
                        n->split                = true;
                        n->bounce               = false;
                        n->put_bio              = true;
                        n->bio.bi_opf           = wbio->bio.bi_opf;
                        bio_inc_remaining(&wbio->bio);
                } else {
                        n = wbio;
                        n->split                = false;
                }

                n->c                    = c;
                n->dev                  = ptr->dev;
                n->have_ioref           = nocow || bch2_dev_get_ioref(ca,
                                        type == BCH_DATA_btree ? READ : WRITE);
                n->nocow                = nocow;
                n->submit_time          = local_clock();
                n->inode_offset         = bkey_start_offset(&k->k);
                n->bio.bi_iter.bi_sector = ptr->offset;

                if (likely(n->have_ioref)) {
                        this_cpu_add(ca->io_done->sectors[WRITE][type],
                                     bio_sectors(&n->bio));

                        bio_set_dev(&n->bio, ca->disk_sb.bdev);

                        if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
                                bio_endio(&n->bio);
                                continue;
                        }

                        submit_bio(&n->bio);
                } else {
                        n->bio.bi_status        = BLK_STS_REMOVED;
                        bio_endio(&n->bio);
                }
        }
}
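/*
 * Completion accounting (illustrative): for a key with three pointers,
 * the first two get clones with ->parent pointing at @wbio, each clone
 * bumping the original bio's remaining count via bio_inc_remaining();
 * the final pointer reuses @wbio itself. bch2_write_endio() ends the
 * parent once per finished clone, so the write op's closure is only
 * released after every replica has completed or failed.
 */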

static void __bch2_write(struct bch_write_op *);

static void bch2_write_done(struct closure *cl)
{
        struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
        struct bch_fs *c = op->c;

        EBUG_ON(op->open_buckets.nr);

        bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
        bch2_disk_reservation_put(c, &op->res);

        if (!(op->flags & BCH_WRITE_MOVE))
                bch2_write_ref_put(c, BCH_WRITE_REF_write);
        bch2_keylist_free(&op->insert_keys, op->inline_keys);

        EBUG_ON(cl->parent);
        closure_debug_destroy(cl);
        if (op->end_io)
                op->end_io(op);
}

static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
        struct keylist *keys = &op->insert_keys;
        struct bch_extent_ptr *ptr;
        struct bkey_i *src, *dst = keys->keys, *n;

        for (src = keys->keys; src != keys->top; src = n) {
                n = bkey_next(src);

                if (bkey_extent_is_direct_data(&src->k)) {
                        bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
                                            test_bit(ptr->dev, op->failed.d));

                        if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
                                return -EIO;
                }

                if (dst != src)
                        memmove_u64s_down(dst, src, src->k.u64s);
                dst = bkey_next(dst);
        }

        keys->top = dst;
        return 0;
}

/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:         bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct keylist *keys = &op->insert_keys;
        unsigned dev;
        int ret = 0;

        if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
                ret = bch2_write_drop_io_error_ptrs(op);
                if (ret)
                        goto err;
        }

        if (!bch2_keylist_empty(keys)) {
                u64 sectors_start = keylist_sectors(keys);

                ret = !(op->flags & BCH_WRITE_MOVE)
                        ? bch2_write_index_default(op)
                        : bch2_data_update_index_update(op);

                BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
                BUG_ON(keylist_sectors(keys) && !ret);

                op->written += sectors_start - keylist_sectors(keys);

                if (ret && !bch2_err_matches(ret, EROFS)) {
                        struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

                        bch_err_inum_offset_ratelimited(c,
                                insert->k.p.inode, insert->k.p.offset << 9,
                                "%s write error while doing btree update: %s",
                                op->flags & BCH_WRITE_MOVE ? "move" : "user",
                                bch2_err_str(ret));
                }

                if (ret)
                        goto err;
        }
out:
        /* If a bucket wasn't written, we can't erasure code it: */
        for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
                bch2_open_bucket_write_error(c, &op->open_buckets, dev);

        bch2_open_buckets_put(c, &op->open_buckets);
        return;
err:
        keys->top = keys->keys;
        op->error = ret;
        op->flags |= BCH_WRITE_DONE;
        goto out;
}

static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
        if (state != wp->state) {
                u64 now = ktime_get_ns();

                if (wp->last_state_change &&
                    time_after64(now, wp->last_state_change))
                        wp->time[wp->state] += now - wp->last_state_change;
                wp->state = state;
                wp->last_state_change = now;
        }
}

static inline void wp_update_state(struct write_point *wp, bool running)
{
        enum write_point_state state;

        state = running                  ? WRITE_POINT_running :
                !list_empty(&wp->writes) ? WRITE_POINT_waiting_io
                                         : WRITE_POINT_stopped;

        __wp_update_state(wp, state);
}
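/*
 * Example (illustrative): a write point that runs for 1ms, then has IO
 * outstanding for 4ms, then goes idle accumulates time[running] += 1ms
 * at the transition to waiting_io and time[waiting_io] += 4ms at the
 * transition to stopped - each state's counter only advances while the
 * write point is actually in that state.
 */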

static CLOSURE_CALLBACK(bch2_write_index)
{
        closure_type(op, struct bch_write_op, cl);
        struct write_point *wp = op->wp;
        struct workqueue_struct *wq = index_update_wq(op);
        unsigned long flags;

        if ((op->flags & BCH_WRITE_DONE) &&
            (op->flags & BCH_WRITE_MOVE))
                bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

        spin_lock_irqsave(&wp->writes_lock, flags);
        if (wp->state == WRITE_POINT_waiting_io)
                __wp_update_state(wp, WRITE_POINT_waiting_work);
        list_add_tail(&op->wp_list, &wp->writes);
        spin_unlock_irqrestore(&wp->writes_lock, flags);

        queue_work(wq, &wp->index_update_work);
}

static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
        op->wp = wp;

        if (wp->state == WRITE_POINT_stopped) {
                spin_lock_irq(&wp->writes_lock);
                __wp_update_state(wp, WRITE_POINT_waiting_io);
                spin_unlock_irq(&wp->writes_lock);
        }
}

void bch2_write_point_do_index_updates(struct work_struct *work)
{
        struct write_point *wp =
                container_of(work, struct write_point, index_update_work);
        struct bch_write_op *op;

        while (1) {
                spin_lock_irq(&wp->writes_lock);
                op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
                if (op)
                        list_del(&op->wp_list);
                wp_update_state(wp, op != NULL);
                spin_unlock_irq(&wp->writes_lock);

                if (!op)
                        break;

                op->flags |= BCH_WRITE_IN_WORKER;

                __bch2_write_index(op);

                if (!(op->flags & BCH_WRITE_DONE))
                        __bch2_write(op);
                else
                        bch2_write_done(&op->cl);
        }
}

static void bch2_write_endio(struct bio *bio)
{
        struct closure *cl              = bio->bi_private;
        struct bch_write_op *op         = container_of(cl, struct bch_write_op, cl);
        struct bch_write_bio *wbio      = to_wbio(bio);
        struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
        struct bch_fs *c                = wbio->c;
        struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);

        if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
                                    op->pos.inode,
                                    wbio->inode_offset << 9,
                                    "data write error: %s",
                                    bch2_blk_status_to_str(bio->bi_status))) {
                set_bit(wbio->dev, op->failed.d);
                op->flags |= BCH_WRITE_IO_ERROR;
        }

        if (wbio->nocow)
                set_bit(wbio->dev, op->devs_need_flush->d);

        if (wbio->have_ioref) {
                bch2_latency_acct(ca, wbio->submit_time, WRITE);
                percpu_ref_put(&ca->io_ref);
        }

        if (wbio->bounce)
                bch2_bio_free_pages_pool(c, bio);

        if (wbio->put_bio)
                bio_put(bio);

        if (parent)
                bio_endio(&parent->bio);
        else
                closure_put(cl);
}

static void init_append_extent(struct bch_write_op *op,
                               struct write_point *wp,
                               struct bversion version,
                               struct bch_extent_crc_unpacked crc)
{
        struct bkey_i_extent *e;

        op->pos.offset += crc.uncompressed_size;

        e = bkey_extent_init(op->insert_keys.top);
        e->k.p          = op->pos;
        e->k.size       = crc.uncompressed_size;
        e->k.version    = version;

        if (crc.csum_type ||
            crc.compression_type ||
            crc.nonce)
                bch2_extent_crc_append(&e->k_i, crc);

        bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
                                       op->flags & BCH_WRITE_CACHED);

        bch2_keylist_push(&op->insert_keys);
}

static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
                                        struct write_point *wp,
                                        struct bio *src,
                                        bool *page_alloc_failed,
                                        void *buf)
{
        struct bch_write_bio *wbio;
        struct bio *bio;
        unsigned output_available =
                min(wp->sectors_free << 9, src->bi_iter.bi_size);
        unsigned pages = DIV_ROUND_UP(output_available +
                                      (buf
                                       ? ((unsigned long) buf & (PAGE_SIZE - 1))
                                       : 0), PAGE_SIZE);

        pages = min(pages, BIO_MAX_VECS);

        bio = bio_alloc_bioset(NULL, pages, 0,
                               GFP_NOFS, &c->bio_write);
        wbio                    = wbio_init(bio);
        wbio->put_bio           = true;
        /* copy WRITE_SYNC flag */
        wbio->bio.bi_opf        = src->bi_opf;

        if (buf) {
                bch2_bio_map(bio, buf, output_available);
                return bio;
        }

        wbio->bounce            = true;

        /*
         * We can't use mempool for more than c->sb.encoded_extent_max
         * worth of pages, but we'd like to allocate more if we can:
         */
        bch2_bio_alloc_pages_pool(c, bio,
                                  min_t(unsigned, output_available,
                                        c->opts.encoded_extent_max));

        if (bio->bi_iter.bi_size < output_available)
                *page_alloc_failed =
                        bch2_bio_alloc_pages(bio,
                                             output_available -
                                             bio->bi_iter.bi_size,
                                             GFP_NOFS) != 0;

        return bio;
}
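/*
 * Worked example (illustrative): with wp->sectors_free = 24 and a 16KiB
 * @src, output_available = min(24 << 9, 16384) = 12288, so three 4KiB
 * pages are needed - plus one more when @buf isn't page aligned, to
 * cover the partial first page. Only encoded_extent_max worth of pages
 * is guaranteed by the mempool; the bch2_bio_alloc_pages() top-up may
 * fail, which just shrinks how much this pass can write.
 */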

static int bch2_write_rechecksum(struct bch_fs *c,
                                 struct bch_write_op *op,
                                 unsigned new_csum_type)
{
        struct bio *bio = &op->wbio.bio;
        struct bch_extent_crc_unpacked new_crc;
        int ret;

        /* bch2_rechecksum_bio() can't encrypt or decrypt data: */

        if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
            bch2_csum_type_is_encryption(new_csum_type))
                new_csum_type = op->crc.csum_type;

        ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
                                  NULL, &new_crc,
                                  op->crc.offset, op->crc.live_size,
                                  new_csum_type);
        if (ret)
                return ret;

        bio_advance(bio, op->crc.offset << 9);
        bio->bi_iter.bi_size = op->crc.live_size << 9;
        op->crc = new_crc;
        return 0;
}

static int bch2_write_decrypt(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct nonce nonce = extent_nonce(op->version, op->crc);
        struct bch_csum csum;
        int ret;

        if (!bch2_csum_type_is_encryption(op->crc.csum_type))
                return 0;

        /*
         * If we need to decrypt data in the write path, we'll no longer be able
         * to verify the existing checksum (poly1305 mac, in this case) after
         * it's decrypted - this is the last point we'll be able to reverify the
         * checksum:
         */
        csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
        if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
                return -EIO;

        ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
        op->crc.csum_type = 0;
        op->crc.csum = (struct bch_csum) { 0, 0 };
        return ret;
}

static enum prep_encoded_ret {
        PREP_ENCODED_OK,
        PREP_ENCODED_ERR,
        PREP_ENCODED_CHECKSUM_ERR,
        PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
        struct bch_fs *c = op->c;
        struct bio *bio = &op->wbio.bio;

        if (!(op->flags & BCH_WRITE_DATA_ENCODED))
                return PREP_ENCODED_OK;

        BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

        /* Can we just write the entire extent as is? */
        if (op->crc.uncompressed_size == op->crc.live_size &&
            op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
            op->crc.compressed_size <= wp->sectors_free &&
            (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
             op->incompressible)) {
                if (!crc_is_compressed(op->crc) &&
                    op->csum_type != op->crc.csum_type &&
                    bch2_write_rechecksum(c, op, op->csum_type) &&
                    !c->opts.no_data_io)
                        return PREP_ENCODED_CHECKSUM_ERR;

                return PREP_ENCODED_DO_WRITE;
        }

        /*
         * If the data is compressed and we couldn't write the entire extent as
         * is, we have to decompress it:
         */
        if (crc_is_compressed(op->crc)) {
                struct bch_csum csum;

                if (bch2_write_decrypt(op))
                        return PREP_ENCODED_CHECKSUM_ERR;

                /* Last point we can still verify checksum: */
                csum = bch2_checksum_bio(c, op->crc.csum_type,
                                         extent_nonce(op->version, op->crc),
                                         bio);
                if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
                        return PREP_ENCODED_CHECKSUM_ERR;

                if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
                        return PREP_ENCODED_ERR;
        }

        /*
         * No longer have compressed data after this point - data might be
         * encrypted:
         */

        /*
         * If the data is checksummed and we're only writing a subset,
         * rechecksum and adjust bio to point to currently live data:
         */
        if ((op->crc.live_size != op->crc.uncompressed_size ||
             op->crc.csum_type != op->csum_type) &&
            bch2_write_rechecksum(c, op, op->csum_type) &&
            !c->opts.no_data_io)
                return PREP_ENCODED_CHECKSUM_ERR;

        /*
         * If we want to compress the data, it has to be decrypted:
         */
        if ((op->compression_opt ||
             bch2_csum_type_is_encryption(op->crc.csum_type) !=
             bch2_csum_type_is_encryption(op->csum_type)) &&
            bch2_write_decrypt(op))
                return PREP_ENCODED_CHECKSUM_ERR;

        return PREP_ENCODED_OK;
}
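/*
 * Outcome summary (illustrative):
 *
 *	PREP_ENCODED_DO_WRITE     - extent fits as is; write it unchanged
 *	PREP_ENCODED_OK           - data decompressed/rechecksummed as
 *				    needed; continue down the normal path
 *	PREP_ENCODED_CHECKSUM_ERR - existing checksum failed to verify
 *	PREP_ENCODED_ERR          - decompression failed
 */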

static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
                             struct bio **_dst)
{
        struct bch_fs *c = op->c;
        struct bio *src = &op->wbio.bio, *dst = src;
        struct bvec_iter saved_iter;
        void *ec_buf;
        unsigned total_output = 0, total_input = 0;
        bool bounce = false;
        bool page_alloc_failed = false;
        int ret, more = 0;

        BUG_ON(!bio_sectors(src));

        ec_buf = bch2_writepoint_ec_buf(c, wp);

        switch (bch2_write_prep_encoded_data(op, wp)) {
        case PREP_ENCODED_OK:
                break;
        case PREP_ENCODED_ERR:
                ret = -EIO;
                goto err;
        case PREP_ENCODED_CHECKSUM_ERR:
                goto csum_err;
        case PREP_ENCODED_DO_WRITE:
                /* XXX look for bug here */
                if (ec_buf) {
                        dst = bch2_write_bio_alloc(c, wp, src,
                                                   &page_alloc_failed,
                                                   ec_buf);
                        bio_copy_data(dst, src);
                        bounce = true;
                }
                init_append_extent(op, wp, op->version, op->crc);
                goto do_write;
        }

        if (ec_buf ||
            op->compression_opt ||
            (op->csum_type &&
             !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
            (bch2_csum_type_is_encryption(op->csum_type) &&
             !(op->flags & BCH_WRITE_PAGES_OWNED))) {
                dst = bch2_write_bio_alloc(c, wp, src,
                                           &page_alloc_failed,
                                           ec_buf);
                bounce = true;
        }

        saved_iter = dst->bi_iter;

        do {
                struct bch_extent_crc_unpacked crc = { 0 };
                struct bversion version = op->version;
                size_t dst_len = 0, src_len = 0;

                if (page_alloc_failed &&
                    dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
                    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
                        break;

                BUG_ON(op->compression_opt &&
                       (op->flags & BCH_WRITE_DATA_ENCODED) &&
                       bch2_csum_type_is_encryption(op->crc.csum_type));
                BUG_ON(op->compression_opt && !bounce);

                crc.compression_type = op->incompressible
                        ? BCH_COMPRESSION_TYPE_incompressible
                        : op->compression_opt
                        ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
                                            op->compression_opt)
                        : 0;
                if (!crc_is_compressed(crc)) {
                        dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
                        dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

                        if (op->csum_type)
                                dst_len = min_t(unsigned, dst_len,
                                                c->opts.encoded_extent_max);

                        if (bounce) {
                                swap(dst->bi_iter.bi_size, dst_len);
                                bio_copy_data(dst, src);
                                swap(dst->bi_iter.bi_size, dst_len);
                        }

                        src_len = dst_len;
                }

                BUG_ON(!src_len || !dst_len);

                if (bch2_csum_type_is_encryption(op->csum_type)) {
                        if (bversion_zero(version)) {
                                version.lo = atomic64_inc_return(&c->key_version);
                        } else {
                                crc.nonce = op->nonce;
                                op->nonce += src_len >> 9;
                        }
                }

                if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
                    !crc_is_compressed(crc) &&
                    bch2_csum_type_is_encryption(op->crc.csum_type) ==
                    bch2_csum_type_is_encryption(op->csum_type)) {
                        u8 compression_type = crc.compression_type;
                        u16 nonce = crc.nonce;
                        /*
                         * Note: when we're using rechecksum(), we need to be
                         * checksumming @src because it has all the data our
                         * existing checksum covers - if we bounced (because we
                         * were trying to compress), @dst will only have the
                         * part of the data the new checksum will cover.
                         *
                         * But normally we want to be checksumming post bounce,
                         * because part of the reason for bouncing is so the
                         * data can't be modified (by userspace) while it's in
                         * flight.
                         */
                        if (bch2_rechecksum_bio(c, src, version, op->crc,
                                        &crc, &op->crc,
                                        src_len >> 9,
                                        bio_sectors(src) - (src_len >> 9),
                                        op->csum_type))
                                goto csum_err;
                        /*
                         * bch2_rechecksum_bio() sets compression_type on crc
                         * from op->crc; this isn't always correct, as sometimes
                         * we're changing an extent from uncompressed to
                         * incompressible.
                         */
                        crc.compression_type = compression_type;
                        crc.nonce = nonce;
                } else {
                        if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
                            bch2_rechecksum_bio(c, src, version, op->crc,
                                        NULL, &op->crc,
                                        src_len >> 9,
                                        bio_sectors(src) - (src_len >> 9),
                                        op->crc.csum_type))
                                goto csum_err;

                        crc.compressed_size     = dst_len >> 9;
                        crc.uncompressed_size   = src_len >> 9;
                        crc.live_size           = src_len >> 9;

                        swap(dst->bi_iter.bi_size, dst_len);
                        ret = bch2_encrypt_bio(c, op->csum_type,
                                               extent_nonce(version, crc), dst);
                        if (ret)
                                goto err;

                        crc.csum = bch2_checksum_bio(c, op->csum_type,
                                         extent_nonce(version, crc), dst);
                        crc.csum_type = op->csum_type;
                        swap(dst->bi_iter.bi_size, dst_len);
                }

                init_append_extent(op, wp, version, crc);

                if (dst != src)
                        bio_advance(dst, dst_len);
                bio_advance(src, src_len);
                total_output    += dst_len;
                total_input     += src_len;
        } while (dst->bi_iter.bi_size &&
                 src->bi_iter.bi_size &&
                 wp->sectors_free &&
                 !bch2_keylist_realloc(&op->insert_keys,
                                      op->inline_keys,
                                      ARRAY_SIZE(op->inline_keys),
                                      BKEY_EXTENT_U64s_MAX));

        more = src->bi_iter.bi_size != 0;

        dst->bi_iter = saved_iter;

        if (dst == src && more) {
                BUG_ON(total_output != total_input);

                dst = bio_split(src, total_input >> 9,
                                GFP_NOFS, &c->bio_write);
                wbio_init(dst)->put_bio = true;
                /* copy WRITE_SYNC flag */
                dst->bi_opf             = src->bi_opf;
        }

        dst->bi_iter.bi_size = total_output;
do_write:
        *_dst = dst;
        return more;
csum_err:
        bch_err(c, "%s write error: error verifying existing checksum while rewriting existing data (memory corruption?)",
                op->flags & BCH_WRITE_MOVE ? "move" : "user");
        ret = -EIO;
err:
        if (to_wbio(dst)->bounce)
                bch2_bio_free_pages_pool(c, dst);
        if (to_wbio(dst)->put_bio)
                bio_put(dst);

        return ret;
}
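/*
 * Example (illustrative): a 1MiB source bio with checksumming enabled
 * and a 128KiB encoded_extent_max is emitted as eight extents; each
 * loop iteration checksums (and optionally compresses/encrypts) one
 * chunk and appends one key via init_append_extent(). If the write
 * point runs out of space first, @more is returned nonzero and the
 * caller allocates a new write point for the remainder.
 */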

static bool bch2_extent_is_writeable(struct bch_write_op *op,
                                     struct bkey_s_c k)
{
        struct bch_fs *c = op->c;
        struct bkey_s_c_extent e;
        struct extent_ptr_decoded p;
        const union bch_extent_entry *entry;
        unsigned replicas = 0;

        if (k.k->type != KEY_TYPE_extent)
                return false;

        e = bkey_s_c_to_extent(k);
        extent_for_each_ptr_decode(e, p, entry) {
                if (crc_is_encoded(p.crc) || p.has_ec)
                        return false;

                replicas += bch2_extent_ptr_durability(c, &p);
        }

        return replicas >= op->opts.data_replicas;
}
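/*
 * Put differently (illustrative): an extent is only overwritten in
 * place if it's a plain extent whose pointers carry no checksum or
 * compression state and no erasure coding, and whose existing
 * durability already meets opts.data_replicas - anything else falls
 * back to the normal COW write path.
 */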
1113
1114 static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
1115 {
1116         struct bch_fs *c = op->c;
1117
1118         for_each_keylist_key(&op->insert_keys, k) {
1119                 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
1120
1121                 bkey_for_each_ptr(ptrs, ptr)
1122                         bch2_bucket_nocow_unlock(&c->nocow_locks,
1123                                                  PTR_BUCKET_POS(c, ptr),
1124                                                  BUCKET_NOCOW_LOCK_UPDATE);
1125         }
1126 }
1127
1128 static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
1129                                                   struct btree_iter *iter,
1130                                                   struct bkey_i *orig,
1131                                                   struct bkey_s_c k,
1132                                                   u64 new_i_size)
1133 {
1134         if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
1135                 /* trace this */
1136                 return 0;
1137         }
1138
1139         struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
1140         int ret = PTR_ERR_OR_ZERO(new);
1141         if (ret)
1142                 return ret;
1143
1144         bch2_cut_front(bkey_start_pos(&orig->k), new);
1145         bch2_cut_back(orig->k.p, new);
1146
1147         struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
1148         bkey_for_each_ptr(ptrs, ptr)
1149                 ptr->unwritten = 0;
1150
1151         /*
1152          * Note that we're not calling bch2_subvol_get_snapshot() in this path -
1153          * that was done when we kicked off the write, and here it's important
1154          * that we update the extent that we wrote to - even if a snapshot has
1155          * since been created. The write is still outstanding, so we're ok
1156          * w.r.t. snapshot atomicity:
1157          */
1158         return  bch2_extent_update_i_size_sectors(trans, iter,
1159                                         min(new->k.p.offset << 9, new_i_size), 0) ?:
1160                 bch2_trans_update(trans, iter, new,
1161                                   BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1162 }
1163
1164 static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
1165 {
1166         struct bch_fs *c = op->c;
1167         struct btree_trans *trans = bch2_trans_get(c);
1168
1169         for_each_keylist_key(&op->insert_keys, orig) {
1170                 int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
1171                                      bkey_start_pos(&orig->k), orig->k.p,
1172                                      BTREE_ITER_INTENT, k,
1173                                      NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
1174                         bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
1175                 }));
1176
1177                 if (ret && !bch2_err_matches(ret, EROFS)) {
1178                         struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
1179
1180                         bch_err_inum_offset_ratelimited(c,
1181                                 insert->k.p.inode, insert->k.p.offset << 9,
1182                                 "%s write error while doing btree update: %s",
1183                                 op->flags & BCH_WRITE_MOVE ? "move" : "user",
1184                                 bch2_err_str(ret));
1185                 }
1186
1187                 if (ret) {
1188                         op->error = ret;
1189                         break;
1190                 }
1191         }
1192
1193         bch2_trans_put(trans);
1194 }
1195
1196 static void __bch2_nocow_write_done(struct bch_write_op *op)
1197 {
1198         bch2_nocow_write_unlock(op);
1199
1200         if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
1201                 op->error = -EIO;
1202         } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
1203                 bch2_nocow_write_convert_unwritten(op);
1204 }
1205
1206 static CLOSURE_CALLBACK(bch2_nocow_write_done)
1207 {
1208         closure_type(op, struct bch_write_op, cl);
1209
1210         __bch2_nocow_write_done(op);
1211         bch2_write_done(cl);
1212 }
1213
1214 struct bucket_to_lock {
1215         struct bpos             b;
1216         unsigned                gen;
1217         struct nocow_lock_bucket *l;
1218 };
1219
1220 static void bch2_nocow_write(struct bch_write_op *op)
1221 {
1222         struct bch_fs *c = op->c;
1223         struct btree_trans *trans;
1224         struct btree_iter iter;
1225         struct bkey_s_c k;
1226         DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
1227         u32 snapshot;
1228         struct bucket_to_lock *stale_at;
1229         int ret;
1230
1231         if (op->flags & BCH_WRITE_MOVE)
1232                 return;
1233
1234         darray_init(&buckets);
1235         trans = bch2_trans_get(c);
1236 retry:
1237         bch2_trans_begin(trans);
1238
1239         ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
1240         if (unlikely(ret))
1241                 goto err;
1242
1243         bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1244                              SPOS(op->pos.inode, op->pos.offset, snapshot),
1245                              BTREE_ITER_SLOTS);
1246         while (1) {
1247                 struct bio *bio = &op->wbio.bio;
1248
1249                 buckets.nr = 0;
1250
1251                 k = bch2_btree_iter_peek_slot(&iter);
1252                 ret = bkey_err(k);
1253                 if (ret)
1254                         break;
1255
1256                 /* fall back to normal cow write path? */
1257                 if (unlikely(k.k->p.snapshot != snapshot ||
1258                              !bch2_extent_is_writeable(op, k)))
1259                         break;
1260
1261                 if (bch2_keylist_realloc(&op->insert_keys,
1262                                          op->inline_keys,
1263                                          ARRAY_SIZE(op->inline_keys),
1264                                          k.k->u64s))
1265                         break;
1266
1267                 /* Get iorefs before dropping btree locks: */
1268                 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1269                 bkey_for_each_ptr(ptrs, ptr) {
1270                         struct bpos b = PTR_BUCKET_POS(c, ptr);
1271                         struct nocow_lock_bucket *l =
1272                                 bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
1273                         prefetch(l);
1274
1275                         if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
1276                                 goto err_get_ioref;
1277
1278                         /* XXX allocating memory with btree locks held - rare */
1279                         darray_push_gfp(&buckets, ((struct bucket_to_lock) {
1280                                                    .b = b, .gen = ptr->gen, .l = l,
1281                                                    }), GFP_KERNEL|__GFP_NOFAIL);
1282
1283                         if (ptr->unwritten)
1284                                 op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
1285                 }
1286
1287                 /* Unlock before taking nocow locks, doing IO: */
1288                 bkey_reassemble(op->insert_keys.top, k);
1289                 bch2_trans_unlock(trans);
1290
1291                 bch2_cut_front(op->pos, op->insert_keys.top);
1292                 if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
1293                         bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
1294
1295                 darray_for_each(buckets, i) {
1296                         struct bch_dev *ca = bch_dev_bkey_exists(c, i->b.inode);
1297
1298                         __bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
1299                                                  bucket_to_u64(i->b),
1300                                                  BUCKET_NOCOW_LOCK_UPDATE);
1301
1302                         rcu_read_lock();
1303                         bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen);
1304                         rcu_read_unlock();
1305
1306                         if (unlikely(stale)) {
1307                                 stale_at = i;
1308                                 goto err_bucket_stale;
1309                         }
1310                 }
1311
1312                 bio = &op->wbio.bio;
1313                 if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
1314                         bio = bio_split(bio, k.k->p.offset - op->pos.offset,
1315                                         GFP_KERNEL, &c->bio_write);
1316                         wbio_init(bio)->put_bio = true;
1317                         bio->bi_opf = op->wbio.bio.bi_opf;
1318                 } else {
1319                         op->flags |= BCH_WRITE_DONE;
1320                 }
1321
1322                 op->pos.offset += bio_sectors(bio);
1323                 op->written += bio_sectors(bio);
1324
1325                 bio->bi_end_io  = bch2_write_endio;
1326                 bio->bi_private = &op->cl;
1327                 bio->bi_opf |= REQ_OP_WRITE;
1328                 closure_get(&op->cl);
1329                 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1330                                           op->insert_keys.top, true);
1331
1332                 bch2_keylist_push(&op->insert_keys);
1333                 if (op->flags & BCH_WRITE_DONE)
1334                         break;
1335                 bch2_btree_iter_advance(&iter);
1336         }
1337 out:
1338         bch2_trans_iter_exit(trans, &iter);
1339 err:
1340         if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1341                 goto retry;
1342
1343         if (ret) {
1344                 bch_err_inum_offset_ratelimited(c,
1345                         op->pos.inode, op->pos.offset << 9,
1346                         "%s: btree lookup error %s", __func__, bch2_err_str(ret));
1347                 op->error = ret;
1348                 op->flags |= BCH_WRITE_DONE;
1349         }
1350
1351         bch2_trans_put(trans);
1352         darray_exit(&buckets);
1353
1354         /* fallback to cow write path? */
1355         if (!(op->flags & BCH_WRITE_DONE)) {
1356                 closure_sync(&op->cl);
1357                 __bch2_nocow_write_done(op);
1358                 op->insert_keys.top = op->insert_keys.keys;
1359         } else if (op->flags & BCH_WRITE_SYNC) {
1360                 closure_sync(&op->cl);
1361                 bch2_nocow_write_done(&op->cl.work);
1362         } else {
1363                 /*
1364                  * XXX
1365                  * needs to run out of process context because ei_quota_lock is
1366                  * a mutex
1367                  */
1368                 continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
1369         }
1370         return;
1371 err_get_ioref:
1372         darray_for_each(buckets, i)
1373                 percpu_ref_put(&bch_dev_bkey_exists(c, i->b.inode)->io_ref);
1374
1375         /* Fall back to COW path: */
1376         goto out;
1377 err_bucket_stale:
1378         darray_for_each(buckets, i) {
1379                 bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
1380                 if (i == stale_at)
1381                         break;
1382         }
1383
1384         /* We can retry this: */
1385         ret = -BCH_ERR_transaction_restart;
1386         goto err_get_ioref;
1387 }

static void __bch2_write(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct write_point *wp = NULL;
        struct bio *bio = NULL;
        unsigned nofs_flags;
        int ret;

        nofs_flags = memalloc_nofs_save();

        if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
                bch2_nocow_write(op);
                if (op->flags & BCH_WRITE_DONE)
                        goto out_nofs_restore;
        }
again:
        memset(&op->failed, 0, sizeof(op->failed));

        do {
                struct bkey_i *key_to_write;
                unsigned key_to_write_offset = op->insert_keys.top_p -
                        op->insert_keys.keys_p;

                /* +1 for possible cache device: */
                if (op->open_buckets.nr + op->nr_replicas + 1 >
                    ARRAY_SIZE(op->open_buckets.v))
                        break;

                if (bch2_keylist_realloc(&op->insert_keys,
                                        op->inline_keys,
                                        ARRAY_SIZE(op->inline_keys),
                                        BKEY_EXTENT_U64s_MAX))
                        break;

                /*
                 * The copygc thread is now global: it's no longer freeing up
                 * space on specific disks, so allocations for specific disks
                 * may hang arbitrarily long:
                 */
                ret = bch2_trans_do(c, NULL, NULL, 0,
                        bch2_alloc_sectors_start_trans(trans,
                                op->target,
                                op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
                                op->write_point,
                                &op->devs_have,
                                op->nr_replicas,
                                op->nr_replicas_required,
                                op->watermark,
                                op->flags,
                                (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
                                              BCH_WRITE_ONLY_SPECIFIED_DEVS))
                                ? NULL : &op->cl, &wp));
                if (unlikely(ret)) {
                        if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
                                break;

                        goto err;
                }

                EBUG_ON(!wp);

                bch2_open_bucket_get(c, wp, &op->open_buckets);
                ret = bch2_write_extent(op, wp, &bio);

                bch2_alloc_sectors_done_inlined(c, wp);
err:
                if (ret <= 0) {
                        op->flags |= BCH_WRITE_DONE;

                        if (ret < 0) {
                                if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
                                        bch_err_inum_offset_ratelimited(c,
                                                op->pos.inode,
                                                op->pos.offset << 9,
                                                "%s(): %s error: %s", __func__,
                                                op->flags & BCH_WRITE_MOVE ? "move" : "user",
                                                bch2_err_str(ret));
                                op->error = ret;
                                break;
                        }
                }

                bio->bi_end_io  = bch2_write_endio;
                bio->bi_private = &op->cl;
                bio->bi_opf |= REQ_OP_WRITE;

                closure_get(bio->bi_private);

                key_to_write = (void *) (op->insert_keys.keys_p +
                                         key_to_write_offset);

                bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
                                          key_to_write, false);
        } while (ret);

        /*
         * Sync or no?
         *
         * If we're running asynchronously, we may still want to block
         * synchronously here if we weren't able to submit all of the IO at
         * once, as that signals backpressure to the caller.
         */
        if ((op->flags & BCH_WRITE_SYNC) ||
            (!(op->flags & BCH_WRITE_DONE) &&
             !(op->flags & BCH_WRITE_IN_WORKER))) {
                closure_sync(&op->cl);
                __bch2_write_index(op);

                if (!(op->flags & BCH_WRITE_DONE))
                        goto again;
                bch2_write_done(&op->cl);
        } else {
                bch2_write_queue(op, wp);
                continue_at(&op->cl, bch2_write_index, NULL);
        }
out_nofs_restore:
        memalloc_nofs_restore(nofs_flags);
}
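
/*
 * Note on the main loop above: each iteration allocates space from a
 * write point, writes out as much of the remaining data as fits
 * (generating one key per extent written) and submits the resulting
 * bio; it exits either when everything has been submitted
 * (BCH_WRITE_DONE) or when it must first flush already-submitted IO and
 * drain the keylist before continuing, which is also what signals
 * backpressure to synchronous callers.
 */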

static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
{
        struct bio *bio = &op->wbio.bio;
        struct bvec_iter iter;
        struct bkey_i_inline_data *id;
        unsigned sectors;
        int ret;

        memset(&op->failed, 0, sizeof(op->failed));

        op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
        op->flags |= BCH_WRITE_DONE;

        bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);

        ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
                                   ARRAY_SIZE(op->inline_keys),
                                   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
        if (ret) {
                op->error = ret;
                goto err;
        }

        sectors = bio_sectors(bio);
        op->pos.offset += sectors;

        id = bkey_inline_data_init(op->insert_keys.top);
        id->k.p         = op->pos;
        id->k.version   = op->version;
        id->k.size      = sectors;

        iter = bio->bi_iter;
        iter.bi_size = data_len;
        memcpy_from_bio(id->v.data, bio, iter);

        while (data_len & 7)
                id->v.data[data_len++] = '\0';
        set_bkey_val_bytes(&id->k, data_len);
        bch2_keylist_push(&op->insert_keys);

        __bch2_write_index(op);
err:
        bch2_write_done(&op->cl);
}
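
/*
 * Worked example for the padding loop above (illustrative only): for
 * data_len == 13, the loop zero-fills bytes 13..15 and leaves data_len
 * == 16, so the bkey value is padded out to a whole number of u64s,
 * matching the DIV_ROUND_UP(data_len, 8) sizing used in the keylist
 * realloc.
 */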

/**
 * bch2_write() - handle a write to a cache device or flash-only volume
 * @cl:         &bch_write_op->cl
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash-only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data won't fit in a single open bucket, there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have been
 * added to the next journal write they're inserted into the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
CLOSURE_CALLBACK(bch2_write)
{
        closure_type(op, struct bch_write_op, cl);
        struct bio *bio = &op->wbio.bio;
        struct bch_fs *c = op->c;
        unsigned data_len;

        EBUG_ON(op->cl.parent);
        BUG_ON(!op->nr_replicas);
        BUG_ON(!op->write_point.v);
        BUG_ON(bkey_eq(op->pos, POS_MAX));

        op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
        op->start_time = local_clock();
        bch2_keylist_init(&op->insert_keys, op->inline_keys);
        wbio_init(bio)->put_bio = false;

        if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
                bch_err_inum_offset_ratelimited(c,
                        op->pos.inode,
                        op->pos.offset << 9,
                        "%s write error: misaligned write",
                        op->flags & BCH_WRITE_MOVE ? "move" : "user");
                op->error = -EIO;
                goto err;
        }

        if (c->opts.nochanges) {
                op->error = -BCH_ERR_erofs_no_writes;
                goto err;
        }

        if (!(op->flags & BCH_WRITE_MOVE) &&
            !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
                op->error = -BCH_ERR_erofs_no_writes;
                goto err;
        }

        this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
        bch2_increment_clock(c, bio_sectors(bio), WRITE);

        data_len = min_t(u64, bio->bi_iter.bi_size,
                         op->new_i_size - (op->pos.offset << 9));

        if (c->opts.inline_data &&
            data_len <= min(block_bytes(c) / 2, 1024U)) {
                bch2_write_data_inline(op, data_len);
                return;
        }

        __bch2_write(op);
        return;
err:
        bch2_disk_reservation_put(c, &op->res);

        closure_debug_destroy(&op->cl);
        if (op->end_io)
                op->end_io(op);
}
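
/*
 * A minimal sketch of how a caller might drive bch2_write(), kept under
 * #if 0 because it is illustrative, not buildable as-is. It assumes
 * bch2_write_op_init() and bch2_opts_to_inode_opts() as the usual
 * initialization helpers, and that the caller has already filled in
 * op->wbio.bio with the data to write; my_write_done() and
 * my_submit_write() are hypothetical names:
 */
#if 0
static void my_write_done(struct bch_write_op *op)
{
        /* op->error holds the result; the data bio has already completed */
}

static void my_submit_write(struct bch_fs *c, struct bch_write_op *op,
                            struct bpos pos, unsigned nr_replicas)
{
        /* assumed helpers; the real callers live in the fs-io and move paths */
        bch2_write_op_init(op, c, bch2_opts_to_inode_opts(c->opts));

        op->pos         = pos;
        op->nr_replicas = nr_replicas;
        op->end_io      = my_write_done;

        /* no parent closure: bch2_write() asserts op->cl.parent == NULL */
        closure_call(&op->cl, bch2_write, NULL, NULL);
}
#endif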

static const char * const bch2_write_flags[] = {
#define x(f)    #f,
        BCH_WRITE_FLAGS()
#undef x
        NULL
};

void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
{
        prt_str(out, "pos: ");
        bch2_bpos_to_text(out, op->pos);
        prt_newline(out);
        printbuf_indent_add(out, 2);

        prt_str(out, "started: ");
        bch2_pr_time_units(out, local_clock() - op->start_time);
        prt_newline(out);

        prt_str(out, "flags: ");
        prt_bitflags(out, bch2_write_flags, op->flags);
        prt_newline(out);

        prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
        prt_newline(out);

        printbuf_indent_sub(out, 2);
}
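
/*
 * The resulting text is a small indented report, roughly of this shape
 * (placeholders, not captured output):
 *
 *   pos: <bpos>
 *     started: <elapsed time>
 *     flags: <flag names from BCH_WRITE_FLAGS()>
 *     ref: <closure refcount>
 */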

void bch2_fs_io_write_exit(struct bch_fs *c)
{
        mempool_exit(&c->bio_bounce_pages);
        bioset_exit(&c->bio_write);
}

int bch2_fs_io_write_init(struct bch_fs *c)
{
        if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
                        BIOSET_NEED_BVECS))
                return -BCH_ERR_ENOMEM_bio_write_init;

        if (mempool_init_page_pool(&c->bio_bounce_pages,
                                   max_t(unsigned,
                                         c->opts.btree_node_size,
                                         c->opts.encoded_extent_max) /
                                   PAGE_SIZE, 0))
                return -BCH_ERR_ENOMEM_bio_bounce_pages_init;

        return 0;
}
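
/*
 * Pool-sizing example for the init above (assumed option values,
 * illustrative only): with btree_node_size == 256k, encoded_extent_max
 * == 128k and 4k pages, the bounce pool reserves max(256k, 128k) / 4k
 * == 64 pages.
 */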