// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
                          struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

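/*
 * Mark the folio backing this buffer as recently accessed, so the
 * pagecache LRU keeps it around.
 */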
inline void touch_buffer(struct buffer_head *bh)
{
        trace_block_touch_buffer(bh);
        folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

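/*
 * Slow path of lock_buffer(): sleep (uninterruptibly, accounted as I/O
 * wait) until the BH_Lock bit can be acquired.
 */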
void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

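/*
 * Release BH_Lock and wake up any waiters.  clear_bit_unlock() has
 * release semantics; the extra barrier orders the clearing of the lock
 * bit against the waitqueue check done inside wake_up_bit().
 */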
void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_atomic();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale.  If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
                                     bool *dirty, bool *writeback)
{
        struct buffer_head *head, *bh;
        *dirty = false;
        *writeback = false;

        BUG_ON(!folio_test_locked(folio));

        head = folio_buffers(folio);
        if (!head)
                return;

        if (folio_test_writeback(folio))
                *writeback = true;

        bh = head;
        do {
                if (buffer_locked(bh))
                        *writeback = true;

                if (buffer_dirty(bh))
                        *dirty = true;

                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

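/*
 * Rate-limited reporting of buffer I/O errors; suppressed entirely for
 * buffers marked BH_Quiet.
 */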
static void buffer_io_error(struct buffer_head *bh, char *msg)
{
        if (!test_bit(BH_Quiet, &bh->b_state))
                printk_ratelimited(KERN_ERR
                        "Buffer I/O error on dev %pg, logical block %llu%s\n",
                        bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed read-ahead attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

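/*
 * Synchronous write end_io handler: on failure, report the error and
 * propagate it to the mapping before releasing the buffer.
 */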
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost sync page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * i_private_lock.
 *
 * Hack idea: for the blockdev mapping, i_private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take i_private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct folio *folio;
        int all_mapped = 1;
        static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

        index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
        folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
        if (IS_ERR(folio))
                goto out;

        spin_lock(&bd_mapping->i_private_lock);
        head = folio_buffers(folio);
        if (!head)
                goto out_unlock;
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
        if (all_mapped && __ratelimit(&last_warned)) {
                printk("__find_get_block_slow() failed. block=%llu, "
                       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
                       "device %pg blocksize: %d\n",
                       (unsigned long long)block,
                       (unsigned long long)bh->b_blocknr,
                       bh->b_state, bh->b_size, bdev,
                       1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->i_private_lock);
        folio_put(folio);
out:
        return ret;
}

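/*
 * Completion handler for one buffer of an async read.  Update this
 * buffer's uptodate state, then scan the other buffers in the folio;
 * once none of them is still under async read, mark the folio uptodate
 * (if every buffer is) and unlock it via folio_end_read().
 */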
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct folio *folio;
        int folio_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        folio = bh->b_folio;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                buffer_io_error(bh, ", async page read");
                folio_set_error(folio);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = folio_buffers(folio);
        spin_lock_irqsave(&first->b_uptodate_lock, flags);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        folio_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

        folio_end_read(folio, folio_uptodate);
        return;

still_busy:
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        return;
}

struct postprocess_bh_ctx {
        struct work_struct work;
        struct buffer_head *bh;
};

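/*
 * Deferred fsverity verification of a buffer's contents, run from the
 * fsverity workqueue; completes the async read with the result.
 */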
static void verify_bh(struct work_struct *work)
{
        struct postprocess_bh_ctx *ctx =
                container_of(work, struct postprocess_bh_ctx, work);
        struct buffer_head *bh = ctx->bh;
        bool valid;

        valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
        end_buffer_async_read(bh, valid);
        kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
        struct folio *folio = bh->b_folio;
        struct inode *inode = folio->mapping->host;

        return fsverity_active(inode) &&
                /* needed by ext4 */
                folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
        struct postprocess_bh_ctx *ctx =
                container_of(work, struct postprocess_bh_ctx, work);
        struct buffer_head *bh = ctx->bh;
        int err;

        err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
                                               bh_offset(bh));
        if (err == 0 && need_fsverity(bh)) {
                /*
                 * We use different work queues for decryption and for verity
                 * because verity may require reading metadata pages that need
                 * decryption, and we shouldn't recurse to the same workqueue.
                 */
                INIT_WORK(&ctx->work, verify_bh);
                fsverity_enqueue_verify_work(&ctx->work);
                return;
        }
        end_buffer_async_read(bh, err == 0);
        kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
        struct inode *inode = bh->b_folio->mapping->host;
        bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
        bool verify = need_fsverity(bh);

        /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
        if (uptodate && (decrypt || verify)) {
                struct postprocess_bh_ctx *ctx =
                        kmalloc(sizeof(*ctx), GFP_ATOMIC);

                if (ctx) {
                        ctx->bh = bh;
                        if (decrypt) {
                                INIT_WORK(&ctx->work, decrypt_bh);
                                fscrypt_enqueue_decrypt_work(&ctx->work);
                        } else {
                                INIT_WORK(&ctx->work, verify_bh);
                                fsverity_enqueue_verify_work(&ctx->work);
                        }
                        return;
                }
                uptodate = 0;
        }
        end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct folio *folio;

        BUG_ON(!buffer_async_write(bh));

        folio = bh->b_folio;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost async page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                folio_set_error(folio);
        }

        first = folio_buffers(folio);
        spin_lock_irqsave(&first->b_uptodate_lock, flags);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        folio_end_writeback(folio);
        return;

still_busy:
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        return;
}

/*
 * If a page's buffers are under async read (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read_io;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers.  Which is different from the
 * address_space against which the buffers are listed.  So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list!  In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's i_private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        bh->b_assoc_map = NULL;
}

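/*
 * Returns true while the inode still has associated buffers on its
 * i_private_list.
 */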
int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->i_private_data;

        if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->i_private_lock,
                                        &mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:       file to synchronize
 * @start:      start offset in bytes
 * @end:        end offset in bytes (inclusive)
 * @datasync:   only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
                                  bool datasync)
{
        struct inode *inode = file->f_mapping->host;
        int err;
        int ret;

        err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;

        ret = sync_mapping_buffers(inode->i_mapping);
        if (!(inode->i_state & I_DIRTY_ALL))
                goto out;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                goto out;

        err = sync_inode_metadata(inode, 1);
        if (ret == 0)
                ret = err;

out:
        /* check and advance again to catch errors after syncing out buffers */
        err = file_check_and_advance_wb_err(file);
        if (ret == 0)
                ret = err;
        return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:       file to synchronize
 * @start:      start offset in bytes
 * @end:        end offset in bytes (inclusive)
 * @datasync:   only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure. This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
                          bool datasync)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        ret = generic_buffers_fsync_noflush(file, start, end, datasync);
        if (!ret)
                ret = blkdev_issue_flush(inode->i_sb->s_bdev);
        return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        write_dirty_buffer(bh, 0);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_folio->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->i_private_data) {
                mapping->i_private_data = buffer_mapping;
        } else {
                BUG_ON(mapping->i_private_data != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->i_private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->i_private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->i_private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use i_private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
        struct buffer_head *head;
        bool newly_dirty;

        spin_lock(&mapping->i_private_lock);
        head = folio_buffers(folio);
        if (head) {
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        /*
         * Lock out page's memcg migration to keep PageDirty
         * synchronized with per-memcg dirty page counters.
         */
        folio_memcg_lock(folio);
        newly_dirty = !folio_test_set_dirty(folio);
        spin_unlock(&mapping->i_private_lock);

        if (newly_dirty)
                __folio_mark_dirty(folio, mapping, 1);

        folio_memcg_unlock(folio);

        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;

        INIT_LIST_HEAD(&tmp);
        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, REQ_SYNC);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->i_private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->i_private_list;
                struct address_space *buffer_mapping = mapping->i_private_data;

                spin_lock(&buffer_mapping->i_private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->i_private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->i_private_list;
                struct address_space *buffer_mapping = mapping->i_private_data;

                spin_lock(&buffer_mapping->i_private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->i_private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
                                        gfp_t gfp)
{
        struct buffer_head *bh, *head;
        long offset;
        struct mem_cgroup *memcg, *old_memcg;

        /* The folio lock pins the memcg */
        memcg = folio_memcg(folio);
        old_memcg = set_active_memcg(memcg);

        head = NULL;
        offset = folio_size(folio);
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(gfp);
                if (!bh)
                        goto no_grow;

                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_size = size;

                /* Link the buffer to its folio */
                folio_set_bh(bh, folio, offset);
        }
out:
        set_active_memcg(old_memcg);
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                                       bool retry)
{
        gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
        if (retry)
                gfp |= __GFP_NOFAIL;

        return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

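/*
 * Close the singly-linked b_this_page chain built by folio_alloc_buffers()
 * into a ring and attach it to the folio as private data.
 */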
static inline void link_dev_buffers(struct folio *folio,
                struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        folio_attach_private(folio, head);
}

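/*
 * Return the number of whole blocks of @size that fit on @bdev, i.e. the
 * first invalid block number; ~0 if the device size is reported as zero.
 */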
static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = bdev_nr_bytes(bdev);

        if (sz) {
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
                struct block_device *bdev, unsigned size)
{
        struct buffer_head *head = folio_buffers(folio);
        struct buffer_head *bh = head;
        bool uptodate = folio_test_uptodate(folio);
        sector_t block = div_u64(folio_pos(folio), size);
        sector_t end_block = blkdev_max_block(bdev, size);

        do {
                if (!buffer_mapped(bh)) {
                        bh->b_end_io = NULL;
                        bh->b_private = NULL;
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        if (block < end_block)
                                set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * Caller needs to validate requested block against end of device.
         */
        return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
                pgoff_t index, unsigned size, gfp_t gfp)
{
        struct inode *inode = bdev->bd_inode;
        struct folio *folio;
        struct buffer_head *bh;
        sector_t end_block = 0;

        folio = __filemap_get_folio(inode->i_mapping, index,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
        if (IS_ERR(folio))
                return false;

        bh = folio_buffers(folio);
        if (bh) {
                if (bh->b_size == size) {
                        end_block = folio_init_buffers(folio, bdev, size);
                        goto unlock;
                }

                /*
                 * Retrying may succeed; for example the folio may finish
                 * writeback, or buffers may be cleaned.  This should not
                 * happen very often; maybe we have old buffers attached to
                 * this blockdev's page cache and we're trying to change
                 * the block size?
                 */
                if (!try_to_free_buffers(folio)) {
                        end_block = ~0ULL;
                        goto unlock;
                }
        }

        bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
        if (!bh)
                goto unlock;

        /*
         * Link the folio to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the folio lock.
         */
        spin_lock(&inode->i_mapping->i_private_lock);
        link_dev_buffers(folio, bh);
        end_block = folio_init_buffers(folio, bdev, size);
        spin_unlock(&inode->i_mapping->i_private_lock);
unlock:
        folio_unlock(folio);
        folio_put(folio);
        return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio.  If
 * that folio was dirty, the buffers are set dirty also.  Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
                unsigned size, gfp_t gfp)
{
        loff_t pos;

        /*
         * Check for a block which lies outside our maximum possible
         * pagecache index.
         */
        if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
                printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
                        __func__, (unsigned long long)block,
                        bdev);
                return false;
        }

        /* Create a folio with the proper size buffers */
        return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
             unsigned size, gfp_t gfp)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "logical block size: %d\n",
                                        bdev_logical_block_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head *bh;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                if (!grow_buffers(bdev, block, size, gfp))
                        return NULL;
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        trace_block_dirty_buffer(bh);

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct folio *folio = bh->b_folio;
                struct address_space *mapping = NULL;

                folio_memcg_lock(folio);
                if (!folio_test_set_dirty(folio)) {
                        mapping = folio->mapping;
                        if (mapping)
                                __folio_mark_dirty(folio, mapping, 0);
                }
                folio_memcg_unlock(folio);
                if (mapping)
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);

void mark_buffer_write_io_error(struct buffer_head *bh)
{
        set_buffer_write_io_error(bh);
        /* FIXME: do we need to set this in both places? */
        if (bh->b_folio && bh->b_folio->mapping)
                mapping_set_error(bh->b_folio->mapping, -EIO);
        if (bh->b_assoc_map) {
                mapping_set_error(bh->b_assoc_map, -EIO);
                errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
        }
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_folio->mapping;

                spin_lock(&buffer_mapping->i_private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->i_private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

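/*
 * Slow path of __bread_gfp(): the buffer was not uptodate.  Lock it,
 * re-check (it may have been read meanwhile), otherwise submit a
 * synchronous read and wait.  Returns NULL and drops the reference on
 * I/O error.
 */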
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(REQ_OP_READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     16

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = bh;
        struct bh_lru *b;
        int i;

        check_irqs_on();
        bh_lru_lock();

        /*
         * the refcount of buffer_head in bh_lru prevents dropping the
         * attached page (i.e., try_to_free_buffers) so it could cause
         * page migration to fail.
         * Skip putting upcoming bh into bh_lru until migration is done.
         */
        if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
                bh_lru_unlock();
                return;
        }

        b = this_cpu_ptr(&bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                swap(evictee, b->bhs[i]);
                if (evictee == bh) {
                        bh_lru_unlock();
                        return;
                }
        }

        get_bh(bh);
        bh_lru_unlock();
        brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        if (cpu_is_isolated(smp_processor_id())) {
                bh_lru_unlock();
                return NULL;
        }
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

                if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
                    bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        __this_cpu_write(bh_lrus.bhs[i],
                                                __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
                                __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                /* __find_get_block_slow will mark the page accessed */
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        } else
                touch_buffer(bh);

        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
                unsigned size, gfp_t gfp)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_alloc(gfp);
        if (bh)
                return bh;

        return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = bdev_getblk(bdev, block, size,
                        GFP_NOWAIT | __GFP_MOVABLE);

        if (likely(bh)) {
                bh_readahead(bh, REQ_RAHEAD);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block and returns the buffer head that contains it.
 *  If @gfp is zero, the page cache is allocated from the non-movable area
 *  so that it does not prevent page migration.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
                   unsigned size, gfp_t gfp)
{
        struct buffer_head *bh;

        gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);

        /*
         * Prefer looping in the allocator rather than here, at least that
         * code knows what it's doing.
         */
        gfp |= __GFP_NOFAIL;

        bh = bdev_getblk(bdev, block, size, gfp);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread_gfp);

static void __invalidate_bh_lrus(struct bh_lru *b)
{
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
}
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);

        __invalidate_bh_lrus(b);
        put_cpu_var(bh_lrus);
}

1504 bool has_bh_in_lru(int cpu, void *dummy)
1505 {
1506         struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1507         int i;
1508
1509         for (i = 0; i < BH_LRU_SIZE; i++) {
1510                 if (b->bhs[i])
1511                         return true;
1512         }
1513
1514         return false;
1515 }
1516
1517 void invalidate_bh_lrus(void)
1518 {
1519         on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1520 }
1521 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1522
1523 /*
1524  * It's called from workqueue context so we need a bh_lru_lock to close
1525  * the race with preemption/irq.
1526  */
1527 void invalidate_bh_lrus_cpu(void)
1528 {
1529         struct bh_lru *b;
1530
1531         bh_lru_lock();
1532         b = this_cpu_ptr(&bh_lrus);
1533         __invalidate_bh_lrus(b);
1534         bh_lru_unlock();
1535 }
1536
1537 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1538                   unsigned long offset)
1539 {
1540         bh->b_folio = folio;
1541         BUG_ON(offset >= folio_size(folio));
1542         if (folio_test_highmem(folio))
1543                 /*
1544                  * This catches illegal uses and preserves the offset:
1545                  */
1546                 bh->b_data = (char *)(0 + offset);
1547         else
1548                 bh->b_data = folio_address(folio) + offset;
1549 }
1550 EXPORT_SYMBOL(folio_set_bh);
1551
1552 /*
1553  * Called when completely truncating a buffer within a folio.
1554  */
1555
1556 /* Bits that are cleared during an invalidate */
1557 #define BUFFER_FLAGS_DISCARD \
1558         (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1559          1 << BH_Delay | 1 << BH_Unwritten)
1560
1561 static void discard_buffer(struct buffer_head * bh)
1562 {
1563         unsigned long b_state;
1564
1565         lock_buffer(bh);
1566         clear_buffer_dirty(bh);
1567         bh->b_bdev = NULL;
1568         b_state = READ_ONCE(bh->b_state);
1569         do {
1570         } while (!try_cmpxchg(&bh->b_state, &b_state,
1571                               b_state & ~BUFFER_FLAGS_DISCARD));
1572         unlock_buffer(bh);
1573 }
1574
1575 /**
1576  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1577  * @folio: The folio which is affected.
1578  * @offset: start of the range to invalidate
1579  * @length: length of the range to invalidate
1580  *
1581  * block_invalidate_folio() is called when all or part of the folio has been
1582  * invalidated by a truncate operation.
1583  *
1584  * block_invalidate_folio() does not have to release all buffers, but it must
1585  * ensure that no dirty buffer is left outside @offset and that no I/O
1586  * is underway against any of the blocks which are outside the truncation
1587  * point.  Because the caller is about to free (and possibly reuse) those
1588  * blocks on-disk.
1589  */
1590 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1591 {
1592         struct buffer_head *head, *bh, *next;
1593         size_t curr_off = 0;
1594         size_t stop = length + offset;
1595
1596         BUG_ON(!folio_test_locked(folio));
1597
1598         /*
1599          * Check for overflow
1600          */
1601         BUG_ON(stop > folio_size(folio) || stop < length);
1602
1603         head = folio_buffers(folio);
1604         if (!head)
1605                 return;
1606
1607         bh = head;
1608         do {
1609                 size_t next_off = curr_off + bh->b_size;
1610                 next = bh->b_this_page;
1611
1612                 /*
1613                  * Are we still fully in range?
1614                  */
1615                 if (next_off > stop)
1616                         goto out;
1617
1618                 /*
1619                  * Is this block fully invalidated?
1620                  */
1621                 if (offset <= curr_off)
1622                         discard_buffer(bh);
1623                 curr_off = next_off;
1624                 bh = next;
1625         } while (bh != head);
1626
1627         /*
1628          * We release buffers only if the entire folio is being invalidated.
1629          * The get_block cached value has been unconditionally invalidated,
1630          * so real IO is not possible anymore.
1631          */
1632         if (length == folio_size(folio))
1633                 filemap_release_folio(folio, 0);
1634 out:
1635         return;
1636 }
1637 EXPORT_SYMBOL(block_invalidate_folio);
1638
1639 /*
1640  * We attach and possibly dirty the buffers atomically wrt
1641  * block_dirty_folio() via i_private_lock.  try_to_free_buffers
1642  * is already excluded via the folio lock.
1643  */
1644 struct buffer_head *create_empty_buffers(struct folio *folio,
1645                 unsigned long blocksize, unsigned long b_state)
1646 {
1647         struct buffer_head *bh, *head, *tail;
1648         gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
1649
1650         head = folio_alloc_buffers(folio, blocksize, gfp);
1651         bh = head;
1652         do {
1653                 bh->b_state |= b_state;
1654                 tail = bh;
1655                 bh = bh->b_this_page;
1656         } while (bh);
1657         tail->b_this_page = head;
1658
1659         spin_lock(&folio->mapping->i_private_lock);
1660         if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1661                 bh = head;
1662                 do {
1663                         if (folio_test_dirty(folio))
1664                                 set_buffer_dirty(bh);
1665                         if (folio_test_uptodate(folio))
1666                                 set_buffer_uptodate(bh);
1667                         bh = bh->b_this_page;
1668                 } while (bh != head);
1669         }
1670         folio_attach_private(folio, head);
1671         spin_unlock(&folio->mapping->i_private_lock);
1672
1673         return head;
1674 }
1675 EXPORT_SYMBOL(create_empty_buffers);
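
/*
 * Sketch (illustrative; this mirrors folio_create_buffers() below): callers
 * typically attach buffers only when the folio does not already have them,
 * with the block size taken from the inode:
 *
 *	head = folio_buffers(folio);
 *	if (!head)
 *		head = create_empty_buffers(folio,
 *				1 << inode->i_blkbits, 0);
 */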
1676
1677 /**
1678  * clean_bdev_aliases: clean a range of buffers in block device
1679  * @bdev: Block device to clean buffers in
1680  * @block: Start of a range of blocks to clean
1681  * @len: Number of blocks to clean
1682  *
1683  * We are taking a range of blocks for data and we don't want writeback of any
1684  * buffer-cache aliases starting from the return of this function until the
1685  * moment when something explicitly marks the buffer dirty (hopefully that
1686  * will not happen until we free that block ;-)  We don't even need to mark
1687  * it not-uptodate - nobody can expect anything from a newly allocated buffer
1688  * anyway. We used to use unmap_buffer() for such invalidation, but that was
1689  * wrong. We definitely don't want to mark the alias unmapped, for example - it
1690  * would confuse anyone who might pick it up with bread() afterwards...
1691  *
1692  * Also, note that bforget() doesn't lock the buffer.  So there can be
1693  * writeout I/O going on against recently-freed buffers.  We don't wait on that
1694  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1695  * need to.  That happens here.
1696  */
1697 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1698 {
1699         struct inode *bd_inode = bdev->bd_inode;
1700         struct address_space *bd_mapping = bd_inode->i_mapping;
1701         struct folio_batch fbatch;
1702         pgoff_t index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
1703         pgoff_t end;
1704         int i, count;
1705         struct buffer_head *bh;
1706         struct buffer_head *head;
1707
1708         end = ((loff_t)(block + len - 1) << bd_inode->i_blkbits) / PAGE_SIZE;
1709         folio_batch_init(&fbatch);
1710         while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1711                 count = folio_batch_count(&fbatch);
1712                 for (i = 0; i < count; i++) {
1713                         struct folio *folio = fbatch.folios[i];
1714
1715                         if (!folio_buffers(folio))
1716                                 continue;
1717                         /*
1718                          * We use folio lock instead of bd_mapping->i_private_lock
1719                          * to pin buffers here since we can afford to sleep and
1720                          * it scales better than a global spinlock.
1721                          */
1722                         folio_lock(folio);
1723                         /* Recheck when the folio is locked which pins bhs */
1724                         head = folio_buffers(folio);
1725                         if (!head)
1726                                 goto unlock_page;
1727                         bh = head;
1728                         do {
1729                                 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1730                                         goto next;
1731                                 if (bh->b_blocknr >= block + len)
1732                                         break;
1733                                 clear_buffer_dirty(bh);
1734                                 wait_on_buffer(bh);
1735                                 clear_buffer_req(bh);
1736 next:
1737                                 bh = bh->b_this_page;
1738                         } while (bh != head);
1739 unlock_page:
1740                         folio_unlock(folio);
1741                 }
1742                 folio_batch_release(&fbatch);
1743                 cond_resched();
1744                 /* End of range already reached? */
1745                 if (index > end || !index)
1746                         break;
1747         }
1748 }
1749 EXPORT_SYMBOL(clean_bdev_aliases);
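
/*
 * The common single-block case is wrapped by clean_bdev_bh_alias() in
 * <linux/buffer_head.h>, which is what __block_write_full_folio() and
 * __block_write_begin_int() below use when get_block() returns a
 * buffer_new buffer:
 *
 *	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
 */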
1750
1751 static struct buffer_head *folio_create_buffers(struct folio *folio,
1752                                                 struct inode *inode,
1753                                                 unsigned int b_state)
1754 {
1755         struct buffer_head *bh;
1756
1757         BUG_ON(!folio_test_locked(folio));
1758
1759         bh = folio_buffers(folio);
1760         if (!bh)
1761                 bh = create_empty_buffers(folio,
1762                                 1 << READ_ONCE(inode->i_blkbits), b_state);
1763         return bh;
1764 }
1765
1766 /*
1767  * NOTE! All mapped/uptodate combinations are valid:
1768  *
1769  *      Mapped  Uptodate        Meaning
1770  *
1771  *      No      No              "unknown" - must do get_block()
1772  *      No      Yes             "hole" - zero-filled
1773  *      Yes     No              "allocated" - allocated on disk, not read in
1774  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1775  *
1776  * "Dirty" is valid only with the last case (mapped+uptodate).
1777  */
1778
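/*
 * A sketch (for illustration only) of how a reader of the table above
 * might classify a buffer:
 *
 *	if (!buffer_mapped(bh) && !buffer_uptodate(bh))
 *		... unknown: must call get_block() ...
 *	else if (!buffer_mapped(bh))
 *		... a hole: treat the data as zeroes ...
 *	else if (!buffer_uptodate(bh))
 *		... allocated on disk: read it in before use ...
 *	else
 *		... valid: usable in memory, possibly dirty ...
 */
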
1779 /*
1780  * While block_write_full_folio is writing back the dirty buffers under
1781  * the page lock, whoever dirtied the buffers may decide to clean them
1782  * again at any time.  We handle that by only looking at the buffer
1783  * state inside lock_buffer().
1784  *
1785  * If block_write_full_folio() is called for regular writeback
1786  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1787  * locked buffer.  This can only happen if someone has written the buffer
1788  * directly, with submit_bh().  At the address_space level PageWriteback
1789  * prevents this contention from occurring.
1790  *
1791  * If block_write_full_folio() is called with wbc->sync_mode ==
1792  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1793  * causes the writes to be flagged as synchronous writes.
1794  */
1795 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1796                         get_block_t *get_block, struct writeback_control *wbc)
1797 {
1798         int err;
1799         sector_t block;
1800         sector_t last_block;
1801         struct buffer_head *bh, *head;
1802         size_t blocksize;
1803         int nr_underway = 0;
1804         blk_opf_t write_flags = wbc_to_write_flags(wbc);
1805
1806         head = folio_create_buffers(folio, inode,
1807                                     (1 << BH_Dirty) | (1 << BH_Uptodate));
1808
1809         /*
1810          * Be very careful.  We have no exclusion from block_dirty_folio
1811          * here, and the (potentially unmapped) buffers may become dirty at
1812          * any time.  If a buffer becomes dirty here after we've inspected it
1813          * then we just miss that fact, and the folio stays dirty.
1814          *
1815          * Buffers outside i_size may be dirtied by block_dirty_folio;
1816          * handle that here by just cleaning them.
1817          */
1818
1819         bh = head;
1820         blocksize = bh->b_size;
1821
1822         block = div_u64(folio_pos(folio), blocksize);
1823         last_block = div_u64(i_size_read(inode) - 1, blocksize);
1824
1825         /*
1826          * Get all the dirty buffers mapped to disk addresses and
1827          * handle any aliases from the underlying blockdev's mapping.
1828          */
1829         do {
1830                 if (block > last_block) {
1831                         /*
1832                          * mapped buffers outside i_size will occur, because
1833                          * this folio can be outside i_size when there is a
1834                          * truncate in progress.
1835                          */
1836                         /*
1837                          * The buffer was zeroed by block_write_full_folio()
1838                          */
1839                         clear_buffer_dirty(bh);
1840                         set_buffer_uptodate(bh);
1841                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1842                            buffer_dirty(bh)) {
1843                         WARN_ON(bh->b_size != blocksize);
1844                         err = get_block(inode, block, bh, 1);
1845                         if (err)
1846                                 goto recover;
1847                         clear_buffer_delay(bh);
1848                         if (buffer_new(bh)) {
1849                                 /* blockdev mappings never come here */
1850                                 clear_buffer_new(bh);
1851                                 clean_bdev_bh_alias(bh);
1852                         }
1853                 }
1854                 bh = bh->b_this_page;
1855                 block++;
1856         } while (bh != head);
1857
1858         do {
1859                 if (!buffer_mapped(bh))
1860                         continue;
1861                 /*
1862                  * If it's a fully non-blocking write attempt and we cannot
1863                  * lock the buffer then redirty the folio.  Note that this can
1864                  * potentially cause a busy-wait loop from writeback threads
1865                  * and kswapd activity, but those code paths have their own
1866                  * higher-level throttling.
1867                  */
1868                 if (wbc->sync_mode != WB_SYNC_NONE) {
1869                         lock_buffer(bh);
1870                 } else if (!trylock_buffer(bh)) {
1871                         folio_redirty_for_writepage(wbc, folio);
1872                         continue;
1873                 }
1874                 if (test_clear_buffer_dirty(bh)) {
1875                         mark_buffer_async_write_endio(bh,
1876                                 end_buffer_async_write);
1877                 } else {
1878                         unlock_buffer(bh);
1879                 }
1880         } while ((bh = bh->b_this_page) != head);
1881
1882         /*
1883          * The folio and its buffers are protected by the writeback flag,
1884          * so we can drop the bh refcounts early.
1885          */
1886         BUG_ON(folio_test_writeback(folio));
1887         folio_start_writeback(folio);
1888
1889         do {
1890                 struct buffer_head *next = bh->b_this_page;
1891                 if (buffer_async_write(bh)) {
1892                         submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1893                         nr_underway++;
1894                 }
1895                 bh = next;
1896         } while (bh != head);
1897         folio_unlock(folio);
1898
1899         err = 0;
1900 done:
1901         if (nr_underway == 0) {
1902                 /*
1903                  * The folio was marked dirty, but the buffers were
1904                  * clean.  Someone wrote them back by hand with
1905                  * write_dirty_buffer/submit_bh.  A rare case.
1906                  */
1907                 folio_end_writeback(folio);
1908
1909                 /*
1910                  * The folio and buffer_heads can be released at any time from
1911                  * here on.
1912                  */
1913         }
1914         return err;
1915
1916 recover:
1917         /*
1918          * ENOSPC, or some other error.  We may already have added some
1919          * blocks to the file, so we need to write these out to avoid
1920          * exposing stale data.
1921          * The folio is currently locked and not marked for writeback
1922          */
1923         bh = head;
1924         /* Recovery: lock and submit the mapped buffers */
1925         do {
1926                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1927                     !buffer_delay(bh)) {
1928                         lock_buffer(bh);
1929                         mark_buffer_async_write_endio(bh,
1930                                 end_buffer_async_write);
1931                 } else {
1932                         /*
1933                          * The buffer may have been set dirty during
1934                          * attachment to a dirty folio.
1935                          */
1936                         clear_buffer_dirty(bh);
1937                 }
1938         } while ((bh = bh->b_this_page) != head);
1939         folio_set_error(folio);
1940         BUG_ON(folio_test_writeback(folio));
1941         mapping_set_error(folio->mapping, err);
1942         folio_start_writeback(folio);
1943         do {
1944                 struct buffer_head *next = bh->b_this_page;
1945                 if (buffer_async_write(bh)) {
1946                         clear_buffer_dirty(bh);
1947                         submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1948                         nr_underway++;
1949                 }
1950                 bh = next;
1951         } while (bh != head);
1952         folio_unlock(folio);
1953         goto done;
1954 }
1955 EXPORT_SYMBOL(__block_write_full_folio);
1956
1957 /*
1958  * If a folio has any new buffers, zero them out here, and mark them uptodate
1959  * and dirty so they'll be written out (in order to prevent uninitialised
1960  * block data from leaking). And clear the new bit.
1961  */
1962 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1963 {
1964         size_t block_start, block_end;
1965         struct buffer_head *head, *bh;
1966
1967         BUG_ON(!folio_test_locked(folio));
1968         head = folio_buffers(folio);
1969         if (!head)
1970                 return;
1971
1972         bh = head;
1973         block_start = 0;
1974         do {
1975                 block_end = block_start + bh->b_size;
1976
1977                 if (buffer_new(bh)) {
1978                         if (block_end > from && block_start < to) {
1979                                 if (!folio_test_uptodate(folio)) {
1980                                         size_t start, xend;
1981
1982                                         start = max(from, block_start);
1983                                         xend = min(to, block_end);
1984
1985                                         folio_zero_segment(folio, start, xend);
1986                                         set_buffer_uptodate(bh);
1987                                 }
1988
1989                                 clear_buffer_new(bh);
1990                                 mark_buffer_dirty(bh);
1991                         }
1992                 }
1993
1994                 block_start = block_end;
1995                 bh = bh->b_this_page;
1996         } while (bh != head);
1997 }
1998 EXPORT_SYMBOL(folio_zero_new_buffers);
1999
2000 static int
2001 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2002                 const struct iomap *iomap)
2003 {
2004         loff_t offset = (loff_t)block << inode->i_blkbits;
2005
2006         bh->b_bdev = iomap->bdev;
2007
2008         /*
2009          * @block points to the offset in the file we need to map; the iomap contains
2010          * the offset at which the map starts. If the map ends before the
2011          * current block, then do not map the buffer and let the caller
2012          * handle it.
2013          */
2014         if (offset >= iomap->offset + iomap->length)
2015                 return -EIO;
2016
2017         switch (iomap->type) {
2018         case IOMAP_HOLE:
2019                 /*
2020                  * If the buffer is not up to date or beyond the current EOF,
2021                  * we need to mark it as new to ensure sub-block zeroing is
2022                  * executed if necessary.
2023                  */
2024                 if (!buffer_uptodate(bh) ||
2025                     (offset >= i_size_read(inode)))
2026                         set_buffer_new(bh);
2027                 return 0;
2028         case IOMAP_DELALLOC:
2029                 if (!buffer_uptodate(bh) ||
2030                     (offset >= i_size_read(inode)))
2031                         set_buffer_new(bh);
2032                 set_buffer_uptodate(bh);
2033                 set_buffer_mapped(bh);
2034                 set_buffer_delay(bh);
2035                 return 0;
2036         case IOMAP_UNWRITTEN:
2037                 /*
2038                  * For unwritten regions, we always need to ensure that regions
2039                  * in the block we are not writing to are zeroed. Mark the
2040                  * buffer as new to ensure this.
2041                  */
2042                 set_buffer_new(bh);
2043                 set_buffer_unwritten(bh);
2044                 fallthrough;
2045         case IOMAP_MAPPED:
2046                 if ((iomap->flags & IOMAP_F_NEW) ||
2047                     offset >= i_size_read(inode)) {
2048                         /*
2049                          * This can happen if truncating the block device races
2050                          * with the check in the caller, as i_size updates
2051                          * on block devices aren't synchronized by
2052                          * i_rwsem.
2053                          */
2054                         if (S_ISBLK(inode->i_mode))
2055                                 return -EIO;
2056                         set_buffer_new(bh);
2057                 }
2058                 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2059                                 inode->i_blkbits;
2060                 set_buffer_mapped(bh);
2061                 return 0;
2062         default:
2063                 WARN_ON_ONCE(1);
2064                 return -EIO;
2065         }
2066 }
2067
2068 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2069                 get_block_t *get_block, const struct iomap *iomap)
2070 {
2071         size_t from = offset_in_folio(folio, pos);
2072         size_t to = from + len;
2073         struct inode *inode = folio->mapping->host;
2074         size_t block_start, block_end;
2075         sector_t block;
2076         int err = 0;
2077         size_t blocksize;
2078         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2079
2080         BUG_ON(!folio_test_locked(folio));
2081         BUG_ON(to > folio_size(folio));
2082         BUG_ON(from > to);
2083
2084         head = folio_create_buffers(folio, inode, 0);
2085         blocksize = head->b_size;
2086         block = div_u64(folio_pos(folio), blocksize);
2087
2088         for (bh = head, block_start = 0; bh != head || !block_start;
2089             block++, block_start = block_end, bh = bh->b_this_page) {
2090                 block_end = block_start + blocksize;
2091                 if (block_end <= from || block_start >= to) {
2092                         if (folio_test_uptodate(folio)) {
2093                                 if (!buffer_uptodate(bh))
2094                                         set_buffer_uptodate(bh);
2095                         }
2096                         continue;
2097                 }
2098                 if (buffer_new(bh))
2099                         clear_buffer_new(bh);
2100                 if (!buffer_mapped(bh)) {
2101                         WARN_ON(bh->b_size != blocksize);
2102                         if (get_block)
2103                                 err = get_block(inode, block, bh, 1);
2104                         else
2105                                 err = iomap_to_bh(inode, block, bh, iomap);
2106                         if (err)
2107                                 break;
2108
2109                         if (buffer_new(bh)) {
2110                                 clean_bdev_bh_alias(bh);
2111                                 if (folio_test_uptodate(folio)) {
2112                                         clear_buffer_new(bh);
2113                                         set_buffer_uptodate(bh);
2114                                         mark_buffer_dirty(bh);
2115                                         continue;
2116                                 }
2117                                 if (block_end > to || block_start < from)
2118                                         folio_zero_segments(folio,
2119                                                 to, block_end,
2120                                                 block_start, from);
2121                                 continue;
2122                         }
2123                 }
2124                 if (folio_test_uptodate(folio)) {
2125                         if (!buffer_uptodate(bh))
2126                                 set_buffer_uptodate(bh);
2127                         continue;
2128                 }
2129                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2130                     !buffer_unwritten(bh) &&
2131                      (block_start < from || block_end > to)) {
2132                         bh_read_nowait(bh, 0);
2133                         *wait_bh++ = bh;
2134                 }
2135         }
2136         /*
2137          * If we issued read requests - let them complete.
2138          */
2139         while (wait_bh > wait) {
2140                 wait_on_buffer(*--wait_bh);
2141                 if (!buffer_uptodate(*wait_bh))
2142                         err = -EIO;
2143         }
2144         if (unlikely(err))
2145                 folio_zero_new_buffers(folio, from, to);
2146         return err;
2147 }
2148
2149 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2150                 get_block_t *get_block)
2151 {
2152         return __block_write_begin_int(page_folio(page), pos, len, get_block,
2153                                        NULL);
2154 }
2155 EXPORT_SYMBOL(__block_write_begin);
2156
2157 static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2158 {
2159         size_t block_start, block_end;
2160         bool partial = false;
2161         unsigned blocksize;
2162         struct buffer_head *bh, *head;
2163
2164         bh = head = folio_buffers(folio);
2165         blocksize = bh->b_size;
2166
2167         block_start = 0;
2168         do {
2169                 block_end = block_start + blocksize;
2170                 if (block_end <= from || block_start >= to) {
2171                         if (!buffer_uptodate(bh))
2172                                 partial = true;
2173                 } else {
2174                         set_buffer_uptodate(bh);
2175                         mark_buffer_dirty(bh);
2176                 }
2177                 if (buffer_new(bh))
2178                         clear_buffer_new(bh);
2179
2180                 block_start = block_end;
2181                 bh = bh->b_this_page;
2182         } while (bh != head);
2183
2184         /*
2185          * If this is a partial write which happened to make all buffers
2186          * uptodate then we can optimize away a bogus read_folio() for
2187          * the next read(). Here we 'discover' whether the folio went
2188          * uptodate as a result of this (potentially partial) write.
2189          */
2190         if (!partial)
2191                 folio_mark_uptodate(folio);
2192 }
2193
2194 /*
2195  * block_write_begin takes care of the basic task of block allocation and
2196  * bringing partial write blocks uptodate first.
2197  *
2198  * The filesystem needs to handle block truncation upon failure.
2199  */
2200 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2201                 struct page **pagep, get_block_t *get_block)
2202 {
2203         pgoff_t index = pos >> PAGE_SHIFT;
2204         struct page *page;
2205         int status;
2206
2207         page = grab_cache_page_write_begin(mapping, index);
2208         if (!page)
2209                 return -ENOMEM;
2210
2211         status = __block_write_begin(page, pos, len, get_block);
2212         if (unlikely(status)) {
2213                 unlock_page(page);
2214                 put_page(page);
2215                 page = NULL;
2216         }
2217
2218         *pagep = page;
2219         return status;
2220 }
2221 EXPORT_SYMBOL(block_write_begin);
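
/*
 * Sketch of a typical caller (hypothetical "myfs"; the block truncation
 * required on failure is omitted - see the comment above):
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, pagep,
 *					 myfs_get_block);
 *	}
 */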
2222
2223 int block_write_end(struct file *file, struct address_space *mapping,
2224                         loff_t pos, unsigned len, unsigned copied,
2225                         struct page *page, void *fsdata)
2226 {
2227         struct folio *folio = page_folio(page);
2228         size_t start = pos - folio_pos(folio);
2229
2230         if (unlikely(copied < len)) {
2231                 /*
2232                  * The buffers that were written will now be uptodate, so
2233                  * we don't have to worry about a read_folio reading them
2234                  * and overwriting a partial write. However if we have
2235                  * encountered a short write and only partially written
2236                  * into a buffer, it will not be marked uptodate, so a
2237                  * read_folio might come in and destroy our partial write.
2238                  *
2239                  * Do the simplest thing, and just treat any short write to a
2240                  * non uptodate folio as a zero-length write, and force the
2241                  * caller to redo the whole thing.
2242                  */
2243                 if (!folio_test_uptodate(folio))
2244                         copied = 0;
2245
2246                 folio_zero_new_buffers(folio, start+copied, start+len);
2247         }
2248         flush_dcache_folio(folio);
2249
2250         /* This could be a short (even 0-length) commit */
2251         __block_commit_write(folio, start, start + copied);
2252
2253         return copied;
2254 }
2255 EXPORT_SYMBOL(block_write_end);
2256
2257 int generic_write_end(struct file *file, struct address_space *mapping,
2258                         loff_t pos, unsigned len, unsigned copied,
2259                         struct page *page, void *fsdata)
2260 {
2261         struct inode *inode = mapping->host;
2262         loff_t old_size = inode->i_size;
2263         bool i_size_changed = false;
2264
2265         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2266
2267         /*
2268          * No need to use i_size_read() here, the i_size cannot change under us
2269          * because we hold i_rwsem.
2270          *
2271          * But it's important to update i_size while still holding page lock:
2272          * page writeout could otherwise come in and zero beyond i_size.
2273          */
2274         if (pos + copied > inode->i_size) {
2275                 i_size_write(inode, pos + copied);
2276                 i_size_changed = true;
2277         }
2278
2279         unlock_page(page);
2280         put_page(page);
2281
2282         if (old_size < pos)
2283                 pagecache_isize_extended(inode, old_size, pos);
2284         /*
2285          * Don't mark the inode dirty under page lock. First, it unnecessarily
2286          * makes the holding time of page lock longer. Second, it forces lock
2287          * ordering of page lock and transaction start for journaling
2288          * filesystems.
2289          */
2290         if (i_size_changed)
2291                 mark_inode_dirty(inode);
2292         return copied;
2293 }
2294 EXPORT_SYMBOL(generic_write_end);
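
/*
 * Sketch: how a buffer-backed filesystem might wire these helpers into its
 * address_space_operations (hypothetical "myfs"; remaining methods omitted):
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *	};
 */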
2295
2296 /*
2297  * block_is_partially_uptodate checks whether buffers within a folio are
2298  * uptodate or not.
2299  *
2300  * Returns true if all buffers which correspond to the specified part
2301  * of the folio are uptodate.
2302  */
2303 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2304 {
2305         unsigned block_start, block_end, blocksize;
2306         unsigned to;
2307         struct buffer_head *bh, *head;
2308         bool ret = true;
2309
2310         head = folio_buffers(folio);
2311         if (!head)
2312                 return false;
2313         blocksize = head->b_size;
2314         to = min_t(unsigned, folio_size(folio) - from, count);
2315         to = from + to;
2316         if (from < blocksize && to > folio_size(folio) - blocksize)
2317                 return false;
2318
2319         bh = head;
2320         block_start = 0;
2321         do {
2322                 block_end = block_start + blocksize;
2323                 if (block_end > from && block_start < to) {
2324                         if (!buffer_uptodate(bh)) {
2325                                 ret = false;
2326                                 break;
2327                         }
2328                         if (block_end >= to)
2329                                 break;
2330                 }
2331                 block_start = block_end;
2332                 bh = bh->b_this_page;
2333         } while (bh != head);
2334
2335         return ret;
2336 }
2337 EXPORT_SYMBOL(block_is_partially_uptodate);
2338
2339 /*
2340  * Generic "read_folio" function for block devices that have the normal
2341  * get_block functionality. This is most of the block device filesystems.
2342  * Reads the folio asynchronously --- the unlock_buffer() and
2343  * set/clear_buffer_uptodate() functions propagate buffer state into the
2344  * folio once IO has completed.
2345  */
2346 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2347 {
2348         struct inode *inode = folio->mapping->host;
2349         sector_t iblock, lblock;
2350         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2351         size_t blocksize;
2352         int nr, i;
2353         int fully_mapped = 1;
2354         bool page_error = false;
2355         loff_t limit = i_size_read(inode);
2356
2357         /* This is needed for ext4. */
2358         if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2359                 limit = inode->i_sb->s_maxbytes;
2360
2361         VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2362
2363         head = folio_create_buffers(folio, inode, 0);
2364         blocksize = head->b_size;
2365
2366         iblock = div_u64(folio_pos(folio), blocksize);
2367         lblock = div_u64(limit + blocksize - 1, blocksize);
2368         bh = head;
2369         nr = 0;
2370         i = 0;
2371
2372         do {
2373                 if (buffer_uptodate(bh))
2374                         continue;
2375
2376                 if (!buffer_mapped(bh)) {
2377                         int err = 0;
2378
2379                         fully_mapped = 0;
2380                         if (iblock < lblock) {
2381                                 WARN_ON(bh->b_size != blocksize);
2382                                 err = get_block(inode, iblock, bh, 0);
2383                                 if (err) {
2384                                         folio_set_error(folio);
2385                                         page_error = true;
2386                                 }
2387                         }
2388                         if (!buffer_mapped(bh)) {
2389                                 folio_zero_range(folio, i * blocksize,
2390                                                 blocksize);
2391                                 if (!err)
2392                                         set_buffer_uptodate(bh);
2393                                 continue;
2394                         }
2395                         /*
2396                          * get_block() might have updated the buffer
2397                          * synchronously
2398                          */
2399                         if (buffer_uptodate(bh))
2400                                 continue;
2401                 }
2402                 arr[nr++] = bh;
2403         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2404
2405         if (fully_mapped)
2406                 folio_set_mappedtodisk(folio);
2407
2408         if (!nr) {
2409                 /*
2410                  * All buffers are uptodate or get_block() returned an
2411                  * error when trying to map them - we can finish the read.
2412                  */
2413                 folio_end_read(folio, !page_error);
2414                 return 0;
2415         }
2416
2417         /* Stage two: lock the buffers */
2418         for (i = 0; i < nr; i++) {
2419                 bh = arr[i];
2420                 lock_buffer(bh);
2421                 mark_buffer_async_read(bh);
2422         }
2423
2424         /*
2425          * Stage three: start the IO.  Check for uptodateness
2426          * inside the buffer lock in case another process reading
2427          * the underlying blockdev brought it uptodate (the sct fix).
2428          */
2429         for (i = 0; i < nr; i++) {
2430                 bh = arr[i];
2431                 if (buffer_uptodate(bh))
2432                         end_buffer_async_read(bh, 1);
2433                 else
2434                         submit_bh(REQ_OP_READ, bh);
2435         }
2436         return 0;
2437 }
2438 EXPORT_SYMBOL(block_read_full_folio);
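
/*
 * Sketch of a typical ->read_folio implementation built on the helper
 * above (hypothetical "myfs_get_block"):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */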
2439
2440 /* Utility function for filesystems that need to do work on expanding
2441  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2442  * deal with the hole.
2443  */
2444 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2445 {
2446         struct address_space *mapping = inode->i_mapping;
2447         const struct address_space_operations *aops = mapping->a_ops;
2448         struct page *page;
2449         void *fsdata = NULL;
2450         int err;
2451
2452         err = inode_newsize_ok(inode, size);
2453         if (err)
2454                 goto out;
2455
2456         err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2457         if (err)
2458                 goto out;
2459
2460         err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2461         BUG_ON(err > 0);
2462
2463 out:
2464         return err;
2465 }
2466 EXPORT_SYMBOL(generic_cont_expand_simple);
2467
2468 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2469                             loff_t pos, loff_t *bytes)
2470 {
2471         struct inode *inode = mapping->host;
2472         const struct address_space_operations *aops = mapping->a_ops;
2473         unsigned int blocksize = i_blocksize(inode);
2474         struct page *page;
2475         void *fsdata = NULL;
2476         pgoff_t index, curidx;
2477         loff_t curpos;
2478         unsigned zerofrom, offset, len;
2479         int err = 0;
2480
2481         index = pos >> PAGE_SHIFT;
2482         offset = pos & ~PAGE_MASK;
2483
2484         while (index > (curidx = (curpos = *bytes) >> PAGE_SHIFT)) {
2485                 zerofrom = curpos & ~PAGE_MASK;
2486                 if (zerofrom & (blocksize-1)) {
2487                         *bytes |= (blocksize-1);
2488                         (*bytes)++;
2489                 }
2490                 len = PAGE_SIZE - zerofrom;
2491
2492                 err = aops->write_begin(file, mapping, curpos, len,
2493                                             &page, &fsdata);
2494                 if (err)
2495                         goto out;
2496                 zero_user(page, zerofrom, len);
2497                 err = aops->write_end(file, mapping, curpos, len, len,
2498                                                 page, fsdata);
2499                 if (err < 0)
2500                         goto out;
2501                 BUG_ON(err != len);
2502                 err = 0;
2503
2504                 balance_dirty_pages_ratelimited(mapping);
2505
2506                 if (fatal_signal_pending(current)) {
2507                         err = -EINTR;
2508                         goto out;
2509                 }
2510         }
2511
2512         /* page covers the boundary, find the boundary offset */
2513         if (index == curidx) {
2514                 zerofrom = curpos & ~PAGE_MASK;
2515                 /* if we are expanding the file, the last block will be filled */
2516                 if (offset <= zerofrom) {
2517                         goto out;
2518                 }
2519                 if (zerofrom & (blocksize-1)) {
2520                         *bytes |= (blocksize-1);
2521                         (*bytes)++;
2522                 }
2523                 len = offset - zerofrom;
2524
2525                 err = aops->write_begin(file, mapping, curpos, len,
2526                                             &page, &fsdata);
2527                 if (err)
2528                         goto out;
2529                 zero_user(page, zerofrom, len);
2530                 err = aops->write_end(file, mapping, curpos, len, len,
2531                                                 page, fsdata);
2532                 if (err < 0)
2533                         goto out;
2534                 BUG_ON(err != len);
2535                 err = 0;
2536         }
2537 out:
2538         return err;
2539 }
2540
2541 /*
2542  * For moronic filesystems that do not allow holes in files.
2543  * We may have to extend the file.
2544  */
2545 int cont_write_begin(struct file *file, struct address_space *mapping,
2546                         loff_t pos, unsigned len,
2547                         struct page **pagep, void **fsdata,
2548                         get_block_t *get_block, loff_t *bytes)
2549 {
2550         struct inode *inode = mapping->host;
2551         unsigned int blocksize = i_blocksize(inode);
2552         unsigned int zerofrom;
2553         int err;
2554
2555         err = cont_expand_zero(file, mapping, pos, bytes);
2556         if (err)
2557                 return err;
2558
2559         zerofrom = *bytes & ~PAGE_MASK;
2560         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2561                 *bytes |= (blocksize-1);
2562                 (*bytes)++;
2563         }
2564
2565         return block_write_begin(mapping, pos, len, pagep, get_block);
2566 }
2567 EXPORT_SYMBOL(cont_write_begin);
2568
2569 void block_commit_write(struct page *page, unsigned from, unsigned to)
2570 {
2571         struct folio *folio = page_folio(page);
2572         __block_commit_write(folio, from, to);
2573 }
2574 EXPORT_SYMBOL(block_commit_write);
2575
2576 /*
2577  * block_page_mkwrite() is not allowed to change the file size as it gets
2578  * called from a page fault handler when a page is first dirtied. Hence we must
2579  * be careful to check for EOF conditions here. We set the page up correctly
2580  * for a written page which means we get ENOSPC checking when writing into
2581  * holes and correct delalloc and unwritten extent mapping on filesystems that
2582  * support these features.
2583  *
2584  * We are not allowed to take the i_rwsem here so we have to play games to
2585  * protect against truncate races as the page could now be beyond EOF.  Because
2586  * truncate writes the inode size before removing pages, once we have the
2587  * page lock we can determine safely if the page is beyond EOF. If it is not
2588  * beyond EOF, then the page is guaranteed safe against truncation until we
2589  * unlock the page.
2590  *
2591  * Direct callers of this function should protect against filesystem freezing
2592  * using sb_start_pagefault() - sb_end_pagefault() functions.
2593  */
2594 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2595                          get_block_t get_block)
2596 {
2597         struct folio *folio = page_folio(vmf->page);
2598         struct inode *inode = file_inode(vma->vm_file);
2599         unsigned long end;
2600         loff_t size;
2601         int ret;
2602
2603         folio_lock(folio);
2604         size = i_size_read(inode);
2605         if ((folio->mapping != inode->i_mapping) ||
2606             (folio_pos(folio) >= size)) {
2607                 /* We overload EFAULT to mean page got truncated */
2608                 ret = -EFAULT;
2609                 goto out_unlock;
2610         }
2611
2612         end = folio_size(folio);
2613         /* folio is wholly or partially inside EOF */
2614         if (folio_pos(folio) + end > size)
2615                 end = size - folio_pos(folio);
2616
2617         ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2618         if (unlikely(ret))
2619                 goto out_unlock;
2620
2621         __block_commit_write(folio, 0, end);
2622
2623         folio_mark_dirty(folio);
2624         folio_wait_stable(folio);
2625         return 0;
2626 out_unlock:
2627         folio_unlock(folio);
2628         return ret;
2629 }
2630 EXPORT_SYMBOL(block_page_mkwrite);
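
/*
 * Sketch of a caller honouring the freeze-protection requirement noted
 * above (hypothetical "myfs"; the mapping of the error code to a VM_FAULT
 * code is simplified here):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		return err ? VM_FAULT_SIGBUS : VM_FAULT_LOCKED;
 *	}
 */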
2631
2632 int block_truncate_page(struct address_space *mapping,
2633                         loff_t from, get_block_t *get_block)
2634 {
2635         pgoff_t index = from >> PAGE_SHIFT;
2636         unsigned blocksize;
2637         sector_t iblock;
2638         size_t offset, length, pos;
2639         struct inode *inode = mapping->host;
2640         struct folio *folio;
2641         struct buffer_head *bh;
2642         int err = 0;
2643
2644         blocksize = i_blocksize(inode);
2645         length = from & (blocksize - 1);
2646
2647         /* Block boundary? Nothing to do */
2648         if (!length)
2649                 return 0;
2650
2651         length = blocksize - length;
2652         iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2653
2654         folio = filemap_grab_folio(mapping, index);
2655         if (IS_ERR(folio))
2656                 return PTR_ERR(folio);
2657
2658         bh = folio_buffers(folio);
2659         if (!bh)
2660                 bh = create_empty_buffers(folio, blocksize, 0);
2661
2662         /* Find the buffer that contains "offset" */
2663         offset = offset_in_folio(folio, from);
2664         pos = blocksize;
2665         while (offset >= pos) {
2666                 bh = bh->b_this_page;
2667                 iblock++;
2668                 pos += blocksize;
2669         }
2670
2671         if (!buffer_mapped(bh)) {
2672                 WARN_ON(bh->b_size != blocksize);
2673                 err = get_block(inode, iblock, bh, 0);
2674                 if (err)
2675                         goto unlock;
2676                 /* unmapped? It's a hole - nothing to do */
2677                 if (!buffer_mapped(bh))
2678                         goto unlock;
2679         }
2680
2681         /* Ok, it's mapped. Make sure it's up-to-date */
2682         if (folio_test_uptodate(folio))
2683                 set_buffer_uptodate(bh);
2684
2685         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2686                 err = bh_read(bh, 0);
2687                 /* Uhhuh. Read error. Complain and punt. */
2688                 if (err < 0)
2689                         goto unlock;
2690         }
2691
2692         folio_zero_range(folio, offset, length);
2693         mark_buffer_dirty(bh);
2694
2695 unlock:
2696         folio_unlock(folio);
2697         folio_put(folio);
2698
2699         return err;
2700 }
2701 EXPORT_SYMBOL(block_truncate_page);
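
/*
 * Sketch: a filesystem shrinking a file typically zeroes the tail of the
 * last block with this helper before freeing blocks, e.g. in its truncate
 * path (hypothetical "myfs_get_block"):
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  myfs_get_block);
 */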
2702
2703 /*
2704  * The generic ->writepage function for buffer-backed address_spaces
2705  */
2706 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2707                 void *get_block)
2708 {
2709         struct inode * const inode = folio->mapping->host;
2710         loff_t i_size = i_size_read(inode);
2711
2712         /* Is the folio fully inside i_size? */
2713         if (folio_pos(folio) + folio_size(folio) <= i_size)
2714                 return __block_write_full_folio(inode, folio, get_block, wbc);
2715
2716         /* Is the folio fully outside i_size? (truncate in progress) */
2717         if (folio_pos(folio) >= i_size) {
2718                 folio_unlock(folio);
2719                 return 0; /* don't care */
2720         }
2721
2722         /*
2723          * The folio straddles i_size.  It must be zeroed out on each and every
2724          * writepage invocation because it may be mmapped.  "A file is mapped
2725          * in multiples of the page size.  For a file that is not a multiple of
2726          * the page size, the remaining memory is zeroed when mapped, and
2727          * writes to that region are not written out to the file."
2728          */
2729         folio_zero_segment(folio, offset_in_folio(folio, i_size),
2730                         folio_size(folio));
2731         return __block_write_full_folio(inode, folio, get_block, wbc);
2732 }
2733
2734 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2735                             get_block_t *get_block)
2736 {
2737         struct inode *inode = mapping->host;
2738         struct buffer_head tmp = {
2739                 .b_size = i_blocksize(inode),
2740         };
2741
2742         get_block(inode, block, &tmp, 0);
2743         return tmp.b_blocknr;
2744 }
2745 EXPORT_SYMBOL(generic_block_bmap);
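
/*
 * Sketch of the usual ->bmap wrapper (hypothetical "myfs_get_block"):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */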
2746
2747 static void end_bio_bh_io_sync(struct bio *bio)
2748 {
2749         struct buffer_head *bh = bio->bi_private;
2750
2751         if (unlikely(bio_flagged(bio, BIO_QUIET)))
2752                 set_bit(BH_Quiet, &bh->b_state);
2753
2754         bh->b_end_io(bh, !bio->bi_status);
2755         bio_put(bio);
2756 }
2757
2758 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2759                           struct writeback_control *wbc)
2760 {
2761         const enum req_op op = opf & REQ_OP_MASK;
2762         struct bio *bio;
2763
2764         BUG_ON(!buffer_locked(bh));
2765         BUG_ON(!buffer_mapped(bh));
2766         BUG_ON(!bh->b_end_io);
2767         BUG_ON(buffer_delay(bh));
2768         BUG_ON(buffer_unwritten(bh));
2769
2770         /*
2771          * Only clear out a write error when rewriting
2772          */
2773         if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2774                 clear_buffer_write_io_error(bh);
2775
2776         if (buffer_meta(bh))
2777                 opf |= REQ_META;
2778         if (buffer_prio(bh))
2779                 opf |= REQ_PRIO;
2780
2781         bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2782
2783         fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2784
2785         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2786
2787         __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2788
2789         bio->bi_end_io = end_bio_bh_io_sync;
2790         bio->bi_private = bh;
2791
2792         /* Take care of bh's that straddle the end of the device */
2793         guard_bio_eod(bio);
2794
2795         if (wbc) {
2796                 wbc_init_bio(wbc, bio);
2797                 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2798         }
2799
2800         submit_bio(bio);
2801 }
2802
2803 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2804 {
2805         submit_bh_wbc(opf, bh, NULL);
2806 }
2807 EXPORT_SYMBOL(submit_bh);
2808
2809 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2810 {
2811         lock_buffer(bh);
2812         if (!test_clear_buffer_dirty(bh)) {
2813                 unlock_buffer(bh);
2814                 return;
2815         }
2816         bh->b_end_io = end_buffer_write_sync;
2817         get_bh(bh);
2818         submit_bh(REQ_OP_WRITE | op_flags, bh);
2819 }
2820 EXPORT_SYMBOL(write_dirty_buffer);
2821
2822 /*
2823  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2824  * and then start new I/O and then wait upon it.  The caller must have a ref on
2825  * the buffer_head.
2826  */
2827 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2828 {
2829         WARN_ON(atomic_read(&bh->b_count) < 1);
2830         lock_buffer(bh);
2831         if (test_clear_buffer_dirty(bh)) {
2832                 /*
2833                  * The bh should be mapped, but it might not be if the
2834                  * device was hot-removed. Not much we can do but fail the I/O.
2835                  */
2836                 if (!buffer_mapped(bh)) {
2837                         unlock_buffer(bh);
2838                         return -EIO;
2839                 }
2840
2841                 get_bh(bh);
2842                 bh->b_end_io = end_buffer_write_sync;
2843                 submit_bh(REQ_OP_WRITE | op_flags, bh);
2844                 wait_on_buffer(bh);
2845                 if (!buffer_uptodate(bh))
2846                         return -EIO;
2847         } else {
2848                 unlock_buffer(bh);
2849         }
2850         return 0;
2851 }
2852 EXPORT_SYMBOL(__sync_dirty_buffer);
2853
2854 int sync_dirty_buffer(struct buffer_head *bh)
2855 {
2856         return __sync_dirty_buffer(bh, REQ_SYNC);
2857 }
2858 EXPORT_SYMBOL(sync_dirty_buffer);
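
/*
 * Sketch of the usual modify-and-sync pattern for metadata (illustrative
 * only; assumes a buffer obtained with sb_bread() as above):
 *
 *	memcpy(bh->b_data + offset, src, count);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */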
2859
2860 /*
2861  * try_to_free_buffers() checks if all the buffers on this particular folio
2862  * are unused, and releases them if so.
2863  *
2864  * Exclusion against try_to_free_buffers may be obtained by either
2865  * locking the folio or by holding its mapping's i_private_lock.
2866  *
2867  * If the folio is dirty but all the buffers are clean then we need to
2868  * be sure to mark the folio clean as well.  This is because the folio
2869  * may be against a block device, and a later reattachment of buffers
2870  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2871  * filesystem data on the same device.
2872  *
2873  * The same applies to regular filesystem folios: if all the buffers are
2874  * clean then we set the folio clean and proceed.  To do that, we require
2875  * total exclusion from block_dirty_folio().  That is obtained with
2876  * i_private_lock.
2877  *
2878  * try_to_free_buffers() is non-blocking.
2879  */
2880 static inline int buffer_busy(struct buffer_head *bh)
2881 {
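        /* The bitwise OR is intentional: busy if referenced, dirty or locked. */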
2882         return atomic_read(&bh->b_count) |
2883                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2884 }
2885
2886 static bool
2887 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2888 {
2889         struct buffer_head *head = folio_buffers(folio);
2890         struct buffer_head *bh;
2891
2892         bh = head;
2893         do {
2894                 if (buffer_busy(bh))
2895                         goto failed;
2896                 bh = bh->b_this_page;
2897         } while (bh != head);
2898
2899         do {
2900                 struct buffer_head *next = bh->b_this_page;
2901
2902                 if (bh->b_assoc_map)
2903                         __remove_assoc_queue(bh);
2904                 bh = next;
2905         } while (bh != head);
2906         *buffers_to_free = head;
2907         folio_detach_private(folio);
2908         return true;
2909 failed:
2910         return false;
2911 }
2912
2913 bool try_to_free_buffers(struct folio *folio)
2914 {
2915         struct address_space * const mapping = folio->mapping;
2916         struct buffer_head *buffers_to_free = NULL;
2917         bool ret = false;
2918
2919         BUG_ON(!folio_test_locked(folio));
2920         if (folio_test_writeback(folio))
2921                 return false;
2922
2923         if (mapping == NULL) {          /* can this still happen? */
2924                 ret = drop_buffers(folio, &buffers_to_free);
2925                 goto out;
2926         }
2927
2928         spin_lock(&mapping->i_private_lock);
2929         ret = drop_buffers(folio, &buffers_to_free);
2930
2931         /*
2932          * If the filesystem writes its buffers by hand (eg ext3)
2933          * then we can have clean buffers against a dirty folio.  We
2934          * clean the folio here; otherwise the VM will never notice
2935          * that the filesystem did any IO at all.
2936          *
2937          * Also, during truncate, discard_buffer will have marked all
2938          * the folio's buffers clean.  We discover that here and clean
2939          * the folio also.
2940          *
2941          * i_private_lock must be held over this entire operation in order
2942          * to synchronise against block_dirty_folio and prevent the
2943          * dirty bit from being lost.
2944          */
2945         if (ret)
2946                 folio_cancel_dirty(folio);
2947         spin_unlock(&mapping->i_private_lock);
2948 out:
2949         if (buffers_to_free) {
2950                 struct buffer_head *bh = buffers_to_free;
2951
2952                 do {
2953                         struct buffer_head *next = bh->b_this_page;
2954                         free_buffer_head(bh);
2955                         bh = next;
2956                 } while (bh != buffers_to_free);
2957         }
2958         return ret;
2959 }
2960 EXPORT_SYMBOL(try_to_free_buffers);
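
/*
 * Illustrative sketch, not part of the original file: a minimal
 * ->release_folio style caller of try_to_free_buffers().  Holding the
 * folio lock is one of the two exclusion rules described above; the
 * function name example_release_folio is hypothetical, and @gfp is
 * unused in this sketch.
 */
static bool example_release_folio(struct folio *folio, gfp_t gfp)
{
        /* try_to_free_buffers() BUG()s if the folio is not locked. */
        if (!folio_test_locked(folio))
                return false;
        if (!folio_buffers(folio))
                return true;            /* nothing to free */
        return try_to_free_buffers(folio);
}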
2961
2962 /*
2963  * Buffer-head allocation
2964  */
2965 static struct kmem_cache *bh_cachep __ro_after_init;
2966
2967 /*
2968  * Once the number of bh's in the machine exceeds this level, we start
2969  * stripping them in writeback.
2970  */
2971 static unsigned long max_buffer_heads __ro_after_init;
2972
2973 int buffer_heads_over_limit;
2974
2975 struct bh_accounting {
2976         int nr;                 /* Number of live bh's */
2977         int ratelimit;          /* Limit cacheline bouncing */
2978 };
2979
2980 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2981
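/*
 * Recompute the global over-limit flag from the per-cpu counters.  To
 * limit cross-CPU cacheline traffic, the sum over all online CPUs is
 * redone only roughly once every 4096 local alloc/free events.
 */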
2982 static void recalc_bh_state(void)
2983 {
2984         int i;
2985         int tot = 0;
2986
2987         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
2988                 return;
2989         __this_cpu_write(bh_accounting.ratelimit, 0);
2990         for_each_online_cpu(i)
2991                 tot += per_cpu(bh_accounting, i).nr;
2992         buffer_heads_over_limit = (tot > max_buffer_heads);
2993 }
2994
2995 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2996 {
2997         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
2998         if (ret) {
2999                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3000                 spin_lock_init(&ret->b_uptodate_lock);
3001                 preempt_disable();
3002                 __this_cpu_inc(bh_accounting.nr);
3003                 recalc_bh_state();
3004                 preempt_enable();
3005         }
3006         return ret;
3007 }
3008 EXPORT_SYMBOL(alloc_buffer_head);
3009
3010 void free_buffer_head(struct buffer_head *bh)
3011 {
3012         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3013         kmem_cache_free(bh_cachep, bh);
3014         preempt_disable();
3015         __this_cpu_dec(bh_accounting.nr);
3016         recalc_bh_state();
3017         preempt_enable();
3018 }
3019 EXPORT_SYMBOL(free_buffer_head);
3020
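/*
 * CPU hotplug teardown: when a CPU goes offline, release the buffer
 * references held by its bh LRU and fold its bh_accounting count into
 * the current CPU's counter so no live buffer_heads go unaccounted.
 */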
3021 static int buffer_exit_cpu_dead(unsigned int cpu)
3022 {
3023         int i;
3024         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3025
3026         for (i = 0; i < BH_LRU_SIZE; i++) {
3027                 brelse(b->bhs[i]);
3028                 b->bhs[i] = NULL;
3029         }
3030         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3031         per_cpu(bh_accounting, cpu).nr = 0;
3032         return 0;
3033 }
3034
3035 /**
3036  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3037  * @bh: struct buffer_head
3038  *
3039  * Return 1 if the buffer is up-to-date.  Otherwise, return 0 with
3040  * the buffer locked; the caller must then read it in and unlock it.
3041  */
3042 int bh_uptodate_or_lock(struct buffer_head *bh)
3043 {
3044         if (!buffer_uptodate(bh)) {
3045                 lock_buffer(bh);
3046                 if (!buffer_uptodate(bh))
3047                         return 0;
3048                 unlock_buffer(bh);
3049         }
3050         return 1;
3051 }
3052 EXPORT_SYMBOL(bh_uptodate_or_lock);
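
/*
 * Illustrative sketch, not part of the original file: the canonical
 * caller pattern.  A zero return leaves the buffer locked, which is
 * exactly the state __bh_read() (defined below, declared in
 * buffer_head.h) expects; the helper name example_read_bh is
 * hypothetical.
 */
static int example_read_bh(struct buffer_head *bh)
{
        if (bh_uptodate_or_lock(bh))
                return 0;               /* already up to date, unlocked */
        /* Locked and stale: read it in and wait for completion. */
        return __bh_read(bh, 0, true);
}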
3053
3054 /**
3055  * __bh_read - Submit read for a locked buffer
3056  * @bh: struct buffer_head
3057  * @op_flags: extra REQ_* flags (e.g. REQ_RAHEAD) to OR into REQ_OP_READ
3058  * @wait: wait until the read completes
3059  *
3060  * Returns zero on success or when not waiting, and -EIO on I/O error.
3061  */
3062 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3063 {
3064         int ret = 0;
3065
3066         BUG_ON(!buffer_locked(bh));
3067
3068         get_bh(bh);
3069         bh->b_end_io = end_buffer_read_sync;
3070         submit_bh(REQ_OP_READ | op_flags, bh);
3071         if (wait) {
3072                 wait_on_buffer(bh);
3073                 if (!buffer_uptodate(bh))
3074                         ret = -EIO;
3075         }
3076         return ret;
3077 }
3078 EXPORT_SYMBOL(__bh_read);
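
/*
 * Illustrative sketch, not part of the original file: fire-and-check
 * usage with wait == false.  end_buffer_read_sync() unlocks the buffer
 * when the read completes, so the caller may block on it later.  The
 * helper name example_read_bh_async is hypothetical, and it assumes
 * the caller has already seen !buffer_uptodate(bh).
 */
static int example_read_bh_async(struct buffer_head *bh)
{
        lock_buffer(bh);
        __bh_read(bh, REQ_RAHEAD, false);       /* returns 0 immediately */

        /* ... overlap other work here ... */

        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}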
3079
3080 /**
3081  * __bh_read_batch - Submit read for a batch of unlocked buffers
3082  * @nr: number of entries in the buffer batch
3083  * @bhs: a batch of struct buffer_head
3084  * @op_flags: extra REQ_* flags (e.g. REQ_RAHEAD) to OR into REQ_OP_READ
3085  * @force_lock: if set, block until each buffer's lock is acquired;
3086  *              otherwise skip any buffer that cannot be locked right away.
3087  *
3088  * Reads complete asynchronously; end_buffer_read_sync() unlocks each buffer.
3089  */
3090 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3091                      blk_opf_t op_flags, bool force_lock)
3092 {
3093         int i;
3094
3095         for (i = 0; i < nr; i++) {
3096                 struct buffer_head *bh = bhs[i];
3097
3098                 if (buffer_uptodate(bh))
3099                         continue;
3100
3101                 if (force_lock)
3102                         lock_buffer(bh);
3103                 else
3104                         if (!trylock_buffer(bh))
3105                                 continue;
3106
3107                 if (buffer_uptodate(bh)) {
3108                         unlock_buffer(bh);
3109                         continue;
3110                 }
3111
3112                 bh->b_end_io = end_buffer_read_sync;
3113                 get_bh(bh);
3114                 submit_bh(REQ_OP_READ | op_flags, bh);
3115         }
3116 }
3117 EXPORT_SYMBOL(__bh_read_batch);
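
/*
 * Illustrative sketch, not part of the original file: opportunistic
 * readahead over a batch.  With force_lock == false, buffers that are
 * already locked (likely under I/O) are skipped instead of waited on;
 * the helper name example_readahead_bhs is hypothetical.
 */
static void example_readahead_bhs(struct buffer_head *bhs[], int nr)
{
        __bh_read_batch(nr, bhs, REQ_RAHEAD, false);
        /*
         * No waiting here: end_buffer_read_sync() unlocks each buffer
         * as its read finishes, and callers that need the data can
         * wait_on_buffer() individual entries later.
         */
}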
3118
3119 void __init buffer_init(void)
3120 {
3121         unsigned long nrpages;
3122         int ret;
3123
3124         bh_cachep = kmem_cache_create("buffer_head",
3125                         sizeof(struct buffer_head), 0,
3126                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3127                         SLAB_MEM_SPREAD),
3128                         NULL);
3129
3130         /*
3131          * Limit the bh occupancy to 10% of ZONE_NORMAL
3132          */
3133         nrpages = (nr_free_buffer_pages() * 10) / 100;
3134         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3135         ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3136                                         NULL, buffer_exit_cpu_dead);
3137         WARN_ON(ret < 0);
3138 }
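
/*
 * Worked example of the limit above (illustrative figures only; the
 * real values depend on the architecture and kernel config): with
 * 4 KiB pages and sizeof(struct buffer_head) of roughly 104 bytes,
 * PAGE_SIZE / sizeof(struct buffer_head) is ~39.  A machine with
 * 1 GiB of free buffer pages (262144 pages) thus gets nrpages = 26214
 * and max_buffer_heads ~= 26214 * 39 ~= 1.02 million buffer_heads.
 */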