1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2009  Red Hat, Inc.
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/coredump.h>
12 #include <linux/sched/numa_balancing.h>
13 #include <linux/highmem.h>
14 #include <linux/hugetlb.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/shrinker.h>
19 #include <linux/mm_inline.h>
20 #include <linux/swapops.h>
21 #include <linux/backing-dev.h>
22 #include <linux/dax.h>
23 #include <linux/khugepaged.h>
24 #include <linux/freezer.h>
25 #include <linux/pfn_t.h>
26 #include <linux/mman.h>
27 #include <linux/memremap.h>
28 #include <linux/pagemap.h>
29 #include <linux/debugfs.h>
30 #include <linux/migrate.h>
31 #include <linux/hashtable.h>
32 #include <linux/userfaultfd_k.h>
33 #include <linux/page_idle.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/oom.h>
36 #include <linux/numa.h>
37 #include <linux/page_owner.h>
38 #include <linux/sched/sysctl.h>
39 #include <linux/memory-tiers.h>
40 #include <linux/compat.h>
41
42 #include <asm/tlb.h>
43 #include <asm/pgalloc.h>
44 #include "internal.h"
45 #include "swap.h"
46
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/thp.h>
49
50 /*
51  * By default, transparent hugepage support is disabled in order to avoid
52  * risking an increased memory footprint for applications that are not
53  * guaranteed to benefit from it. When transparent hugepage support is
54  * enabled, it is for all mappings, and khugepaged scans all mappings.
55  * Defrag is invoked by khugepaged hugepage allocations and by page faults
56  * for all hugepage allocations.
57  */
58 unsigned long transparent_hugepage_flags __read_mostly =
59 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
60         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
61 #endif
62 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
63         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
64 #endif
65         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
66         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
67         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
68
69 static struct shrinker *deferred_split_shrinker;
70 static unsigned long deferred_split_count(struct shrinker *shrink,
71                                           struct shrink_control *sc);
72 static unsigned long deferred_split_scan(struct shrinker *shrink,
73                                          struct shrink_control *sc);
74
75 static atomic_t huge_zero_refcount;
76 struct page *huge_zero_page __read_mostly;
77 unsigned long huge_zero_pfn __read_mostly = ~0UL;
78 unsigned long huge_anon_orders_always __read_mostly;
79 unsigned long huge_anon_orders_madvise __read_mostly;
80 unsigned long huge_anon_orders_inherit __read_mostly;
81
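/*
 * Slow path behind thp_vma_allowable_orders(): narrow the requested
 * @orders down to those this VMA may actually use, honouring madvise/prctl
 * and (when @enforce_sysfs) the sysfs settings, rejecting special mappings
 * (vdso, VM_NO_KHUGEPAGED, temporary stacks), and dropping orders that do
 * not fit within the VMA.  Returns 0 if no order is allowed.
 */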
82 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
83                                          unsigned long vm_flags, bool smaps,
84                                          bool in_pf, bool enforce_sysfs,
85                                          unsigned long orders)
86 {
87         /* Check the intersection of requested and supported orders. */
88         orders &= vma_is_anonymous(vma) ?
89                         THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
90         if (!orders)
91                 return 0;
92
93         if (!vma->vm_mm)                /* vdso */
94                 return 0;
95
96         /*
97          * THP may be explicitly disabled through madvise or prctl, and some
98          * architectures may disable it for certain mappings, for example,
99          * s390 KVM guest memory.
100          */
101         if ((vm_flags & VM_NOHUGEPAGE) ||
102             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
103                 return 0;
104         /*
105          * Hardware or firmware may have marked hugepage support as disabled.
106          */
107         if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
108                 return 0;
109
110         /* khugepaged doesn't collapse DAX vmas, but page faults are fine. */
111         if (vma_is_dax(vma))
112                 return in_pf ? orders : 0;
113
114         /*
115          * Special VMAs and hugetlb VMAs that khugepaged must not touch.
116          * Must be checked after dax since some dax mappings may have
117          * VM_MIXEDMAP set.
118          */
119         if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
120                 return 0;
121
122         /*
123          * Check alignment for file vma and size for both file and anon vma by
124          * filtering out the unsuitable orders.
125          *
126          * Skip the check for page fault. Huge fault does the check in fault
127          * handlers.
128          */
129         if (!in_pf) {
130                 int order = highest_order(orders);
131                 unsigned long addr;
132
133                 while (orders) {
134                         addr = vma->vm_end - (PAGE_SIZE << order);
135                         if (thp_vma_suitable_order(vma, addr, order))
136                                 break;
137                         order = next_order(&orders, order);
138                 }
139
140                 if (!orders)
141                         return 0;
142         }
143
144         /*
145          * Enabled via shmem mount options or sysfs settings.
146          * Must be done before hugepage flags check since shmem has its
147          * own flags.
148          */
149         if (!in_pf && shmem_file(vma->vm_file))
150                 return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
151                                      !enforce_sysfs, vma->vm_mm, vm_flags)
152                         ? orders : 0;
153
154         if (!vma_is_anonymous(vma)) {
155                 /*
156                  * Enforce sysfs THP requirements as necessary. Anonymous vmas
157                  * were already handled in thp_vma_allowable_orders().
158                  */
159                 if (enforce_sysfs &&
160                     (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
161                                                     !hugepage_global_always())))
162                         return 0;
163
164                 /*
165                  * Trust that ->huge_fault() handlers know what they are doing
166                  * in fault path.
167                  */
168                 if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
169                         return orders;
170                 /* Only regular file is valid in collapse path */
171                 if (((!in_pf || smaps)) && file_thp_enabled(vma))
172                         return orders;
173                 return 0;
174         }
175
176         if (vma_is_temporary_stack(vma))
177                 return 0;
178
179         /*
180          * THPeligible bit of smaps should show 1 for proper VMAs even
181          * though anon_vma is not initialized yet.
182          *
183          * Allow page fault since anon_vma may be not initialized until
184          * the first page fault.
185          */
186         if (!vma->anon_vma)
187                 return (smaps || in_pf) ? orders : 0;
188
189         return orders;
190 }
191
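/*
 * Take a reference on the global huge zero page, allocating it on first
 * use.  The extra reference set here is dropped by the shrinker once all
 * other users are gone.  Returns false if the allocation fails.
 */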
192 static bool get_huge_zero_page(void)
193 {
194         struct page *zero_page;
195 retry:
196         if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
197                 return true;
198
199         zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
200                         HPAGE_PMD_ORDER);
201         if (!zero_page) {
202                 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
203                 return false;
204         }
205         preempt_disable();
206         if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
207                 preempt_enable();
208                 __free_pages(zero_page, compound_order(zero_page));
209                 goto retry;
210         }
211         WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
212
213         /* We take an additional reference here. It will be put back by the shrinker */
214         atomic_set(&huge_zero_refcount, 2);
215         preempt_enable();
216         count_vm_event(THP_ZERO_PAGE_ALLOC);
217         return true;
218 }
219
220 static void put_huge_zero_page(void)
221 {
222         /*
223          * The counter should never reach zero here; only the shrinker can put
224          * the last reference.
225          */
226         BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
227 }
228
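/*
 * Per-mm wrapper around get_huge_zero_page(): the first caller in an mm
 * pins the zero page and records that with MMF_HUGE_ZERO_PAGE, so each mm
 * holds at most one reference, released via mm_put_huge_zero_page().
 */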
229 struct page *mm_get_huge_zero_page(struct mm_struct *mm)
230 {
231         if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
232                 return READ_ONCE(huge_zero_page);
233
234         if (!get_huge_zero_page())
235                 return NULL;
236
237         if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
238                 put_huge_zero_page();
239
240         return READ_ONCE(huge_zero_page);
241 }
242
243 void mm_put_huge_zero_page(struct mm_struct *mm)
244 {
245         if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
246                 put_huge_zero_page();
247 }
248
249 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
250                                         struct shrink_control *sc)
251 {
252         /* We can free the zero page only if the last reference remains */
253         return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
254 }
255
256 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
257                                        struct shrink_control *sc)
258 {
259         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
260                 struct page *zero_page = xchg(&huge_zero_page, NULL);
261                 BUG_ON(zero_page == NULL);
262                 WRITE_ONCE(huge_zero_pfn, ~0UL);
263                 __free_pages(zero_page, compound_order(zero_page));
264                 return HPAGE_PMD_NR;
265         }
266
267         return 0;
268 }
269
270 static struct shrinker *huge_zero_page_shrinker;
271
272 #ifdef CONFIG_SYSFS
273 static ssize_t enabled_show(struct kobject *kobj,
274                             struct kobj_attribute *attr, char *buf)
275 {
276         const char *output;
277
278         if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
279                 output = "[always] madvise never";
280         else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
281                           &transparent_hugepage_flags))
282                 output = "always [madvise] never";
283         else
284                 output = "always madvise [never]";
285
286         return sysfs_emit(buf, "%s\n", output);
287 }
288
289 static ssize_t enabled_store(struct kobject *kobj,
290                              struct kobj_attribute *attr,
291                              const char *buf, size_t count)
292 {
293         ssize_t ret = count;
294
295         if (sysfs_streq(buf, "always")) {
296                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
297                 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
298         } else if (sysfs_streq(buf, "madvise")) {
299                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
300                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
301         } else if (sysfs_streq(buf, "never")) {
302                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
303                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
304         } else
305                 ret = -EINVAL;
306
307         if (ret > 0) {
308                 int err = start_stop_khugepaged();
309                 if (err)
310                         ret = err;
311         }
312         return ret;
313 }
314
315 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
316
317 ssize_t single_hugepage_flag_show(struct kobject *kobj,
318                                   struct kobj_attribute *attr, char *buf,
319                                   enum transparent_hugepage_flag flag)
320 {
321         return sysfs_emit(buf, "%d\n",
322                           !!test_bit(flag, &transparent_hugepage_flags));
323 }
324
325 ssize_t single_hugepage_flag_store(struct kobject *kobj,
326                                  struct kobj_attribute *attr,
327                                  const char *buf, size_t count,
328                                  enum transparent_hugepage_flag flag)
329 {
330         unsigned long value;
331         int ret;
332
333         ret = kstrtoul(buf, 10, &value);
334         if (ret < 0)
335                 return ret;
336         if (value > 1)
337                 return -EINVAL;
338
339         if (value)
340                 set_bit(flag, &transparent_hugepage_flags);
341         else
342                 clear_bit(flag, &transparent_hugepage_flags);
343
344         return count;
345 }
346
347 static ssize_t defrag_show(struct kobject *kobj,
348                            struct kobj_attribute *attr, char *buf)
349 {
350         const char *output;
351
352         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
353                      &transparent_hugepage_flags))
354                 output = "[always] defer defer+madvise madvise never";
355         else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
356                           &transparent_hugepage_flags))
357                 output = "always [defer] defer+madvise madvise never";
358         else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
359                           &transparent_hugepage_flags))
360                 output = "always defer [defer+madvise] madvise never";
361         else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
362                           &transparent_hugepage_flags))
363                 output = "always defer defer+madvise [madvise] never";
364         else
365                 output = "always defer defer+madvise madvise [never]";
366
367         return sysfs_emit(buf, "%s\n", output);
368 }
369
370 static ssize_t defrag_store(struct kobject *kobj,
371                             struct kobj_attribute *attr,
372                             const char *buf, size_t count)
373 {
374         if (sysfs_streq(buf, "always")) {
375                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
376                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
377                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
378                 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
379         } else if (sysfs_streq(buf, "defer+madvise")) {
380                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
381                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
382                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
383                 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
384         } else if (sysfs_streq(buf, "defer")) {
385                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
386                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
387                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
388                 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
389         } else if (sysfs_streq(buf, "madvise")) {
390                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
391                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
392                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
393                 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
394         } else if (sysfs_streq(buf, "never")) {
395                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
396                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
397                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
398                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
399         } else
400                 return -EINVAL;
401
402         return count;
403 }
404 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
405
406 static ssize_t use_zero_page_show(struct kobject *kobj,
407                                   struct kobj_attribute *attr, char *buf)
408 {
409         return single_hugepage_flag_show(kobj, attr, buf,
410                                          TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
411 }
412 static ssize_t use_zero_page_store(struct kobject *kobj,
413                 struct kobj_attribute *attr, const char *buf, size_t count)
414 {
415         return single_hugepage_flag_store(kobj, attr, buf, count,
416                                  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
417 }
418 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
419
420 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
421                                    struct kobj_attribute *attr, char *buf)
422 {
423         return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
424 }
425 static struct kobj_attribute hpage_pmd_size_attr =
426         __ATTR_RO(hpage_pmd_size);
427
428 static struct attribute *hugepage_attr[] = {
429         &enabled_attr.attr,
430         &defrag_attr.attr,
431         &use_zero_page_attr.attr,
432         &hpage_pmd_size_attr.attr,
433 #ifdef CONFIG_SHMEM
434         &shmem_enabled_attr.attr,
435 #endif
436         NULL,
437 };
438
439 static const struct attribute_group hugepage_attr_group = {
440         .attrs = hugepage_attr,
441 };
442
443 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
444 static void thpsize_release(struct kobject *kobj);
445 static DEFINE_SPINLOCK(huge_anon_orders_lock);
446 static LIST_HEAD(thpsize_list);
447
448 struct thpsize {
449         struct kobject kobj;
450         struct list_head node;
451         int order;
452 };
453
454 #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
455
456 static ssize_t thpsize_enabled_show(struct kobject *kobj,
457                                     struct kobj_attribute *attr, char *buf)
458 {
459         int order = to_thpsize(kobj)->order;
460         const char *output;
461
462         if (test_bit(order, &huge_anon_orders_always))
463                 output = "[always] inherit madvise never";
464         else if (test_bit(order, &huge_anon_orders_inherit))
465                 output = "always [inherit] madvise never";
466         else if (test_bit(order, &huge_anon_orders_madvise))
467                 output = "always inherit [madvise] never";
468         else
469                 output = "always inherit madvise [never]";
470
471         return sysfs_emit(buf, "%s\n", output);
472 }
473
474 static ssize_t thpsize_enabled_store(struct kobject *kobj,
475                                      struct kobj_attribute *attr,
476                                      const char *buf, size_t count)
477 {
478         int order = to_thpsize(kobj)->order;
479         ssize_t ret = count;
480
481         if (sysfs_streq(buf, "always")) {
482                 spin_lock(&huge_anon_orders_lock);
483                 clear_bit(order, &huge_anon_orders_inherit);
484                 clear_bit(order, &huge_anon_orders_madvise);
485                 set_bit(order, &huge_anon_orders_always);
486                 spin_unlock(&huge_anon_orders_lock);
487         } else if (sysfs_streq(buf, "inherit")) {
488                 spin_lock(&huge_anon_orders_lock);
489                 clear_bit(order, &huge_anon_orders_always);
490                 clear_bit(order, &huge_anon_orders_madvise);
491                 set_bit(order, &huge_anon_orders_inherit);
492                 spin_unlock(&huge_anon_orders_lock);
493         } else if (sysfs_streq(buf, "madvise")) {
494                 spin_lock(&huge_anon_orders_lock);
495                 clear_bit(order, &huge_anon_orders_always);
496                 clear_bit(order, &huge_anon_orders_inherit);
497                 set_bit(order, &huge_anon_orders_madvise);
498                 spin_unlock(&huge_anon_orders_lock);
499         } else if (sysfs_streq(buf, "never")) {
500                 spin_lock(&huge_anon_orders_lock);
501                 clear_bit(order, &huge_anon_orders_always);
502                 clear_bit(order, &huge_anon_orders_inherit);
503                 clear_bit(order, &huge_anon_orders_madvise);
504                 spin_unlock(&huge_anon_orders_lock);
505         } else
506                 ret = -EINVAL;
507
508         return ret;
509 }
510
511 static struct kobj_attribute thpsize_enabled_attr =
512         __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
513
514 static struct attribute *thpsize_attrs[] = {
515         &thpsize_enabled_attr.attr,
516         NULL,
517 };
518
519 static const struct attribute_group thpsize_attr_group = {
520         .attrs = thpsize_attrs,
521 };
522
523 static const struct kobj_type thpsize_ktype = {
524         .release = &thpsize_release,
525         .sysfs_ops = &kobj_sysfs_ops,
526 };
527
528 static struct thpsize *thpsize_create(int order, struct kobject *parent)
529 {
530         unsigned long size = (PAGE_SIZE << order) / SZ_1K;
531         struct thpsize *thpsize;
532         int ret;
533
534         thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
535         if (!thpsize)
536                 return ERR_PTR(-ENOMEM);
537
538         ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
539                                    "hugepages-%lukB", size);
540         if (ret) {
541                 kfree(thpsize);
542                 return ERR_PTR(ret);
543         }
544
545         ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
546         if (ret) {
547                 kobject_put(&thpsize->kobj);
548                 return ERR_PTR(ret);
549         }
550
551         thpsize->order = order;
552         return thpsize;
553 }
554
555 static void thpsize_release(struct kobject *kobj)
556 {
557         kfree(to_thpsize(kobj));
558 }
559
560 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
561 {
562         int err;
563         struct thpsize *thpsize;
564         unsigned long orders;
565         int order;
566
567         /*
568          * Default to setting PMD-sized THP to inherit the global setting and
569          * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
570          * constant so we have to do this here.
571          */
572         huge_anon_orders_inherit = BIT(PMD_ORDER);
573
574         *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
575         if (unlikely(!*hugepage_kobj)) {
576                 pr_err("failed to create transparent hugepage kobject\n");
577                 return -ENOMEM;
578         }
579
580         err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
581         if (err) {
582                 pr_err("failed to register transparent hugepage group\n");
583                 goto delete_obj;
584         }
585
586         err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
587         if (err) {
588                 pr_err("failed to register transparent hugepage group\n");
589                 goto remove_hp_group;
590         }
591
592         orders = THP_ORDERS_ALL_ANON;
593         order = highest_order(orders);
594         while (orders) {
595                 thpsize = thpsize_create(order, *hugepage_kobj);
596                 if (IS_ERR(thpsize)) {
597                         pr_err("failed to create thpsize for order %d\n", order);
598                         err = PTR_ERR(thpsize);
599                         goto remove_all;
600                 }
601                 list_add(&thpsize->node, &thpsize_list);
602                 order = next_order(&orders, order);
603         }
604
605         return 0;
606
607 remove_all:
608         hugepage_exit_sysfs(*hugepage_kobj);
609         return err;
610 remove_hp_group:
611         sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
612 delete_obj:
613         kobject_put(*hugepage_kobj);
614         return err;
615 }
616
617 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
618 {
619         struct thpsize *thpsize, *tmp;
620
621         list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
622                 list_del(&thpsize->node);
623                 kobject_put(&thpsize->kobj);
624         }
625
626         sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
627         sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
628         kobject_put(hugepage_kobj);
629 }
630 #else
631 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
632 {
633         return 0;
634 }
635
636 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
637 {
638 }
639 #endif /* CONFIG_SYSFS */
640
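/*
 * Register the two THP shrinkers: one that frees the huge zero page when
 * it is no longer referenced, and one that walks the deferred-split queues
 * and splits the huge folios queued there under memory pressure.
 */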
641 static int __init thp_shrinker_init(void)
642 {
643         huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
644         if (!huge_zero_page_shrinker)
645                 return -ENOMEM;
646
647         deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
648                                                  SHRINKER_MEMCG_AWARE |
649                                                  SHRINKER_NONSLAB,
650                                                  "thp-deferred_split");
651         if (!deferred_split_shrinker) {
652                 shrinker_free(huge_zero_page_shrinker);
653                 return -ENOMEM;
654         }
655
656         huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
657         huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
658         shrinker_register(huge_zero_page_shrinker);
659
660         deferred_split_shrinker->count_objects = deferred_split_count;
661         deferred_split_shrinker->scan_objects = deferred_split_scan;
662         shrinker_register(deferred_split_shrinker);
663
664         return 0;
665 }
666
667 static void __init thp_shrinker_exit(void)
668 {
669         shrinker_free(huge_zero_page_shrinker);
670         shrinker_free(deferred_split_shrinker);
671 }
672
673 static int __init hugepage_init(void)
674 {
675         int err;
676         struct kobject *hugepage_kobj;
677
678         if (!has_transparent_hugepage()) {
679                 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
680                 return -EINVAL;
681         }
682
683         /*
684          * hugepages can't be allocated by the buddy allocator
685          */
686         MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
687         /*
688          * we use page->mapping and page->index in second tail page
689          * as list_head: assuming THP order >= 2
690          */
691         MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
692
693         err = hugepage_init_sysfs(&hugepage_kobj);
694         if (err)
695                 goto err_sysfs;
696
697         err = khugepaged_init();
698         if (err)
699                 goto err_slab;
700
701         err = thp_shrinker_init();
702         if (err)
703                 goto err_shrinker;
704
705         /*
706          * By default disable transparent hugepages on smaller systems,
707          * where the extra memory used could hurt more than TLB overhead
708          * is likely to save.  The admin can still enable it through /sys.
709          */
710         if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
711                 transparent_hugepage_flags = 0;
712                 return 0;
713         }
714
715         err = start_stop_khugepaged();
716         if (err)
717                 goto err_khugepaged;
718
719         return 0;
720 err_khugepaged:
721         thp_shrinker_exit();
722 err_shrinker:
723         khugepaged_destroy();
724 err_slab:
725         hugepage_exit_sysfs(hugepage_kobj);
726 err_sysfs:
727         return err;
728 }
729 subsys_initcall(hugepage_init);
730
731 static int __init setup_transparent_hugepage(char *str)
732 {
733         int ret = 0;
734         if (!str)
735                 goto out;
736         if (!strcmp(str, "always")) {
737                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
738                         &transparent_hugepage_flags);
739                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
740                           &transparent_hugepage_flags);
741                 ret = 1;
742         } else if (!strcmp(str, "madvise")) {
743                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
744                           &transparent_hugepage_flags);
745                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
746                         &transparent_hugepage_flags);
747                 ret = 1;
748         } else if (!strcmp(str, "never")) {
749                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
750                           &transparent_hugepage_flags);
751                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
752                           &transparent_hugepage_flags);
753                 ret = 1;
754         }
755 out:
756         if (!ret)
757                 pr_warn("transparent_hugepage= cannot parse, ignored\n");
758         return ret;
759 }
760 __setup("transparent_hugepage=", setup_transparent_hugepage);
761
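/* Mark the pmd writable only if the VMA itself is writable. */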
762 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
763 {
764         if (likely(vma->vm_flags & VM_WRITE))
765                 pmd = pmd_mkwrite(pmd, vma);
766         return pmd;
767 }
768
769 #ifdef CONFIG_MEMCG
770 static inline
771 struct deferred_split *get_deferred_split_queue(struct folio *folio)
772 {
773         struct mem_cgroup *memcg = folio_memcg(folio);
774         struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
775
776         if (memcg)
777                 return &memcg->deferred_split_queue;
778         else
779                 return &pgdat->deferred_split_queue;
780 }
781 #else
782 static inline
783 struct deferred_split *get_deferred_split_queue(struct folio *folio)
784 {
785         struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
786
787         return &pgdat->deferred_split_queue;
788 }
789 #endif
790
791 void folio_prep_large_rmappable(struct folio *folio)
792 {
793         if (!folio || !folio_test_large(folio))
794                 return;
795         if (folio_order(folio) > 1)
796                 INIT_LIST_HEAD(&folio->_deferred_list);
797         folio_set_large_rmappable(folio);
798 }
799
800 static inline bool is_transparent_hugepage(struct folio *folio)
801 {
802         if (!folio_test_large(folio))
803                 return false;
804
805         return is_huge_zero_page(&folio->page) ||
806                 folio_test_large_rmappable(folio);
807 }
808
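/*
 * Try to find a mapping large enough that a @size-aligned region covering
 * the file offset fits inside it, then return the address within that
 * mapping whose alignment matches the offset.  Returns 0 on failure so
 * the caller can fall back to an unaligned search.
 */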
809 static unsigned long __thp_get_unmapped_area(struct file *filp,
810                 unsigned long addr, unsigned long len,
811                 loff_t off, unsigned long flags, unsigned long size)
812 {
813         loff_t off_end = off + len;
814         loff_t off_align = round_up(off, size);
815         unsigned long len_pad, ret, off_sub;
816
817         if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
818                 return 0;
819
820         if (off_end <= off_align || (off_end - off_align) < size)
821                 return 0;
822
823         len_pad = len + size;
824         if (len_pad < len || (off + len_pad) < off)
825                 return 0;
826
827         ret = current->mm->get_unmapped_area(filp, addr, len_pad,
828                                               off >> PAGE_SHIFT, flags);
829
830         /*
831          * The failure might be due to length padding. The caller will retry
832          * without the padding.
833          */
834         if (IS_ERR_VALUE(ret))
835                 return 0;
836
837         /*
838          * Do not try to align to THP boundary if allocation at the address
839          * hint succeeds.
840          */
841         if (ret == addr)
842                 return addr;
843
844         off_sub = (off - ret) & (size - 1);
845
846         if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
847             !off_sub)
848                 return ret + size;
849
850         ret += off_sub;
851         return ret;
852 }
853
854 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
855                 unsigned long len, unsigned long pgoff, unsigned long flags)
856 {
857         unsigned long ret;
858         loff_t off = (loff_t)pgoff << PAGE_SHIFT;
859
860         ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
861         if (ret)
862                 return ret;
863
864         return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
865 }
866 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
867
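/*
 * Charge, clear and map a freshly allocated huge folio at the faulting
 * address.  On success the PMD is populated and the folio is added to the
 * anon rmap and LRU; on any failure the folio reference is dropped.
 */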
868 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
869                         struct page *page, gfp_t gfp)
870 {
871         struct vm_area_struct *vma = vmf->vma;
872         struct folio *folio = page_folio(page);
873         pgtable_t pgtable;
874         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
875         vm_fault_t ret = 0;
876
877         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
878
879         if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
880                 folio_put(folio);
881                 count_vm_event(THP_FAULT_FALLBACK);
882                 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
883                 return VM_FAULT_FALLBACK;
884         }
885         folio_throttle_swaprate(folio, gfp);
886
887         pgtable = pte_alloc_one(vma->vm_mm);
888         if (unlikely(!pgtable)) {
889                 ret = VM_FAULT_OOM;
890                 goto release;
891         }
892
893         clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
894         /*
895          * The memory barrier inside __folio_mark_uptodate makes sure that
896          * clear_huge_page writes become visible before the set_pmd_at()
897          * write.
898          */
899         __folio_mark_uptodate(folio);
900
901         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
902         if (unlikely(!pmd_none(*vmf->pmd))) {
903                 goto unlock_release;
904         } else {
905                 pmd_t entry;
906
907                 ret = check_stable_address_space(vma->vm_mm);
908                 if (ret)
909                         goto unlock_release;
910
911                 /* Deliver the page fault to userland */
912                 if (userfaultfd_missing(vma)) {
913                         spin_unlock(vmf->ptl);
914                         folio_put(folio);
915                         pte_free(vma->vm_mm, pgtable);
916                         ret = handle_userfault(vmf, VM_UFFD_MISSING);
917                         VM_BUG_ON(ret & VM_FAULT_FALLBACK);
918                         return ret;
919                 }
920
921                 entry = mk_huge_pmd(page, vma->vm_page_prot);
922                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
923                 folio_add_new_anon_rmap(folio, vma, haddr);
924                 folio_add_lru_vma(folio, vma);
925                 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
926                 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
927                 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
928                 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
929                 mm_inc_nr_ptes(vma->vm_mm);
930                 spin_unlock(vmf->ptl);
931                 count_vm_event(THP_FAULT_ALLOC);
932                 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
933         }
934
935         return 0;
936 unlock_release:
937         spin_unlock(vmf->ptl);
938 release:
939         if (pgtable)
940                 pte_free(vma->vm_mm, pgtable);
941         folio_put(folio);
942         return ret;
943
944 }
945
946 /*
947  * always: directly stall for all thp allocations
948  * defer: wake kswapd and fail if not immediately available
949  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
950  *                fail if not immediately available
951  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
952  *          available
953  * never: never stall for any thp allocation
954  */
955 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
956 {
957         const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
958
959         /* Always do synchronous compaction */
960         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
961                 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
962
963         /* Kick kcompactd and fail quickly */
964         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
965                 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
966
967         /* Synchronous compaction if madvised, otherwise kick kcompactd */
968         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
969                 return GFP_TRANSHUGE_LIGHT |
970                         (vma_madvised ? __GFP_DIRECT_RECLAIM :
971                                         __GFP_KSWAPD_RECLAIM);
972
973         /* Only do synchronous compaction if madvised */
974         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
975                 return GFP_TRANSHUGE_LIGHT |
976                        (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
977
978         return GFP_TRANSHUGE_LIGHT;
979 }
980
981 /* Caller must hold page table lock. */
982 static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
983                 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
984                 struct page *zero_page)
985 {
986         pmd_t entry;
987         if (!pmd_none(*pmd))
988                 return;
989         entry = mk_pmd(zero_page, vma->vm_page_prot);
990         entry = pmd_mkhuge(entry);
991         pgtable_trans_huge_deposit(mm, pmd, pgtable);
992         set_pmd_at(mm, haddr, pmd, entry);
993         mm_inc_nr_ptes(mm);
994 }
995
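/*
 * Handle an anonymous page fault at PMD level: map the shared huge zero
 * page for suitable read faults, otherwise allocate and map a new huge
 * folio, falling back to PTE-sized pages when that is not possible.
 */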
996 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
997 {
998         struct vm_area_struct *vma = vmf->vma;
999         gfp_t gfp;
1000         struct folio *folio;
1001         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1002
1003         if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
1004                 return VM_FAULT_FALLBACK;
1005         if (unlikely(anon_vma_prepare(vma)))
1006                 return VM_FAULT_OOM;
1007         khugepaged_enter_vma(vma, vma->vm_flags);
1008
1009         if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1010                         !mm_forbids_zeropage(vma->vm_mm) &&
1011                         transparent_hugepage_use_zero_page()) {
1012                 pgtable_t pgtable;
1013                 struct page *zero_page;
1014                 vm_fault_t ret;
1015                 pgtable = pte_alloc_one(vma->vm_mm);
1016                 if (unlikely(!pgtable))
1017                         return VM_FAULT_OOM;
1018                 zero_page = mm_get_huge_zero_page(vma->vm_mm);
1019                 if (unlikely(!zero_page)) {
1020                         pte_free(vma->vm_mm, pgtable);
1021                         count_vm_event(THP_FAULT_FALLBACK);
1022                         return VM_FAULT_FALLBACK;
1023                 }
1024                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1025                 ret = 0;
1026                 if (pmd_none(*vmf->pmd)) {
1027                         ret = check_stable_address_space(vma->vm_mm);
1028                         if (ret) {
1029                                 spin_unlock(vmf->ptl);
1030                                 pte_free(vma->vm_mm, pgtable);
1031                         } else if (userfaultfd_missing(vma)) {
1032                                 spin_unlock(vmf->ptl);
1033                                 pte_free(vma->vm_mm, pgtable);
1034                                 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1035                                 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1036                         } else {
1037                                 set_huge_zero_page(pgtable, vma->vm_mm, vma,
1038                                                    haddr, vmf->pmd, zero_page);
1039                                 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1040                                 spin_unlock(vmf->ptl);
1041                         }
1042                 } else {
1043                         spin_unlock(vmf->ptl);
1044                         pte_free(vma->vm_mm, pgtable);
1045                 }
1046                 return ret;
1047         }
1048         gfp = vma_thp_gfp_mask(vma);
1049         folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
1050         if (unlikely(!folio)) {
1051                 count_vm_event(THP_FAULT_FALLBACK);
1052                 return VM_FAULT_FALLBACK;
1053         }
1054         return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
1055 }
1056
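/*
 * Install a huge PMD entry mapping @pfn at @addr.  An existing entry is
 * only refreshed (young/dirty) for a write to the same pfn; when a new
 * entry is created, the pre-allocated page table, if any, is deposited
 * for architectures that need it.
 */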
1057 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
1058                 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
1059                 pgtable_t pgtable)
1060 {
1061         struct mm_struct *mm = vma->vm_mm;
1062         pmd_t entry;
1063         spinlock_t *ptl;
1064
1065         ptl = pmd_lock(mm, pmd);
1066         if (!pmd_none(*pmd)) {
1067                 if (write) {
1068                         if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
1069                                 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1070                                 goto out_unlock;
1071                         }
1072                         entry = pmd_mkyoung(*pmd);
1073                         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1074                         if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1075                                 update_mmu_cache_pmd(vma, addr, pmd);
1076                 }
1077
1078                 goto out_unlock;
1079         }
1080
1081         entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
1082         if (pfn_t_devmap(pfn))
1083                 entry = pmd_mkdevmap(entry);
1084         if (write) {
1085                 entry = pmd_mkyoung(pmd_mkdirty(entry));
1086                 entry = maybe_pmd_mkwrite(entry, vma);
1087         }
1088
1089         if (pgtable) {
1090                 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1091                 mm_inc_nr_ptes(mm);
1092                 pgtable = NULL;
1093         }
1094
1095         set_pmd_at(mm, addr, pmd, entry);
1096         update_mmu_cache_pmd(vma, addr, pmd);
1097
1098 out_unlock:
1099         spin_unlock(ptl);
1100         if (pgtable)
1101                 pte_free(mm, pgtable);
1102 }
1103
1104 /**
1105  * vmf_insert_pfn_pmd - insert a pmd size pfn
1106  * @vmf: Structure describing the fault
1107  * @pfn: pfn to insert
1108  * @write: whether it's a write fault
1109  *
1110  * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1111  *
1112  * Return: vm_fault_t value.
1113  */
1114 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
1115 {
1116         unsigned long addr = vmf->address & PMD_MASK;
1117         struct vm_area_struct *vma = vmf->vma;
1118         pgprot_t pgprot = vma->vm_page_prot;
1119         pgtable_t pgtable = NULL;
1120
1121         /*
1122          * If we had pmd_special, we could avoid all these restrictions,
1123          * but we need to be consistent with PTEs and architectures that
1124          * can't support a 'special' bit.
1125          */
1126         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1127                         !pfn_t_devmap(pfn));
1128         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1129                                                 (VM_PFNMAP|VM_MIXEDMAP));
1130         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1131
1132         if (addr < vma->vm_start || addr >= vma->vm_end)
1133                 return VM_FAULT_SIGBUS;
1134
1135         if (arch_needs_pgtable_deposit()) {
1136                 pgtable = pte_alloc_one(vma->vm_mm);
1137                 if (!pgtable)
1138                         return VM_FAULT_OOM;
1139         }
1140
1141         track_pfn_insert(vma, &pgprot, pfn);
1142
1143         insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
1144         return VM_FAULT_NOPAGE;
1145 }
1146 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
1147
1148 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1149 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
1150 {
1151         if (likely(vma->vm_flags & VM_WRITE))
1152                 pud = pud_mkwrite(pud);
1153         return pud;
1154 }
1155
1156 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
1157                 pud_t *pud, pfn_t pfn, bool write)
1158 {
1159         struct mm_struct *mm = vma->vm_mm;
1160         pgprot_t prot = vma->vm_page_prot;
1161         pud_t entry;
1162         spinlock_t *ptl;
1163
1164         ptl = pud_lock(mm, pud);
1165         if (!pud_none(*pud)) {
1166                 if (write) {
1167                         if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
1168                                 WARN_ON_ONCE(!is_huge_zero_pud(*pud));
1169                                 goto out_unlock;
1170                         }
1171                         entry = pud_mkyoung(*pud);
1172                         entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1173                         if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1174                                 update_mmu_cache_pud(vma, addr, pud);
1175                 }
1176                 goto out_unlock;
1177         }
1178
1179         entry = pud_mkhuge(pfn_t_pud(pfn, prot));
1180         if (pfn_t_devmap(pfn))
1181                 entry = pud_mkdevmap(entry);
1182         if (write) {
1183                 entry = pud_mkyoung(pud_mkdirty(entry));
1184                 entry = maybe_pud_mkwrite(entry, vma);
1185         }
1186         set_pud_at(mm, addr, pud, entry);
1187         update_mmu_cache_pud(vma, addr, pud);
1188
1189 out_unlock:
1190         spin_unlock(ptl);
1191 }
1192
1193 /**
1194  * vmf_insert_pfn_pud - insert a pud size pfn
1195  * @vmf: Structure describing the fault
1196  * @pfn: pfn to insert
1197  * @write: whether it's a write fault
1198  *
1199  * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1200  *
1201  * Return: vm_fault_t value.
1202  */
1203 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
1204 {
1205         unsigned long addr = vmf->address & PUD_MASK;
1206         struct vm_area_struct *vma = vmf->vma;
1207         pgprot_t pgprot = vma->vm_page_prot;
1208
1209         /*
1210          * If we had pud_special, we could avoid all these restrictions,
1211          * but we need to be consistent with PTEs and architectures that
1212          * can't support a 'special' bit.
1213          */
1214         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1215                         !pfn_t_devmap(pfn));
1216         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1217                                                 (VM_PFNMAP|VM_MIXEDMAP));
1218         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1219
1220         if (addr < vma->vm_start || addr >= vma->vm_end)
1221                 return VM_FAULT_SIGBUS;
1222
1223         track_pfn_insert(vma, &pgprot, pfn);
1224
1225         insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
1226         return VM_FAULT_NOPAGE;
1227 }
1228 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1229 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1230
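/* Mark the PMD young (and dirty for a write) and update the MMU cache. */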
1231 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1232                       pmd_t *pmd, bool write)
1233 {
1234         pmd_t _pmd;
1235
1236         _pmd = pmd_mkyoung(*pmd);
1237         if (write)
1238                 _pmd = pmd_mkdirty(_pmd);
1239         if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1240                                   pmd, _pmd, write))
1241                 update_mmu_cache_pmd(vma, addr, pmd);
1242 }
1243
1244 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1245                 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1246 {
1247         unsigned long pfn = pmd_pfn(*pmd);
1248         struct mm_struct *mm = vma->vm_mm;
1249         struct page *page;
1250         int ret;
1251
1252         assert_spin_locked(pmd_lockptr(mm, pmd));
1253
1254         if (flags & FOLL_WRITE && !pmd_write(*pmd))
1255                 return NULL;
1256
1257         if (pmd_present(*pmd) && pmd_devmap(*pmd))
1258                 /* pass */;
1259         else
1260                 return NULL;
1261
1262         if (flags & FOLL_TOUCH)
1263                 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1264
1265         /*
1266          * device mapped pages can only be returned if the
1267          * caller will manage the page reference count.
1268          */
1269         if (!(flags & (FOLL_GET | FOLL_PIN)))
1270                 return ERR_PTR(-EEXIST);
1271
1272         pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1273         *pgmap = get_dev_pagemap(pfn, *pgmap);
1274         if (!*pgmap)
1275                 return ERR_PTR(-EFAULT);
1276         page = pfn_to_page(pfn);
1277         ret = try_grab_page(page, flags);
1278         if (ret)
1279                 page = ERR_PTR(ret);
1280
1281         return page;
1282 }
1283
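/*
 * Copy one huge PMD from parent to child at fork time.  Migration entries
 * are copied as-is (downgraded to read-only), the huge zero page just gains
 * a reference, and normal anonymous THPs are write-protected and shared;
 * if the page may be pinned the source PMD is split and -EAGAIN tells the
 * caller to retry at PTE level.
 */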
1284 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1285                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1286                   struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1287 {
1288         spinlock_t *dst_ptl, *src_ptl;
1289         struct page *src_page;
1290         struct folio *src_folio;
1291         pmd_t pmd;
1292         pgtable_t pgtable = NULL;
1293         int ret = -ENOMEM;
1294
1295         /* Skip if the PMD can be re-filled on fault */
1296         if (!vma_is_anonymous(dst_vma))
1297                 return 0;
1298
1299         pgtable = pte_alloc_one(dst_mm);
1300         if (unlikely(!pgtable))
1301                 goto out;
1302
1303         dst_ptl = pmd_lock(dst_mm, dst_pmd);
1304         src_ptl = pmd_lockptr(src_mm, src_pmd);
1305         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1306
1307         ret = -EAGAIN;
1308         pmd = *src_pmd;
1309
1310 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1311         if (unlikely(is_swap_pmd(pmd))) {
1312                 swp_entry_t entry = pmd_to_swp_entry(pmd);
1313
1314                 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1315                 if (!is_readable_migration_entry(entry)) {
1316                         entry = make_readable_migration_entry(
1317                                                         swp_offset(entry));
1318                         pmd = swp_entry_to_pmd(entry);
1319                         if (pmd_swp_soft_dirty(*src_pmd))
1320                                 pmd = pmd_swp_mksoft_dirty(pmd);
1321                         if (pmd_swp_uffd_wp(*src_pmd))
1322                                 pmd = pmd_swp_mkuffd_wp(pmd);
1323                         set_pmd_at(src_mm, addr, src_pmd, pmd);
1324                 }
1325                 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1326                 mm_inc_nr_ptes(dst_mm);
1327                 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1328                 if (!userfaultfd_wp(dst_vma))
1329                         pmd = pmd_swp_clear_uffd_wp(pmd);
1330                 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1331                 ret = 0;
1332                 goto out_unlock;
1333         }
1334 #endif
1335
1336         if (unlikely(!pmd_trans_huge(pmd))) {
1337                 pte_free(dst_mm, pgtable);
1338                 goto out_unlock;
1339         }
1340         /*
1341          * When page table lock is held, the huge zero pmd should not be
1342          * under splitting since we don't split the page itself, only the pmd
1343          * into a page table.
1344          */
1345         if (is_huge_zero_pmd(pmd)) {
1346                 /*
1347                  * get_huge_zero_page() will never allocate a new page here,
1348                  * since we already have a zero page to copy. It just takes a
1349                  * reference.
1350                  */
1351                 mm_get_huge_zero_page(dst_mm);
1352                 goto out_zero_page;
1353         }
1354
1355         src_page = pmd_page(pmd);
1356         VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1357         src_folio = page_folio(src_page);
1358
1359         folio_get(src_folio);
1360         if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
1361                 /* Page may be pinned: split and retry the fault on PTEs. */
1362                 folio_put(src_folio);
1363                 pte_free(dst_mm, pgtable);
1364                 spin_unlock(src_ptl);
1365                 spin_unlock(dst_ptl);
1366                 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
1367                 return -EAGAIN;
1368         }
1369         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1370 out_zero_page:
1371         mm_inc_nr_ptes(dst_mm);
1372         pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1373         pmdp_set_wrprotect(src_mm, addr, src_pmd);
1374         if (!userfaultfd_wp(dst_vma))
1375                 pmd = pmd_clear_uffd_wp(pmd);
1376         pmd = pmd_mkold(pmd_wrprotect(pmd));
1377         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1378
1379         ret = 0;
1380 out_unlock:
1381         spin_unlock(src_ptl);
1382         spin_unlock(dst_ptl);
1383 out:
1384         return ret;
1385 }
1386
1387 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1388 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1389                       pud_t *pud, bool write)
1390 {
1391         pud_t _pud;
1392
1393         _pud = pud_mkyoung(*pud);
1394         if (write)
1395                 _pud = pud_mkdirty(_pud);
1396         if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1397                                   pud, _pud, write))
1398                 update_mmu_cache_pud(vma, addr, pud);
1399 }
1400
1401 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1402                 pud_t *pud, int flags, struct dev_pagemap **pgmap)
1403 {
1404         unsigned long pfn = pud_pfn(*pud);
1405         struct mm_struct *mm = vma->vm_mm;
1406         struct page *page;
1407         int ret;
1408
1409         assert_spin_locked(pud_lockptr(mm, pud));
1410
1411         if (flags & FOLL_WRITE && !pud_write(*pud))
1412                 return NULL;
1413
1414         if (pud_present(*pud) && pud_devmap(*pud))
1415                 /* pass */;
1416         else
1417                 return NULL;
1418
1419         if (flags & FOLL_TOUCH)
1420                 touch_pud(vma, addr, pud, flags & FOLL_WRITE);
1421
1422         /*
1423          * device mapped pages can only be returned if the
1424          * caller will manage the page reference count.
1425          *
1426          * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
1427          */
1428         if (!(flags & (FOLL_GET | FOLL_PIN)))
1429                 return ERR_PTR(-EEXIST);
1430
1431         pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1432         *pgmap = get_dev_pagemap(pfn, *pgmap);
1433         if (!*pgmap)
1434                 return ERR_PTR(-EFAULT);
1435         page = pfn_to_page(pfn);
1436
1437         ret = try_grab_page(page, flags);
1438         if (ret)
1439                 page = ERR_PTR(ret);
1440
1441         return page;
1442 }
1443
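/*
 * Copy a huge devmap pud from the parent to the child at fork time:
 * write-protect the source and install an old, write-protected copy in the
 * child. Anonymous huge puds are not supported yet. Returns -EAGAIN if the
 * source entry is no longer a huge pud.
 */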
1444 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1445                   pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1446                   struct vm_area_struct *vma)
1447 {
1448         spinlock_t *dst_ptl, *src_ptl;
1449         pud_t pud;
1450         int ret;
1451
1452         dst_ptl = pud_lock(dst_mm, dst_pud);
1453         src_ptl = pud_lockptr(src_mm, src_pud);
1454         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1455
1456         ret = -EAGAIN;
1457         pud = *src_pud;
1458         if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1459                 goto out_unlock;
1460
1461         /*
1462          * While the page table lock is held, the huge zero pud cannot be
1463          * split underneath us, since we never split the page itself, only
1464          * the pud into a page table.
1465          */
1466         if (is_huge_zero_pud(pud)) {
1467                 /* No huge zero pud yet */
1468         }
1469
1470         /*
1471          * TODO: once we support anonymous pages, use
1472          * folio_try_dup_anon_rmap_*() and split if duplicating fails.
1473          */
1474         pudp_set_wrprotect(src_mm, addr, src_pud);
1475         pud = pud_mkold(pud_wrprotect(pud));
1476         set_pud_at(dst_mm, addr, dst_pud, pud);
1477
1478         ret = 0;
1479 out_unlock:
1480         spin_unlock(src_ptl);
1481         spin_unlock(dst_ptl);
1482         return ret;
1483 }
1484
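/*
 * Fault handler for an already-present huge pud: just mark it accessed
 * (and dirty for write faults).
 */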
1485 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1486 {
1487         bool write = vmf->flags & FAULT_FLAG_WRITE;
1488
1489         vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1490         if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1491                 goto unlock;
1492
1493         touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1494 unlock:
1495         spin_unlock(vmf->ptl);
1496 }
1497 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1498
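/*
 * Fault handler for an already-present huge pmd: just mark it accessed
 * (and dirty for write faults).
 */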
1499 void huge_pmd_set_accessed(struct vm_fault *vmf)
1500 {
1501         bool write = vmf->flags & FAULT_FLAG_WRITE;
1502
1503         vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1504         if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1505                 goto unlock;
1506
1507         touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1508
1509 unlock:
1510         spin_unlock(vmf->ptl);
1511 }
1512
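/*
 * Handle a write-protection (or unshare) fault on an anonymous huge pmd:
 * reuse the folio if it is exclusive to this process, otherwise split the
 * pmd and let the fault be retried at pte granularity.
 */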
1513 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1514 {
1515         const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1516         struct vm_area_struct *vma = vmf->vma;
1517         struct folio *folio;
1518         struct page *page;
1519         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1520         pmd_t orig_pmd = vmf->orig_pmd;
1521
1522         vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1523         VM_BUG_ON_VMA(!vma->anon_vma, vma);
1524
1525         if (is_huge_zero_pmd(orig_pmd))
1526                 goto fallback;
1527
1528         spin_lock(vmf->ptl);
1529
1530         if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1531                 spin_unlock(vmf->ptl);
1532                 return 0;
1533         }
1534
1535         page = pmd_page(orig_pmd);
1536         folio = page_folio(page);
1537         VM_BUG_ON_PAGE(!PageHead(page), page);
1538
1539         /* Early check when only holding the PT lock. */
1540         if (PageAnonExclusive(page))
1541                 goto reuse;
1542
1543         if (!folio_trylock(folio)) {
1544                 folio_get(folio);
1545                 spin_unlock(vmf->ptl);
1546                 folio_lock(folio);
1547                 spin_lock(vmf->ptl);
1548                 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1549                         spin_unlock(vmf->ptl);
1550                         folio_unlock(folio);
1551                         folio_put(folio);
1552                         return 0;
1553                 }
1554                 folio_put(folio);
1555         }
1556
1557         /* Recheck after temporarily dropping the PT lock. */
1558         if (PageAnonExclusive(page)) {
1559                 folio_unlock(folio);
1560                 goto reuse;
1561         }
1562
1563         /*
1564          * See do_wp_page(): we can only reuse the folio exclusively if
1565          * there are no additional references. Note that we always drain
1566          * the LRU cache immediately after adding a THP.
1567          */
1568         if (folio_ref_count(folio) >
1569                         1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1570                 goto unlock_fallback;
1571         if (folio_test_swapcache(folio))
1572                 folio_free_swap(folio);
1573         if (folio_ref_count(folio) == 1) {
1574                 pmd_t entry;
1575
1576                 folio_move_anon_rmap(folio, vma);
1577                 SetPageAnonExclusive(page);
1578                 folio_unlock(folio);
1579 reuse:
1580                 if (unlikely(unshare)) {
1581                         spin_unlock(vmf->ptl);
1582                         return 0;
1583                 }
1584                 entry = pmd_mkyoung(orig_pmd);
1585                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1586                 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1587                         update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1588                 spin_unlock(vmf->ptl);
1589                 return 0;
1590         }
1591
1592 unlock_fallback:
1593         folio_unlock(folio);
1594         spin_unlock(vmf->ptl);
1595 fallback:
1596         __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1597         return VM_FAULT_FALLBACK;
1598 }
1599
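/*
 * Check whether the pmd can simply be marked writable without going through
 * a write fault; the pmd-level counterpart of can_change_pte_writable().
 */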
1600 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1601                                            unsigned long addr, pmd_t pmd)
1602 {
1603         struct page *page;
1604
1605         if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1606                 return false;
1607
1608         /* Don't touch entries that are not even readable (NUMA hinting). */
1609         if (pmd_protnone(pmd))
1610                 return false;
1611
1612         /* Do we need write faults for softdirty tracking? */
1613         if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1614                 return false;
1615
1616         /* Do we need write faults for uffd-wp tracking? */
1617         if (userfaultfd_huge_pmd_wp(vma, pmd))
1618                 return false;
1619
1620         if (!(vma->vm_flags & VM_SHARED)) {
1621                 /* See can_change_pte_writable(). */
1622                 page = vm_normal_page_pmd(vma, addr, pmd);
1623                 return page && PageAnon(page) && PageAnonExclusive(page);
1624         }
1625
1626         /* See can_change_pte_writable(). */
1627         return pmd_dirty(pmd);
1628 }
1629
1630 /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
1631 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1632                                         struct vm_area_struct *vma,
1633                                         unsigned int flags)
1634 {
1635         /* If the pmd is writable, we can write to the page. */
1636         if (pmd_write(pmd))
1637                 return true;
1638
1639         /* Maybe FOLL_FORCE is set to override it? */
1640         if (!(flags & FOLL_FORCE))
1641                 return false;
1642
1643         /* But FOLL_FORCE has no effect on shared mappings */
1644         if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
1645                 return false;
1646
1647         /* ... or read-only private ones */
1648         if (!(vma->vm_flags & VM_MAYWRITE))
1649                 return false;
1650
1651         /* ... or already writable ones that just need to take a write fault */
1652         if (vma->vm_flags & VM_WRITE)
1653                 return false;
1654
1655         /*
1656          * See can_change_pte_writable(): we broke COW and could map the page
1657          * writable if we have an exclusive anonymous page ...
1658          */
1659         if (!page || !PageAnon(page) || !PageAnonExclusive(page))
1660                 return false;
1661
1662         /* ... and a write-fault isn't required for other reasons. */
1663         if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1664                 return false;
1665         return !userfaultfd_huge_pmd_wp(vma, pmd);
1666 }
1667
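/*
 * GUP helper for a mapped transparent huge pmd: validate the FOLL_* flags,
 * grab a reference and return the subpage at @addr. The caller must hold
 * the pmd lock.
 */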
1668 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1669                                    unsigned long addr,
1670                                    pmd_t *pmd,
1671                                    unsigned int flags)
1672 {
1673         struct mm_struct *mm = vma->vm_mm;
1674         struct page *page;
1675         int ret;
1676
1677         assert_spin_locked(pmd_lockptr(mm, pmd));
1678
1679         page = pmd_page(*pmd);
1680         VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1681
1682         if ((flags & FOLL_WRITE) &&
1683             !can_follow_write_pmd(*pmd, page, vma, flags))
1684                 return NULL;
1685
1686         /* Avoid dumping huge zero page */
1687         if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1688                 return ERR_PTR(-EFAULT);
1689
1690         if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
1691                 return NULL;
1692
1693         if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
1694                 return ERR_PTR(-EMLINK);
1695
1696         VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1697                         !PageAnonExclusive(page), page);
1698
1699         ret = try_grab_page(page, flags);
1700         if (ret)
1701                 return ERR_PTR(ret);
1702
1703         if (flags & FOLL_TOUCH)
1704                 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1705
1706         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1707         VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1708
1709         return page;
1710 }
1711
1712 /* NUMA hinting page fault entry point for trans huge pmds */
1713 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1714 {
1715         struct vm_area_struct *vma = vmf->vma;
1716         pmd_t oldpmd = vmf->orig_pmd;
1717         pmd_t pmd;
1718         struct folio *folio;
1719         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1720         int nid = NUMA_NO_NODE;
1721         int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
1722         bool migrated = false, writable = false;
1723         int flags = 0;
1724
1725         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1726         if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1727                 spin_unlock(vmf->ptl);
1728                 goto out;
1729         }
1730
1731         pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1732
1733         /*
1734          * Detect now whether the PMD could be writable; this information
1735          * is only valid while holding the PT lock.
1736          */
1737         writable = pmd_write(pmd);
1738         if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1739             can_change_pmd_writable(vma, vmf->address, pmd))
1740                 writable = true;
1741
1742         folio = vm_normal_folio_pmd(vma, haddr, pmd);
1743         if (!folio)
1744                 goto out_map;
1745
1746         /* See similar comment in do_numa_page for explanation */
1747         if (!writable)
1748                 flags |= TNF_NO_GROUP;
1749
1750         nid = folio_nid(folio);
1751         /*
1752          * In memory tiering mode, the cpupid of a slow-memory page is used
1753          * to record the page access time, so use the default value instead.
1754          */
1755         if (node_is_toptier(nid))
1756                 last_cpupid = folio_last_cpupid(folio);
1757         target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
1758         if (target_nid == NUMA_NO_NODE) {
1759                 folio_put(folio);
1760                 goto out_map;
1761         }
1762
1763         spin_unlock(vmf->ptl);
1764         writable = false;
1765
1766         migrated = migrate_misplaced_folio(folio, vma, target_nid);
1767         if (migrated) {
1768                 flags |= TNF_MIGRATED;
1769                 nid = target_nid;
1770         } else {
1771                 flags |= TNF_MIGRATE_FAIL;
1772                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1773                 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1774                         spin_unlock(vmf->ptl);
1775                         goto out;
1776                 }
1777                 goto out_map;
1778         }
1779
1780 out:
1781         if (nid != NUMA_NO_NODE)
1782                 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
1783
1784         return 0;
1785
1786 out_map:
1787         /* Restore the PMD */
1788         pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1789         pmd = pmd_mkyoung(pmd);
1790         if (writable)
1791                 pmd = pmd_mkwrite(pmd, vma);
1792         set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1793         update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1794         spin_unlock(vmf->ptl);
1795         goto out;
1796 }
1797
1798 /*
1799  * Return true if MADV_FREE was applied successfully to the entire pmd page.
1800  * Otherwise, return false.
1801  */
1802 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1803                 pmd_t *pmd, unsigned long addr, unsigned long next)
1804 {
1805         spinlock_t *ptl;
1806         pmd_t orig_pmd;
1807         struct folio *folio;
1808         struct mm_struct *mm = tlb->mm;
1809         bool ret = false;
1810
1811         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1812
1813         ptl = pmd_trans_huge_lock(pmd, vma);
1814         if (!ptl)
1815                 goto out_unlocked;
1816
1817         orig_pmd = *pmd;
1818         if (is_huge_zero_pmd(orig_pmd))
1819                 goto out;
1820
1821         if (unlikely(!pmd_present(orig_pmd))) {
1822                 VM_BUG_ON(thp_migration_supported() &&
1823                                   !is_pmd_migration_entry(orig_pmd));
1824                 goto out;
1825         }
1826
1827         folio = pfn_folio(pmd_pfn(orig_pmd));
1828         /*
1829          * If other processes are mapping this folio, we can't discard it
1830          * unless they all do MADV_FREE, so skip the folio.
1831          */
1832         if (folio_estimated_sharers(folio) != 1)
1833                 goto out;
1834
1835         if (!folio_trylock(folio))
1836                 goto out;
1837
1838         /*
1839          * If the user wants to discard only part of the THP, split it so
1840          * MADV_FREE will deactivate just those pages.
1841          */
1842         if (next - addr != HPAGE_PMD_SIZE) {
1843                 folio_get(folio);
1844                 spin_unlock(ptl);
1845                 split_folio(folio);
1846                 folio_unlock(folio);
1847                 folio_put(folio);
1848                 goto out_unlocked;
1849         }
1850
1851         if (folio_test_dirty(folio))
1852                 folio_clear_dirty(folio);
1853         folio_unlock(folio);
1854
1855         if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1856                 pmdp_invalidate(vma, addr, pmd);
1857                 orig_pmd = pmd_mkold(orig_pmd);
1858                 orig_pmd = pmd_mkclean(orig_pmd);
1859
1860                 set_pmd_at(mm, addr, pmd, orig_pmd);
1861                 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1862         }
1863
1864         folio_mark_lazyfree(folio);
1865         ret = true;
1866 out:
1867         spin_unlock(ptl);
1868 out_unlocked:
1869         return ret;
1870 }
1871
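/* Free the page table deposited for a huge pmd and fix up the accounting. */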
1872 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1873 {
1874         pgtable_t pgtable;
1875
1876         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1877         pte_free(mm, pgtable);
1878         mm_dec_nr_ptes(mm);
1879 }
1880
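/*
 * Zap one huge pmd: clear the entry, update rmap and the memory counters,
 * and free any deposited page table. Returns 1 if a huge pmd was zapped,
 * 0 if the pmd turned out not to be huge.
 */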
1881 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1882                  pmd_t *pmd, unsigned long addr)
1883 {
1884         pmd_t orig_pmd;
1885         spinlock_t *ptl;
1886
1887         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1888
1889         ptl = __pmd_trans_huge_lock(pmd, vma);
1890         if (!ptl)
1891                 return 0;
1892         /*
1893          * Architectures like ppc64 look at the deposited pgtable when
1894          * calling pmdp_huge_get_and_clear, so only do the
1895          * pgtable_trans_huge_withdraw after finishing the pmdp-related
1896          * operations.
1897          */
1898         orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1899                                                 tlb->fullmm);
1900         arch_check_zapped_pmd(vma, orig_pmd);
1901         tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1902         if (vma_is_special_huge(vma)) {
1903                 if (arch_needs_pgtable_deposit())
1904                         zap_deposited_table(tlb->mm, pmd);
1905                 spin_unlock(ptl);
1906         } else if (is_huge_zero_pmd(orig_pmd)) {
1907                 zap_deposited_table(tlb->mm, pmd);
1908                 spin_unlock(ptl);
1909         } else {
1910                 struct folio *folio = NULL;
1911                 int flush_needed = 1;
1912
1913                 if (pmd_present(orig_pmd)) {
1914                         struct page *page = pmd_page(orig_pmd);
1915
1916                         folio = page_folio(page);
1917                         folio_remove_rmap_pmd(folio, page, vma);
1918                         VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1919                         VM_BUG_ON_PAGE(!PageHead(page), page);
1920                 } else if (thp_migration_supported()) {
1921                         swp_entry_t entry;
1922
1923                         VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1924                         entry = pmd_to_swp_entry(orig_pmd);
1925                         folio = pfn_swap_entry_folio(entry);
1926                         flush_needed = 0;
1927                 } else
1928                         WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1929
1930                 if (folio_test_anon(folio)) {
1931                         zap_deposited_table(tlb->mm, pmd);
1932                         add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1933                 } else {
1934                         if (arch_needs_pgtable_deposit())
1935                                 zap_deposited_table(tlb->mm, pmd);
1936                         add_mm_counter(tlb->mm, mm_counter_file(folio),
1937                                        -HPAGE_PMD_NR);
1938                 }
1939
1940                 spin_unlock(ptl);
1941                 if (flush_needed)
1942                         tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
1943         }
1944         return 1;
1945 }
1946
1947 #ifndef pmd_move_must_withdraw
1948 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1949                                          spinlock_t *old_pmd_ptl,
1950                                          struct vm_area_struct *vma)
1951 {
1952         /*
1953          * With the split pmd lock we also need to move the preallocated
1954          * PTE page table if new_pmd is on a different PMD page table.
1955          *
1956          * We also don't deposit and withdraw tables for file pages.
1957          */
1958         return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1959 }
1960 #endif
1961
1962 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1963 {
1964 #ifdef CONFIG_MEM_SOFT_DIRTY
1965         if (unlikely(is_pmd_migration_entry(pmd)))
1966                 pmd = pmd_swp_mksoft_dirty(pmd);
1967         else if (pmd_present(pmd))
1968                 pmd = pmd_mksoft_dirty(pmd);
1969 #endif
1970         return pmd;
1971 }
1972
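/*
 * Move a huge pmd from old_addr to new_addr for mremap(), moving the
 * deposited page table along with it when required. Returns true if the
 * pmd was moved, false if the caller must fall back to moving ptes.
 */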
1973 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1974                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
1975 {
1976         spinlock_t *old_ptl, *new_ptl;
1977         pmd_t pmd;
1978         struct mm_struct *mm = vma->vm_mm;
1979         bool force_flush = false;
1980
1981         /*
1982          * The destination pmd shouldn't be established, free_pgtables()
1983          * should have released it; but move_page_tables() might have already
1984          * inserted a page table, if racing against shmem/file collapse.
1985          */
1986         if (!pmd_none(*new_pmd)) {
1987                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1988                 return false;
1989         }
1990
1991         /*
1992          * We don't have to worry about the ordering of src and dst
1993          * ptlocks because exclusive mmap_lock prevents deadlock.
1994          */
1995         old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1996         if (old_ptl) {
1997                 new_ptl = pmd_lockptr(mm, new_pmd);
1998                 if (new_ptl != old_ptl)
1999                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2000                 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
2001                 if (pmd_present(pmd))
2002                         force_flush = true;
2003                 VM_BUG_ON(!pmd_none(*new_pmd));
2004
2005                 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
2006                         pgtable_t pgtable;
2007                         pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2008                         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
2009                 }
2010                 pmd = move_soft_dirty_pmd(pmd);
2011                 set_pmd_at(mm, new_addr, new_pmd, pmd);
2012                 if (force_flush)
2013                         flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
2014                 if (new_ptl != old_ptl)
2015                         spin_unlock(new_ptl);
2016                 spin_unlock(old_ptl);
2017                 return true;
2018         }
2019         return false;
2020 }
2021
2022 /*
2023  * Returns
2024  *  - 0 if PMD could not be locked
2025  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2026  *      or if prot_numa but THP migration is not supported
2027  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
2028  */
2029 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2030                     pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2031                     unsigned long cp_flags)
2032 {
2033         struct mm_struct *mm = vma->vm_mm;
2034         spinlock_t *ptl;
2035         pmd_t oldpmd, entry;
2036         bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
2037         bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2038         bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2039         int ret = 1;
2040
2041         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2042
2043         if (prot_numa && !thp_migration_supported())
2044                 return 1;
2045
2046         ptl = __pmd_trans_huge_lock(pmd, vma);
2047         if (!ptl)
2048                 return 0;
2049
2050 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2051         if (is_swap_pmd(*pmd)) {
2052                 swp_entry_t entry = pmd_to_swp_entry(*pmd);
2053                 struct folio *folio = pfn_swap_entry_folio(entry);
2054                 pmd_t newpmd;
2055
2056                 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
2057                 if (is_writable_migration_entry(entry)) {
2058                         /*
2059                          * A protection check is difficult, so just
2060                          * be safe and disable write access.
2061                          */
2062                         if (folio_test_anon(folio))
2063                                 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2064                         else
2065                                 entry = make_readable_migration_entry(swp_offset(entry));
2066                         newpmd = swp_entry_to_pmd(entry);
2067                         if (pmd_swp_soft_dirty(*pmd))
2068                                 newpmd = pmd_swp_mksoft_dirty(newpmd);
2069                 } else {
2070                         newpmd = *pmd;
2071                 }
2072
2073                 if (uffd_wp)
2074                         newpmd = pmd_swp_mkuffd_wp(newpmd);
2075                 else if (uffd_wp_resolve)
2076                         newpmd = pmd_swp_clear_uffd_wp(newpmd);
2077                 if (!pmd_same(*pmd, newpmd))
2078                         set_pmd_at(mm, addr, pmd, newpmd);
2079                 goto unlock;
2080         }
2081 #endif
2082
2083         if (prot_numa) {
2084                 struct folio *folio;
2085                 bool toptier;
2086                 /*
2087                  * Avoid trapping faults against the zero page. The read-only
2088                  * data is likely to be read-cached on the local CPU and
2089                  * local/remote hits to the zero page are not interesting.
2090                  */
2091                 if (is_huge_zero_pmd(*pmd))
2092                         goto unlock;
2093
2094                 if (pmd_protnone(*pmd))
2095                         goto unlock;
2096
2097                 folio = page_folio(pmd_page(*pmd));
2098                 toptier = node_is_toptier(folio_nid(folio));
2099                 /*
2100                  * Skip scanning the top-tier node if normal NUMA
2101                  * balancing is disabled.
2102                  */
2103                 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
2104                     toptier)
2105                         goto unlock;
2106
2107                 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
2108                     !toptier)
2109                         folio_xchg_access_time(folio,
2110                                                jiffies_to_msecs(jiffies));
2111         }
2112         /*
2113          * In the prot_numa case we are under mmap_read_lock(mm). It's critical
2114          * not to clear the pmd intermittently, to avoid racing with MADV_DONTNEED,
2115          * which is also run under mmap_read_lock(mm):
2116          *
2117          *      CPU0:                           CPU1:
2118          *                              change_huge_pmd(prot_numa=1)
2119          *                               pmdp_huge_get_and_clear_notify()
2120          * madvise_dontneed()
2121          *  zap_pmd_range()
2122          *   pmd_trans_huge(*pmd) == 0 (without ptl)
2123          *   // skip the pmd
2124          *                               set_pmd_at();
2125          *                               // pmd is re-established
2126          *
2127          * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
2128          * which may break userspace.
2129          *
2130          * pmdp_invalidate_ad() is required to make sure we don't miss
2131          * dirty/young flags set by hardware.
2132          */
2133         oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2134
2135         entry = pmd_modify(oldpmd, newprot);
2136         if (uffd_wp)
2137                 entry = pmd_mkuffd_wp(entry);
2138         else if (uffd_wp_resolve)
2139                 /*
2140                  * Leave the write bit to be handled by the page fault
2141                  * handler, so that things like COW can be properly
2142                  * handled.
2143                  */
2144                 entry = pmd_clear_uffd_wp(entry);
2145
2146         /* See change_pte_range(). */
2147         if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2148             can_change_pmd_writable(vma, addr, entry))
2149                 entry = pmd_mkwrite(entry, vma);
2150
2151         ret = HPAGE_PMD_NR;
2152         set_pmd_at(mm, addr, pmd, entry);
2153
2154         if (huge_pmd_needs_flush(oldpmd, entry))
2155                 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
2156 unlock:
2157         spin_unlock(ptl);
2158         return ret;
2159 }
2160
2161 #ifdef CONFIG_USERFAULTFD
2162 /*
2163  * The PT lock for src_pmd and the locks on dst_vma/src_vma (for reading) are
2164  * held by the caller, but this function must return after releasing the
2165  * page table lock. Just move the page from src_pmd to dst_pmd if possible.
2166  * Return zero if the page was moved, -EAGAIN if the operation needs to be
2167  * repeated by the caller, or another error code in case of failure.
2168  */
2169 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2170                         struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2171                         unsigned long dst_addr, unsigned long src_addr)
2172 {
2173         pmd_t _dst_pmd, src_pmdval;
2174         struct page *src_page;
2175         struct folio *src_folio;
2176         struct anon_vma *src_anon_vma;
2177         spinlock_t *src_ptl, *dst_ptl;
2178         pgtable_t src_pgtable;
2179         struct mmu_notifier_range range;
2180         int err = 0;
2181
2182         src_pmdval = *src_pmd;
2183         src_ptl = pmd_lockptr(mm, src_pmd);
2184
2185         lockdep_assert_held(src_ptl);
2186         vma_assert_locked(src_vma);
2187         vma_assert_locked(dst_vma);
2188
2189         /* Sanity checks before the operation */
2190         if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2191             WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2192                 spin_unlock(src_ptl);
2193                 return -EINVAL;
2194         }
2195
2196         if (!pmd_trans_huge(src_pmdval)) {
2197                 spin_unlock(src_ptl);
2198                 if (is_pmd_migration_entry(src_pmdval)) {
2199                         pmd_migration_entry_wait(mm, &src_pmdval);
2200                         return -EAGAIN;
2201                 }
2202                 return -ENOENT;
2203         }
2204
2205         src_page = pmd_page(src_pmdval);
2206
2207         if (!is_huge_zero_pmd(src_pmdval)) {
2208                 if (unlikely(!PageAnonExclusive(src_page))) {
2209                         spin_unlock(src_ptl);
2210                         return -EBUSY;
2211                 }
2212
2213                 src_folio = page_folio(src_page);
2214                 folio_get(src_folio);
2215         } else
2216                 src_folio = NULL;
2217
2218         spin_unlock(src_ptl);
2219
2220         flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2221         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2222                                 src_addr + HPAGE_PMD_SIZE);
2223         mmu_notifier_invalidate_range_start(&range);
2224
2225         if (src_folio) {
2226                 folio_lock(src_folio);
2227
2228                 /*
2229                  * split_huge_page walks the anon_vma chain without the page
2230                  * lock. Serialize against it with the anon_vma lock; the page
2231                  * lock is not enough.
2232                  */
2233                 src_anon_vma = folio_get_anon_vma(src_folio);
2234                 if (!src_anon_vma) {
2235                         err = -EAGAIN;
2236                         goto unlock_folio;
2237                 }
2238                 anon_vma_lock_write(src_anon_vma);
2239         } else
2240                 src_anon_vma = NULL;
2241
2242         dst_ptl = pmd_lockptr(mm, dst_pmd);
2243         double_pt_lock(src_ptl, dst_ptl);
2244         if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2245                      !pmd_same(*dst_pmd, dst_pmdval))) {
2246                 err = -EAGAIN;
2247                 goto unlock_ptls;
2248         }
2249         if (src_folio) {
2250                 if (folio_maybe_dma_pinned(src_folio) ||
2251                     !PageAnonExclusive(&src_folio->page)) {
2252                         err = -EBUSY;
2253                         goto unlock_ptls;
2254                 }
2255
2256                 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2257                     WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2258                         err = -EBUSY;
2259                         goto unlock_ptls;
2260                 }
2261
2262                 folio_move_anon_rmap(src_folio, dst_vma);
2263                 WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
2264
2265                 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2266                 /* Folio got pinned from under us. Put it back and fail the move. */
2267                 if (folio_maybe_dma_pinned(src_folio)) {
2268                         set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2269                         err = -EBUSY;
2270                         goto unlock_ptls;
2271                 }
2272
2273                 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2274                 /* Follow mremap() behavior and treat the entry dirty after the move */
2275                 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2276         } else {
2277                 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2278                 _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
2279         }
2280         set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2281
2282         src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2283         pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2284 unlock_ptls:
2285         double_pt_unlock(src_ptl, dst_ptl);
2286         if (src_anon_vma) {
2287                 anon_vma_unlock_write(src_anon_vma);
2288                 put_anon_vma(src_anon_vma);
2289         }
2290 unlock_folio:
2291         /* unblock rmap walks */
2292         if (src_folio)
2293                 folio_unlock(src_folio);
2294         mmu_notifier_invalidate_range_end(&range);
2295         if (src_folio)
2296                 folio_put(src_folio);
2297         return err;
2298 }
2299 #endif /* CONFIG_USERFAULTFD */
2300
2301 /*
2302  * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
2303  *
2304  * Note that if it returns a page table lock pointer, this routine returns
2305  * without unlocking that lock, so callers must unlock it.
2306  */
2307 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2308 {
2309         spinlock_t *ptl;
2310         ptl = pmd_lock(vma->vm_mm, pmd);
2311         if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2312                         pmd_devmap(*pmd)))
2313                 return ptl;
2314         spin_unlock(ptl);
2315         return NULL;
2316 }
2317
2318 /*
2319  * Returns the page table lock pointer if a given pud maps a thp, NULL otherwise.
2320  *
2321  * Note that if it returns a page table lock pointer, this routine returns
2322  * without unlocking that lock, so callers must unlock it.
2323  */
2324 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2325 {
2326         spinlock_t *ptl;
2327
2328         ptl = pud_lock(vma->vm_mm, pud);
2329         if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2330                 return ptl;
2331         spin_unlock(ptl);
2332         return NULL;
2333 }
2334
2335 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
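/*
 * Zap a huge pud. Only special (DAX) mappings are supported here;
 * anonymous huge puds do not exist yet.
 */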
2336 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2337                  pud_t *pud, unsigned long addr)
2338 {
2339         spinlock_t *ptl;
2340
2341         ptl = __pud_trans_huge_lock(pud, vma);
2342         if (!ptl)
2343                 return 0;
2344
2345         pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2346         tlb_remove_pud_tlb_entry(tlb, pud, addr);
2347         if (vma_is_special_huge(vma)) {
2348                 spin_unlock(ptl);
2349                 /* No zero page support yet */
2350         } else {
2351                 /* No support for anonymous PUD pages yet */
2352                 BUG();
2353         }
2354         return 1;
2355 }
2356
2357 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2358                 unsigned long haddr)
2359 {
2360         VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2361         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2362         VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2363         VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2364
2365         count_vm_event(THP_SPLIT_PUD);
2366
2367         pudp_huge_clear_flush(vma, haddr, pud);
2368 }
2369
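/*
 * Split a huge pud mapping at @address. Since there are no anonymous huge
 * puds, this currently just clears and flushes the pud entry.
 */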
2370 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2371                 unsigned long address)
2372 {
2373         spinlock_t *ptl;
2374         struct mmu_notifier_range range;
2375
2376         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2377                                 address & HPAGE_PUD_MASK,
2378                                 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2379         mmu_notifier_invalidate_range_start(&range);
2380         ptl = pud_lock(vma->vm_mm, pud);
2381         if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2382                 goto out;
2383         __split_huge_pud_locked(vma, pud, range.start);
2384
2385 out:
2386         spin_unlock(ptl);
2387         mmu_notifier_invalidate_range_end(&range);
2388 }
2389 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2390
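/*
 * Replace a huge zero-page pmd with a page table mapping the pte-level
 * zero page for every address in the range, preserving the uffd-wp bit.
 */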
2391 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2392                 unsigned long haddr, pmd_t *pmd)
2393 {
2394         struct mm_struct *mm = vma->vm_mm;
2395         pgtable_t pgtable;
2396         pmd_t _pmd, old_pmd;
2397         unsigned long addr;
2398         pte_t *pte;
2399         int i;
2400
2401         /*
2402          * Leave the pmd empty until the ptes are filled. Note that it is
2403          * fine to delay the notification until
2404          * mmu_notifier_invalidate_range_end(), as we are replacing a
2405          * write-protected zero pmd page with write-protected zero pte pages.
2406          *
2407          * See Documentation/mm/mmu_notifier.rst
2408          */
2409         old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2410
2411         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2412         pmd_populate(mm, &_pmd, pgtable);
2413
2414         pte = pte_offset_map(&_pmd, haddr);
2415         VM_BUG_ON(!pte);
2416         for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2417                 pte_t entry;
2418
2419                 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2420                 entry = pte_mkspecial(entry);
2421                 if (pmd_uffd_wp(old_pmd))
2422                         entry = pte_mkuffd_wp(entry);
2423                 VM_BUG_ON(!pte_none(ptep_get(pte)));
2424                 set_pte_at(mm, addr, pte, entry);
2425                 pte++;
2426         }
2427         pte_unmap(pte - 1);
2428         smp_wmb(); /* make pte visible before pmd */
2429         pmd_populate(mm, pmd, pgtable);
2430 }
2431
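/*
 * Split a huge pmd into a page table of ptes with the pmd lock held.
 * File-backed and special mappings are simply zapped; for anonymous
 * mappings the new ptes inherit the dirty/young/soft-dirty/uffd-wp state
 * of the pmd, and with @freeze they are written as migration entries.
 */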
2432 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2433                 unsigned long haddr, bool freeze)
2434 {
2435         struct mm_struct *mm = vma->vm_mm;
2436         struct folio *folio;
2437         struct page *page;
2438         pgtable_t pgtable;
2439         pmd_t old_pmd, _pmd;
2440         bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2441         bool anon_exclusive = false, dirty = false;
2442         unsigned long addr;
2443         pte_t *pte;
2444         int i;
2445
2446         VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2447         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2448         VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2449         VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2450                                 && !pmd_devmap(*pmd));
2451
2452         count_vm_event(THP_SPLIT_PMD);
2453
2454         if (!vma_is_anonymous(vma)) {
2455                 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2456                 /*
2457                  * We are going to unmap this huge page, so
2458                  * just go ahead and zap it.
2459                  */
2460                 if (arch_needs_pgtable_deposit())
2461                         zap_deposited_table(mm, pmd);
2462                 if (vma_is_special_huge(vma))
2463                         return;
2464                 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2465                         swp_entry_t entry;
2466
2467                         entry = pmd_to_swp_entry(old_pmd);
2468                         folio = pfn_swap_entry_folio(entry);
2469                 } else {
2470                         page = pmd_page(old_pmd);
2471                         folio = page_folio(page);
2472                         if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
2473                                 folio_mark_dirty(folio);
2474                         if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2475                                 folio_set_referenced(folio);
2476                         folio_remove_rmap_pmd(folio, page, vma);
2477                         folio_put(folio);
2478                 }
2479                 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
2480                 return;
2481         }
2482
2483         if (is_huge_zero_pmd(*pmd)) {
2484                 /*
2485                  * FIXME: Do we want to invalidate the secondary mmu by calling
2486                  * mmu_notifier_arch_invalidate_secondary_tlbs()? See the
2487                  * comments below inside __split_huge_pmd().
2488                  *
2489                  * We are going from a write-protected huge zero page to
2490                  * write-protected small zero pages, so it does not seem useful
2491                  * to invalidate the secondary mmu at this time.
2492                  */
2493                 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2494         }
2495
2496         /*
2497          * Up to this point the pmd is present and huge and userland has full
2498          * access to the hugepage during the split (which happens in
2499          * place). If we overwrite the pmd with the not-huge version pointing
2500          * to the pte here (which of course we could if all CPUs were bug
2501          * free), userland could trigger a small page size TLB miss on the
2502          * small sized TLB while the hugepage TLB entry is still established in
2503          * the huge TLB. Some CPUs don't like that.
2504          * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2505          * 383 on page 105. Intel should be safe but also warns that it's
2506          * only safe if the permission and cache attributes of the two entries
2507          * loaded into the two TLBs are identical (which should be the case here).
2508          * But it is generally safer to never allow small and huge TLB entries
2509          * for the same virtual address to be loaded simultaneously. So instead
2510          * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2511          * current pmd notpresent (atomically because here the pmd_trans_huge
2512          * must remain set at all times on the pmd until the split is complete
2513          * for this pmd), then we flush the SMP TLB and finally we write the
2514          * non-huge version of the pmd entry with pmd_populate.
2515          */
2516         old_pmd = pmdp_invalidate(vma, haddr, pmd);
2517
2518         pmd_migration = is_pmd_migration_entry(old_pmd);
2519         if (unlikely(pmd_migration)) {
2520                 swp_entry_t entry;
2521
2522                 entry = pmd_to_swp_entry(old_pmd);
2523                 page = pfn_swap_entry_to_page(entry);
2524                 write = is_writable_migration_entry(entry);
2525                 if (PageAnon(page))
2526                         anon_exclusive = is_readable_exclusive_migration_entry(entry);
2527                 young = is_migration_entry_young(entry);
2528                 dirty = is_migration_entry_dirty(entry);
2529                 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2530                 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2531         } else {
2532                 page = pmd_page(old_pmd);
2533                 folio = page_folio(page);
2534                 if (pmd_dirty(old_pmd)) {
2535                         dirty = true;
2536                         folio_set_dirty(folio);
2537                 }
2538                 write = pmd_write(old_pmd);
2539                 young = pmd_young(old_pmd);
2540                 soft_dirty = pmd_soft_dirty(old_pmd);
2541                 uffd_wp = pmd_uffd_wp(old_pmd);
2542
2543                 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2544                 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2545
2546                 /*
2547                  * Without "freeze", we'll simply split the PMD, propagating the
2548                  * PageAnonExclusive() flag for each PTE by setting it for
2549                  * each subpage -- no need to (temporarily) clear.
2550                  *
2551                  * With "freeze" we want to replace mapped pages by
2552                  * migration entries right away. This is only possible if we
2553                  * managed to clear PageAnonExclusive() -- see
2554                  * set_pmd_migration_entry().
2555                  *
2556                  * In case we cannot clear PageAnonExclusive(), split the PMD
2557                  * only and let try_to_migrate_one() fail later.
2558                  *
2559                  * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
2560                  */
2561                 anon_exclusive = PageAnonExclusive(page);
2562                 if (freeze && anon_exclusive &&
2563                     folio_try_share_anon_rmap_pmd(folio, page))
2564                         freeze = false;
2565                 if (!freeze) {
2566                         rmap_t rmap_flags = RMAP_NONE;
2567
2568                         folio_ref_add(folio, HPAGE_PMD_NR - 1);
2569                         if (anon_exclusive)
2570                                 rmap_flags |= RMAP_EXCLUSIVE;
2571                         folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2572                                                  vma, haddr, rmap_flags);
2573                 }
2574         }
2575
2576         /*
2577          * Withdraw the table only after we mark the pmd entry invalid.
2578          * This is critical for some architectures (Power).
2579          */
2580         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2581         pmd_populate(mm, &_pmd, pgtable);
2582
2583         pte = pte_offset_map(&_pmd, haddr);
2584         VM_BUG_ON(!pte);
2585
2586         /*
2587          * Note that NUMA hinting access restrictions are not transferred to
2588          * avoid any possibility of altering permissions across VMAs.
2589          */
2590         if (freeze || pmd_migration) {
2591                 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2592                         pte_t entry;
2593                         swp_entry_t swp_entry;
2594
2595                         if (write)
2596                                 swp_entry = make_writable_migration_entry(
2597                                                         page_to_pfn(page + i));
2598                         else if (anon_exclusive)
2599                                 swp_entry = make_readable_exclusive_migration_entry(
2600                                                         page_to_pfn(page + i));
2601                         else
2602                                 swp_entry = make_readable_migration_entry(
2603                                                         page_to_pfn(page + i));
2604                         if (young)
2605                                 swp_entry = make_migration_entry_young(swp_entry);
2606                         if (dirty)
2607                                 swp_entry = make_migration_entry_dirty(swp_entry);
2608                         entry = swp_entry_to_pte(swp_entry);
2609                         if (soft_dirty)
2610                                 entry = pte_swp_mksoft_dirty(entry);
2611                         if (uffd_wp)
2612                                 entry = pte_swp_mkuffd_wp(entry);
2613
2614                         VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2615                         set_pte_at(mm, addr, pte + i, entry);
2616                 }
2617         } else {
2618                 pte_t entry;
2619
2620                 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
2621                 if (write)
2622                         entry = pte_mkwrite(entry, vma);
2623                 if (!young)
2624                         entry = pte_mkold(entry);
2625                 /* NOTE: this may set soft-dirty too on some archs */
2626                 if (dirty)
2627                         entry = pte_mkdirty(entry);
2628                 if (soft_dirty)
2629                         entry = pte_mksoft_dirty(entry);
2630                 if (uffd_wp)
2631                         entry = pte_mkuffd_wp(entry);
2632
2633                 for (i = 0; i < HPAGE_PMD_NR; i++)
2634                         VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2635
2636                 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
2637         }
2638         pte_unmap(pte);
2639
2640         if (!pmd_migration)
2641                 folio_remove_rmap_pmd(folio, page, vma);
2642         if (freeze)
2643                 put_page(page);
2644
2645         smp_wmb(); /* make pte visible before pmd */
2646         pmd_populate(mm, pmd, pgtable);
2647 }
2648
2649 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2650                 unsigned long address, bool freeze, struct folio *folio)
2651 {
2652         spinlock_t *ptl;
2653         struct mmu_notifier_range range;
2654
2655         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2656                                 address & HPAGE_PMD_MASK,
2657                                 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2658         mmu_notifier_invalidate_range_start(&range);
2659         ptl = pmd_lock(vma->vm_mm, pmd);
2660
2661         /*
2662          * If the caller asks to set up a migration entry, we need a folio to
2663          * check the pmd against. Otherwise we can end up replacing the wrong folio.
2664          */
2665         VM_BUG_ON(freeze && !folio);
2666         VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2667
2668         if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2669             is_pmd_migration_entry(*pmd)) {
2670                 /*
2671                  * It's safe to call pmd_page() when the folio is set because
2672                  * the pmd is guaranteed to be present.
2673                  */
2674                 if (folio && folio != page_folio(pmd_page(*pmd)))
2675                         goto out;
2676                 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2677         }
2678
2679 out:
2680         spin_unlock(ptl);
2681         mmu_notifier_invalidate_range_end(&range);
2682 }
2683
2684 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2685                 bool freeze, struct folio *folio)
2686 {
2687         pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2688
2689         if (!pmd)
2690                 return;
2691
2692         __split_huge_pmd(vma, pmd, address, freeze, folio);
2693 }
2694
2695 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2696 {
2697         /*
2698          * If the new address isn't hpage-aligned and it could previously
2699          * contain a hugepage, check if we need to split a huge pmd.
2700          */
2701         if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2702             range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2703                          ALIGN(address, HPAGE_PMD_SIZE)))
2704                 split_huge_pmd_address(vma, address, false, NULL);
2705 }
2706
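/*
 * Called when a vma's boundaries change: split any huge pmd that the new
 * start or end (or the adjusted next vma) would otherwise leave partially
 * mapped.
 */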
2707 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2708                              unsigned long start,
2709                              unsigned long end,
2710                              long adjust_next)
2711 {
2712         /* Check if we need to split start first. */
2713         split_huge_pmd_if_needed(vma, start);
2714
2715         /* Check if we need to split end next. */
2716         split_huge_pmd_if_needed(vma, end);
2717
2718         /*
2719          * If we're also updating the next vma vm_start,
2720          * check if we need to split it.
2721          */
2722         if (adjust_next > 0) {
2723                 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2724                 unsigned long nstart = next->vm_start;
2725                 nstart += adjust_next;
2726                 split_huge_pmd_if_needed(next, nstart);
2727         }
2728 }
2729
2730 static void unmap_folio(struct folio *folio)
2731 {
2732         enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
2733                 TTU_BATCH_FLUSH;
2734
2735         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2736
2737         if (folio_test_pmd_mappable(folio))
2738                 ttu_flags |= TTU_SPLIT_HUGE_PMD;
2739
2740         /*
2741          * Anon pages need migration entries to preserve them, but file
2742          * pages can simply be left unmapped, then faulted back on demand.
2743          * If that is ever changed (perhaps for mlock), update remap_page().
2744          */
2745         if (folio_test_anon(folio))
2746                 try_to_migrate(folio, ttu_flags);
2747         else
2748                 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2749
2750         try_to_unmap_flush();
2751 }
2752
2753 static void remap_page(struct folio *folio, unsigned long nr)
2754 {
2755         int i = 0;
2756
2757         /* If unmap_folio() uses try_to_migrate() on file, remove this check */
2758         if (!folio_test_anon(folio))
2759                 return;
2760         for (;;) {
2761                 remove_migration_ptes(folio, folio, true);
2762                 i += folio_nr_pages(folio);
2763                 if (i >= nr)
2764                         break;
2765                 folio = folio_next(folio);
2766         }
2767 }
2768
2769 static void lru_add_page_tail(struct page *head, struct page *tail,
2770                 struct lruvec *lruvec, struct list_head *list)
2771 {
2772         VM_BUG_ON_PAGE(!PageHead(head), head);
2773         VM_BUG_ON_PAGE(PageLRU(tail), head);
2774         lockdep_assert_held(&lruvec->lru_lock);
2775
2776         if (list) {
2777                 /* page reclaim is reclaiming a huge page */
2778                 VM_WARN_ON(PageLRU(head));
2779                 get_page(tail);
2780                 list_add_tail(&tail->lru, list);
2781         } else {
2782                 /* head is still on lru (and we have it frozen) */
2783                 VM_WARN_ON(!PageLRU(head));
2784                 if (PageUnevictable(tail))
2785                         tail->mlock_count = 0;
2786                 else
2787                         list_add_tail(&tail->lru, &head->lru);
2788                 SetPageLRU(tail);
2789         }
2790 }
2791
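/*
 * Turn the @tail subpage of @folio into a separate folio of order
 * @new_order: clone the relevant page flags, fix up ->mapping, ->index and
 * the swap entry, clear the compound state, unfreeze the refcount and add
 * the new folio to the LRU.
 */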
2792 static void __split_huge_page_tail(struct folio *folio, int tail,
2793                 struct lruvec *lruvec, struct list_head *list,
2794                 unsigned int new_order)
2795 {
2796         struct page *head = &folio->page;
2797         struct page *page_tail = head + tail;
2798         /*
2799          * Careful: new_folio is not a "real" folio before we cleared PageTail.
2800          * Don't pass it around before clear_compound_head().
2801          */
2802         struct folio *new_folio = (struct folio *)page_tail;
2803
2804         VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2805
2806         /*
2807          * Clone page flags before unfreezing refcount.
2808          *
2809          * A successful get_page_unless_zero() might be followed by a flags
2810          * change, for example by lock_page(), which sets PG_waiters.
2811          *
2812          * Note that for mapped sub-pages of an anonymous THP,
2813          * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
2814          * the migration entry instead from where remap_page() will restore it.
2815          * We can still have PG_anon_exclusive set on effectively unmapped and
2816          * unreferenced sub-pages of an anonymous THP: we can simply drop
2817          * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2818          */
2819         page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2820         page_tail->flags |= (head->flags &
2821                         ((1L << PG_referenced) |
2822                          (1L << PG_swapbacked) |
2823                          (1L << PG_swapcache) |
2824                          (1L << PG_mlocked) |
2825                          (1L << PG_uptodate) |
2826                          (1L << PG_active) |
2827                          (1L << PG_workingset) |
2828                          (1L << PG_locked) |
2829                          (1L << PG_unevictable) |
2830 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
2831                          (1L << PG_arch_2) |
2832                          (1L << PG_arch_3) |
2833 #endif
2834                          (1L << PG_dirty) |
2835                          LRU_GEN_MASK | LRU_REFS_MASK));
2836
2837         /* ->mapping in first and second tail page is replaced by other uses */
2838         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2839                         page_tail);
2840         page_tail->mapping = head->mapping;
2841         page_tail->index = head->index + tail;
2842
2843         /*
2844          * page->private should not be set in tail pages. Fix up and warn once
2845          * if private is unexpectedly set.
2846          */
2847         if (unlikely(page_tail->private)) {
2848                 VM_WARN_ON_ONCE_PAGE(true, page_tail);
2849                 page_tail->private = 0;
2850         }
2851         if (folio_test_swapcache(folio))
2852                 new_folio->swap.val = folio->swap.val + tail;
2853
2854         /* Page flags must be visible before we make the page non-compound. */
2855         smp_wmb();
2856
2857         /*
2858          * Clear PageTail before unfreezing page refcount.
2859          *
2860          * A put_page() might follow a successful get_page_unless_zero(),
2861          * and it needs a correct compound_head().
2862          */
2863         clear_compound_head(page_tail);
2864         if (new_order) {
2865                 prep_compound_page(page_tail, new_order);
2866                 folio_prep_large_rmappable(new_folio);
2867         }
2868
2869         /* Finally unfreeze refcount. Additional reference from page cache. */
2870         page_ref_unfreeze(page_tail,
2871                 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
2872                              folio_nr_pages(new_folio) : 0));
2873
2874         if (folio_test_young(folio))
2875                 folio_set_young(new_folio);
2876         if (folio_test_idle(folio))
2877                 folio_set_idle(new_folio);
2878
2879         folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
2880
2881         /*
2882          * Always add to the tail because some iterators expect new
2883          * pages to show up after the currently processed elements,
2884          * e.g. migrate_pages().
2885          */
2886         lru_add_page_tail(head, page_tail, lruvec, list);
2887 }
2888
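/*
 * Split the refcount-frozen folio that contains @page into folios of order
 * @new_order, fix up the page cache or swap cache slots for the new tails,
 * and unlock and release every resulting folio except the one holding
 * @page. Called with irqs disabled; re-enables them when done.
 */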
2889 static void __split_huge_page(struct page *page, struct list_head *list,
2890                 pgoff_t end, unsigned int new_order)
2891 {
2892         struct folio *folio = page_folio(page);
2893         struct page *head = &folio->page;
2894         struct lruvec *lruvec;
2895         struct address_space *swap_cache = NULL;
2896         unsigned long offset = 0;
2897         int i, nr_dropped = 0;
2898         unsigned int new_nr = 1 << new_order;
2899         int order = folio_order(folio);
2900         unsigned int nr = 1 << order;
2901
2902         /* Complete memcg work before adding pages to the LRU */
2903         split_page_memcg(head, order, new_order);
2904
2905         if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2906                 offset = swp_offset(folio->swap);
2907                 swap_cache = swap_address_space(folio->swap);
2908                 xa_lock(&swap_cache->i_pages);
2909         }
2910
2911         /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2912         lruvec = folio_lruvec_lock(folio);
2913
2914         ClearPageHasHWPoisoned(head);
2915
2916         for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
2917                 __split_huge_page_tail(folio, i, lruvec, list, new_order);
2918                 /* Some pages can be beyond EOF: drop them from page cache */
2919                 if (head[i].index >= end) {
2920                         struct folio *tail = page_folio(head + i);
2921
2922                         if (shmem_mapping(folio->mapping))
2923                                 nr_dropped++;
2924                         else if (folio_test_clear_dirty(tail))
2925                                 folio_account_cleaned(tail,
2926                                         inode_to_wb(folio->mapping->host));
2927                         __filemap_remove_folio(tail, NULL);
2928                         folio_put(tail);
2929                 } else if (!PageAnon(page)) {
2930                         __xa_store(&folio->mapping->i_pages, head[i].index,
2931                                         head + i, 0);
2932                 } else if (swap_cache) {
2933                         __xa_store(&swap_cache->i_pages, offset + i,
2934                                         head + i, 0);
2935                 }
2936         }
2937
2938         if (!new_order)
2939                 ClearPageCompound(head);
2940         else {
2941                 struct folio *new_folio = (struct folio *)head;
2942
2943                 folio_set_order(new_folio, new_order);
2944         }
2945         unlock_page_lruvec(lruvec);
2946         /* Caller disabled irqs, so they are still disabled here */
2947
2948         split_page_owner(head, order, new_order);
2949
2950         /* See comment in __split_huge_page_tail() */
2951         if (folio_test_anon(folio)) {
2952                 /* Additional pin to swap cache */
2953                 if (folio_test_swapcache(folio)) {
2954                         folio_ref_add(folio, 1 + new_nr);
2955                         xa_unlock(&swap_cache->i_pages);
2956                 } else {
2957                         folio_ref_inc(folio);
2958                 }
2959         } else {
2960                 /* Additional pin to page cache */
2961                 folio_ref_add(folio, 1 + new_nr);
2962                 xa_unlock(&folio->mapping->i_pages);
2963         }
2964         local_irq_enable();
2965
2966         if (nr_dropped)
2967                 shmem_uncharge(folio->mapping->host, nr_dropped);
2968         remap_page(folio, nr);
2969
2970         if (folio_test_swapcache(folio))
2971                 split_swap_cluster(folio->swap);
2972
2973         /*
2974          * Set page to its compound_head when splitting to non-order-0 pages,
2975          * so we can skip unlocking it below: PG_locked is transferred to the
2976          * compound_head of the page and the caller will unlock it.
2977          */
2978         if (new_order)
2979                 page = compound_head(page);
2980
2981         for (i = 0; i < nr; i += new_nr) {
2982                 struct page *subpage = head + i;
2983                 struct folio *new_folio = page_folio(subpage);
2984                 if (subpage == page)
2985                         continue;
2986                 folio_unlock(new_folio);
2987
2988                 /*
2989                  * Subpages may be freed if there wasn't any mapping,
2990                  * e.g. if add_to_swap() is running on an LRU page that
2991                  * had its mapping zapped. Freeing these pages requires
2992                  * taking the lru_lock, so we do the put_page of the
2993                  * tail pages after the split is complete.
2994                  */
2995                 free_page_and_swap_cache(subpage);
2996         }
2997 }
2998
2999 /* Racy check whether the huge page can be split */
3000 bool can_split_folio(struct folio *folio, int *pextra_pins)
3001 {
3002         int extra_pins;
3003
3004         /* Additional pins from page cache */
3005         if (folio_test_anon(folio))
3006                 extra_pins = folio_test_swapcache(folio) ?
3007                                 folio_nr_pages(folio) : 0;
3008         else
3009                 extra_pins = folio_nr_pages(folio);
3010         if (pextra_pins)
3011                 *pextra_pins = extra_pins;
3012         return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
3013 }
3014
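/*
 * A worked example of the can_split_folio() check above, assuming an
 * anonymous folio that is PMD-mapped exactly once, is not in the swap
 * cache, and is pinned only by the caller: extra_pins is 0, the mapping
 * and the caller's pin each account for one reference, so
 * folio_mapcount() == 1 and folio_ref_count() == 2, and the check
 * 1 == 2 - 0 - 1 succeeds, i.e. the folio looks splittable.
 */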
3015 /*
3016  * This function splits a huge page into pages of @new_order. @page can point
3017  * to any subpage of the huge page to split. The split doesn't change the
3018  * position of @page.
3019  *
3020  * NOTE: order-1 anonymous folio is not supported because _deferred_list,
3021  * which is used by partially mapped folios, is stored in subpage 2 and an
3022  * order-1 folio only has subpage 0 and 1. File-backed order-1 folios are OK,
3023  * since they do not use _deferred_list.
3024  *
3025  * The caller must hold a pin on the @page, otherwise the split fails with
3026  * -EBUSY. The huge page must be locked.
3027  *
3028  * If @list is null, tail pages will be added to the LRU list; otherwise, to @list.
3029  *
3030  * Pages in new_order will inherit mapping, flags, and so on from the hugepage.
3031  *
3032  * The GUP pin and PG_locked are transferred to @page or to the compound page
3033  * @page belongs to. The remaining subpages can be freed if they are not mapped.
3034  *
3035  * Returns 0 if the hugepage is split successfully.
3036  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
3037  * us.
3038  */
3039 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
3040                                      unsigned int new_order)
3041 {
3042         struct folio *folio = page_folio(page);
3043         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3044         /* reset xarray order to new order after split */
3045         XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
3046         struct anon_vma *anon_vma = NULL;
3047         struct address_space *mapping = NULL;
3048         int extra_pins, ret;
3049         pgoff_t end;
3050         bool is_hzp;
3051
3052         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3053         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3054
3055         if (new_order >= folio_order(folio))
3056                 return -EINVAL;
3057
3058         /* Cannot split anonymous THP to order-1 */
3059         if (new_order == 1 && folio_test_anon(folio)) {
3060                 VM_WARN_ONCE(1, "Cannot split to order-1 folio");
3061                 return -EINVAL;
3062         }
3063
3064         if (new_order) {
3065                 /* Only swapping a whole PMD-mapped folio is supported */
3066                 if (folio_test_swapcache(folio))
3067                         return -EINVAL;
3068                 /* Split shmem folio to non-zero order not supported */
3069                 if (shmem_mapping(folio->mapping)) {
3070                         VM_WARN_ONCE(1,
3071                                 "Cannot split shmem folio to non-0 order");
3072                         return -EINVAL;
3073                 }
3074                 /* No split if the file system does not support large folio */
3075                 if (!mapping_large_folio_support(folio->mapping)) {
3076                         VM_WARN_ONCE(1,
3077                                 "Cannot split file folio to non-0 order");
3078                         return -EINVAL;
3079                 }
3080         }
3081
3082
3083         is_hzp = is_huge_zero_page(&folio->page);
3084         if (is_hzp) {
3085                 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
3086                 return -EBUSY;
3087         }
3088
3089         if (folio_test_writeback(folio))
3090                 return -EBUSY;
3091
3092         if (folio_test_anon(folio)) {
3093                 /*
3094                  * The caller does not necessarily hold an mmap_lock that would
3095                  * prevent the anon_vma disappearing, so we first take a
3096                  * reference to it and then lock the anon_vma for write. This
3097                  * is similar to folio_lock_anon_vma_read except the write lock
3098                  * is taken to serialise against parallel split or collapse
3099                  * operations.
3100                  */
3101                 anon_vma = folio_get_anon_vma(folio);
3102                 if (!anon_vma) {
3103                         ret = -EBUSY;
3104                         goto out;
3105                 }
3106                 end = -1;
3107                 mapping = NULL;
3108                 anon_vma_lock_write(anon_vma);
3109         } else {
3110                 gfp_t gfp;
3111
3112                 mapping = folio->mapping;
3113
3114                 /* Truncated ? */
3115                 if (!mapping) {
3116                         ret = -EBUSY;
3117                         goto out;
3118                 }
3119
3120                 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3121                                                         GFP_RECLAIM_MASK);
3122
3123                 if (!filemap_release_folio(folio, gfp)) {
3124                         ret = -EBUSY;
3125                         goto out;
3126                 }
3127
3128                 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
3129                 if (xas_error(&xas)) {
3130                         ret = xas_error(&xas);
3131                         goto out;
3132                 }
3133
3134                 anon_vma = NULL;
3135                 i_mmap_lock_read(mapping);
3136
3137                 /*
3138                  * __split_huge_page() may need to trim off pages beyond EOF:
3139                  * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3140                  * which cannot be nested inside the page tree lock. So note
3141                  * end now: i_size itself may be changed at any moment, but
3142                  * folio lock is good enough to serialize the trimming.
3143                  */
3144                 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3145                 if (shmem_mapping(mapping))
3146                         end = shmem_fallocend(mapping->host, end);
3147         }
3148
3149         /*
3150          * Racy check whether we can split the page, before unmap_folio()
3151          * splits the PMDs.
3152          */
3153         if (!can_split_folio(folio, &extra_pins)) {
3154                 ret = -EAGAIN;
3155                 goto out_unlock;
3156         }
3157
3158         unmap_folio(folio);
3159
3160         /* Block interrupt reentry while taking the xa_lock and spinlock */
3161         local_irq_disable();
3162         if (mapping) {
3163                 /*
3164                  * Check if the folio is present in the page cache.
3165                  * We assume all tail pages are present too, if the folio is there.
3166                  */
3167                 xas_lock(&xas);
3168                 xas_reset(&xas);
3169                 if (xas_load(&xas) != folio)
3170                         goto fail;
3171         }
3172
3173         /* Prevent deferred_split_scan() touching ->_refcount */
3174         spin_lock(&ds_queue->split_queue_lock);
3175         if (folio_ref_freeze(folio, 1 + extra_pins)) {
3176                 if (folio_order(folio) > 1 &&
3177                     !list_empty(&folio->_deferred_list)) {
3178                         ds_queue->split_queue_len--;
3179                         /*
3180                          * Reinitialize page_deferred_list after removing the
3181                          * page from the split_queue, otherwise a subsequent
3182                          * split will see list corruption when checking the
3183                          * page_deferred_list.
3184                          */
3185                         list_del_init(&folio->_deferred_list);
3186                 }
3187                 spin_unlock(&ds_queue->split_queue_lock);
3188                 if (mapping) {
3189                         int nr = folio_nr_pages(folio);
3190
3191                         xas_split(&xas, folio, folio_order(folio));
3192                         if (folio_test_pmd_mappable(folio) &&
3193                             new_order < HPAGE_PMD_ORDER) {
3194                                 if (folio_test_swapbacked(folio)) {
3195                                         __lruvec_stat_mod_folio(folio,
3196                                                         NR_SHMEM_THPS, -nr);
3197                                 } else {
3198                                         __lruvec_stat_mod_folio(folio,
3199                                                         NR_FILE_THPS, -nr);
3200                                         filemap_nr_thps_dec(mapping);
3201                                 }
3202                         }
3203                 }
3204
3205                 __split_huge_page(page, list, end, new_order);
3206                 ret = 0;
3207         } else {
3208                 spin_unlock(&ds_queue->split_queue_lock);
3209 fail:
3210                 if (mapping)
3211                         xas_unlock(&xas);
3212                 local_irq_enable();
3213                 remap_page(folio, folio_nr_pages(folio));
3214                 ret = -EAGAIN;
3215         }
3216
3217 out_unlock:
3218         if (anon_vma) {
3219                 anon_vma_unlock_write(anon_vma);
3220                 put_anon_vma(anon_vma);
3221         }
3222         if (mapping)
3223                 i_mmap_unlock_read(mapping);
3224 out:
3225         xas_destroy(&xas);
3226         count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3227         return ret;
3228 }
3229
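/*
 * Make sure a large rmappable folio that is about to be freed is no longer
 * queued for deferred splitting, so deferred_split_scan() cannot trip over
 * a stale _deferred_list entry.
 */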
3230 void folio_undo_large_rmappable(struct folio *folio)
3231 {
3232         struct deferred_split *ds_queue;
3233         unsigned long flags;
3234
3235         if (folio_order(folio) <= 1)
3236                 return;
3237
3238         /*
3239          * At this point, there is no one trying to add the folio to
3240          * deferred_list. If folio is not in deferred_list, it's safe
3241          * to check without acquiring the split_queue_lock.
3242          */
3243         if (data_race(list_empty(&folio->_deferred_list)))
3244                 return;
3245
3246         ds_queue = get_deferred_split_queue(folio);
3247         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3248         if (!list_empty(&folio->_deferred_list)) {
3249                 ds_queue->split_queue_len--;
3250                 list_del_init(&folio->_deferred_list);
3251         }
3252         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3253 }
3254
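/*
 * Queue a partially mapped large folio for deferred splitting; the actual
 * split happens later, under memory pressure, via deferred_split_scan().
 */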
3255 void deferred_split_folio(struct folio *folio)
3256 {
3257         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3258 #ifdef CONFIG_MEMCG
3259         struct mem_cgroup *memcg = folio_memcg(folio);
3260 #endif
3261         unsigned long flags;
3262
3263         /*
3264          * Order 1 folios have no space for a deferred list, but we also
3265          * won't waste much memory by not adding them to the deferred list.
3266          */
3267         if (folio_order(folio) <= 1)
3268                 return;
3269
3270         /*
3271          * The try_to_unmap() in the page reclaim path might reach here too;
3272          * this may cause a race condition that corrupts the deferred split
3273          * queue. And if page reclaim is already handling the same folio, it
3274          * is unnecessary to handle it again in the shrinker.
3275          *
3276          * Check the swapcache flag to determine if the folio is being
3277          * handled by page reclaim since THP swap would add the folio into
3278          * swap cache before calling try_to_unmap().
3279          */
3280         if (folio_test_swapcache(folio))
3281                 return;
3282
3283         if (!list_empty(&folio->_deferred_list))
3284                 return;
3285
3286         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3287         if (list_empty(&folio->_deferred_list)) {
3288                 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
3289                 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
3290                 ds_queue->split_queue_len++;
3291 #ifdef CONFIG_MEMCG
3292                 if (memcg)
3293                         set_shrinker_bit(memcg, folio_nid(folio),
3294                                          deferred_split_shrinker->id);
3295 #endif
3296         }
3297         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3298 }
3299
3300 static unsigned long deferred_split_count(struct shrinker *shrink,
3301                 struct shrink_control *sc)
3302 {
3303         struct pglist_data *pgdata = NODE_DATA(sc->nid);
3304         struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3305
3306 #ifdef CONFIG_MEMCG
3307         if (sc->memcg)
3308                 ds_queue = &sc->memcg->deferred_split_queue;
3309 #endif
3310         return READ_ONCE(ds_queue->split_queue_len);
3311 }
3312
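/*
 * Shrinker scan callback: take up to sc->nr_to_scan folios off the
 * deferred split queue, try to lock and split each one, and put whatever
 * could not be split back on the queue.
 */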
3313 static unsigned long deferred_split_scan(struct shrinker *shrink,
3314                 struct shrink_control *sc)
3315 {
3316         struct pglist_data *pgdata = NODE_DATA(sc->nid);
3317         struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3318         unsigned long flags;
3319         LIST_HEAD(list);
3320         struct folio *folio, *next;
3321         int split = 0;
3322
3323 #ifdef CONFIG_MEMCG
3324         if (sc->memcg)
3325                 ds_queue = &sc->memcg->deferred_split_queue;
3326 #endif
3327
3328         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3329         /* Take pin on all head pages to avoid freeing them under us */
3330         list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
3331                                                         _deferred_list) {
3332                 if (folio_try_get(folio)) {
3333                         list_move(&folio->_deferred_list, &list);
3334                 } else {
3335                         /* We lost race with folio_put() */
3336                         list_del_init(&folio->_deferred_list);
3337                         ds_queue->split_queue_len--;
3338                 }
3339                 if (!--sc->nr_to_scan)
3340                         break;
3341         }
3342         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3343
3344         list_for_each_entry_safe(folio, next, &list, _deferred_list) {
3345                 if (!folio_trylock(folio))
3346                         goto next;
3347                 /* split_huge_page() removes page from list on success */
3348                 if (!split_folio(folio))
3349                         split++;
3350                 folio_unlock(folio);
3351 next:
3352                 folio_put(folio);
3353         }
3354
3355         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3356         list_splice_tail(&list, &ds_queue->split_queue);
3357         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3358
3359         /*
3360          * Stop the shrinker if we didn't split any page and the queue is empty.
3361          * This can happen if pages were freed under us.
3362          */
3363         if (!split && list_empty(&ds_queue->split_queue))
3364                 return SHRINK_STOP;
3365         return split;
3366 }
3367
3368 #ifdef CONFIG_DEBUG_FS
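/*
 * debugfs helper: walk every online PFN and try to split each large LRU
 * folio that is found, skipping tail pages and hugetlb folios.
 */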
3369 static void split_huge_pages_all(void)
3370 {
3371         struct zone *zone;
3372         struct page *page;
3373         struct folio *folio;
3374         unsigned long pfn, max_zone_pfn;
3375         unsigned long total = 0, split = 0;
3376
3377         pr_debug("Split all THPs\n");
3378         for_each_zone(zone) {
3379                 if (!managed_zone(zone))
3380                         continue;
3381                 max_zone_pfn = zone_end_pfn(zone);
3382                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3383                         int nr_pages;
3384
3385                         page = pfn_to_online_page(pfn);
3386                         if (!page || PageTail(page))
3387                                 continue;
3388                         folio = page_folio(page);
3389                         if (!folio_try_get(folio))
3390                                 continue;
3391
3392                         if (unlikely(page_folio(page) != folio))
3393                                 goto next;
3394
3395                         if (zone != folio_zone(folio))
3396                                 goto next;
3397
3398                         if (!folio_test_large(folio)
3399                                 || folio_test_hugetlb(folio)
3400                                 || !folio_test_lru(folio))
3401                                 goto next;
3402
3403                         total++;
3404                         folio_lock(folio);
3405                         nr_pages = folio_nr_pages(folio);
3406                         if (!split_folio(folio))
3407                                 split++;
3408                         pfn += nr_pages - 1;
3409                         folio_unlock(folio);
3410 next:
3411                         folio_put(folio);
3412                         cond_resched();
3413                 }
3414         }
3415
3416         pr_debug("%lu of %lu THP split\n", split, total);
3417 }
3418
3419 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
3420 {
3421         return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
3422                     is_vm_hugetlb_page(vma);
3423 }
3424
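/*
 * debugfs helper: walk [vaddr_start, vaddr_end) in the address space of
 * @pid and split every THP found there down to @new_order, where possible.
 */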
3425 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
3426                                 unsigned long vaddr_end, unsigned int new_order)
3427 {
3428         int ret = 0;
3429         struct task_struct *task;
3430         struct mm_struct *mm;
3431         unsigned long total = 0, split = 0;
3432         unsigned long addr;
3433
3434         vaddr_start &= PAGE_MASK;
3435         vaddr_end &= PAGE_MASK;
3436
3437         /* Find the task_struct from pid */
3438         rcu_read_lock();
3439         task = find_task_by_vpid(pid);
3440         if (!task) {
3441                 rcu_read_unlock();
3442                 ret = -ESRCH;
3443                 goto out;
3444         }
3445         get_task_struct(task);
3446         rcu_read_unlock();
3447
3448         /* Find the mm_struct */
3449         mm = get_task_mm(task);
3450         put_task_struct(task);
3451
3452         if (!mm) {
3453                 ret = -EINVAL;
3454                 goto out;
3455         }
3456
3457         pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3458                  pid, vaddr_start, vaddr_end);
3459
3460         mmap_read_lock(mm);
3461         /*
3462          * always increase addr by PAGE_SIZE, since we could have a PTE page
3463          * table filled with PTE-mapped THPs, each of which is distinct.
3464          */
3465         for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3466                 struct vm_area_struct *vma = vma_lookup(mm, addr);
3467                 struct page *page;
3468                 struct folio *folio;
3469
3470                 if (!vma)
3471                         break;
3472
3473                 /* skip special VMA and hugetlb VMA */
3474                 if (vma_not_suitable_for_thp_split(vma)) {
3475                         addr = vma->vm_end;
3476                         continue;
3477                 }
3478
3479                 /* FOLL_DUMP to ignore special (like zero) pages */
3480                 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3481
3482                 if (IS_ERR_OR_NULL(page))
3483                         continue;
3484
3485                 folio = page_folio(page);
3486                 if (!is_transparent_hugepage(folio))
3487                         goto next;
3488
3489                 if (new_order >= folio_order(folio))
3490                         goto next;
3491
3492                 total++;
3493                 /*
3494                  * For folios with private data, split_huge_page_to_list_to_order()
3495                  * will try to drop it before the split and then check whether the
3496                  * folio can be split. So skip the check here.
3497                  */
3498                 if (!folio_test_private(folio) &&
3499                     !can_split_folio(folio, NULL))
3500                         goto next;
3501
3502                 if (!folio_trylock(folio))
3503                         goto next;
3504
3505                 if (!split_folio_to_order(folio, new_order))
3506                         split++;
3507
3508                 folio_unlock(folio);
3509 next:
3510                 folio_put(folio);
3511                 cond_resched();
3512         }
3513         mmap_read_unlock(mm);
3514         mmput(mm);
3515
3516         pr_debug("%lu of %lu THP split\n", split, total);
3517
3518 out:
3519         return ret;
3520 }
3521
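/*
 * debugfs helper: split the large folios cached for @file_path in the page
 * offset range [off_start, off_end) down to @new_order, where possible.
 */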
3522 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3523                                 pgoff_t off_end, unsigned int new_order)
3524 {
3525         struct filename *file;
3526         struct file *candidate;
3527         struct address_space *mapping;
3528         int ret = -EINVAL;
3529         pgoff_t index;
3530         int nr_pages = 1;
3531         unsigned long total = 0, split = 0;
3532
3533         file = getname_kernel(file_path);
3534         if (IS_ERR(file))
3535                 return ret;
3536
3537         candidate = file_open_name(file, O_RDONLY, 0);
3538         if (IS_ERR(candidate))
3539                 goto out;
3540
3541         pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3542                  file_path, off_start, off_end);
3543
3544         mapping = candidate->f_mapping;
3545
3546         for (index = off_start; index < off_end; index += nr_pages) {
3547                 struct folio *folio = filemap_get_folio(mapping, index);
3548
3549                 nr_pages = 1;
3550                 if (IS_ERR(folio))
3551                         continue;
3552
3553                 if (!folio_test_large(folio))
3554                         goto next;
3555
3556                 total++;
3557                 nr_pages = folio_nr_pages(folio);
3558
3559                 if (new_order >= folio_order(folio))
3560                         goto next;
3561
3562                 if (!folio_trylock(folio))
3563                         goto next;
3564
3565                 if (!split_folio_to_order(folio, new_order))
3566                         split++;
3567
3568                 folio_unlock(folio);
3569 next:
3570                 folio_put(folio);
3571                 cond_resched();
3572         }
3573
3574         filp_close(candidate, NULL);
3575         ret = 0;
3576
3577         pr_debug("%lu of %lu file-backed THP split\n", split, total);
3578 out:
3579         putname(file);
3580         return ret;
3581 }
3582
3583 #define MAX_INPUT_BUF_SZ 255
3584
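/*
 * debugfs write handler. With debugfs mounted at /sys/kernel/debug, the
 * accepted input formats are (values below are only illustrative):
 *
 *   1                                                     split all THPs
 *   <pid>,0x<vaddr_start>,0x<vaddr_end>[,<new_order>]     split a VA range
 *   <file_path>,0x<off_start>,0x<off_end>[,<new_order>]   split a file range
 */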
3585 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3586                                 size_t count, loff_t *ppos)
3587 {
3588         static DEFINE_MUTEX(split_debug_mutex);
3589         ssize_t ret;
3590         /*
3591          * Holds "pid,vaddr_start,vaddr_end[,new_order]" or
3592          * "file_path,off_start,off_end[,new_order]".
3593          */
3594         char input_buf[MAX_INPUT_BUF_SZ];
3595         int pid;
3596         unsigned long vaddr_start, vaddr_end;
3597         unsigned int new_order = 0;
3598
3599         ret = mutex_lock_interruptible(&split_debug_mutex);
3600         if (ret)
3601                 return ret;
3602
3603         ret = -EFAULT;
3604
3605         memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3606         if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3607                 goto out;
3608
3609         input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3610
3611         if (input_buf[0] == '/') {
3612                 char *tok;
3613                 char *buf = input_buf;
3614                 char file_path[MAX_INPUT_BUF_SZ];
3615                 pgoff_t off_start = 0, off_end = 0;
3616                 size_t input_len = strlen(input_buf);
3617
3618                 tok = strsep(&buf, ",");
3619                 if (tok) {
3620                         strcpy(file_path, tok);
3621                 } else {
3622                         ret = -EINVAL;
3623                         goto out;
3624                 }
3625
3626                 ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order);
3627                 if (ret != 2 && ret != 3) {
3628                         ret = -EINVAL;
3629                         goto out;
3630                 }
3631                 ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order);
3632                 if (!ret)
3633                         ret = input_len;
3634
3635                 goto out;
3636         }
3637
3638         ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order);
3639         if (ret == 1 && pid == 1) {
3640                 split_huge_pages_all();
3641                 ret = strlen(input_buf);
3642                 goto out;
3643         } else if (ret != 3 && ret != 4) {
3644                 ret = -EINVAL;
3645                 goto out;
3646         }
3647
3648         ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order);
3649         if (!ret)
3650                 ret = strlen(input_buf);
3651 out:
3652         mutex_unlock(&split_debug_mutex);
3653         return ret;
3654
3655 }
3656
3657 static const struct file_operations split_huge_pages_fops = {
3658         .owner   = THIS_MODULE,
3659         .write   = split_huge_pages_write,
3660         .llseek  = no_llseek,
3661 };
3662
3663 static int __init split_huge_pages_debugfs(void)
3664 {
3665         debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3666                             &split_huge_pages_fops);
3667         return 0;
3668 }
3669 late_initcall(split_huge_pages_debugfs);
3670 #endif
3671
3672 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
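/*
 * Replace the present PMD mapping of @page found by @pvmw with a PMD
 * migration entry, carrying over the write, dirty, young, soft-dirty and
 * uffd-wp state. Returns -EBUSY if an anon-exclusive page cannot be shared
 * (e.g. it may be pinned), leaving the mapping in place.
 */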
3673 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3674                 struct page *page)
3675 {
3676         struct folio *folio = page_folio(page);
3677         struct vm_area_struct *vma = pvmw->vma;
3678         struct mm_struct *mm = vma->vm_mm;
3679         unsigned long address = pvmw->address;
3680         bool anon_exclusive;
3681         pmd_t pmdval;
3682         swp_entry_t entry;
3683         pmd_t pmdswp;
3684
3685         if (!(pvmw->pmd && !pvmw->pte))
3686                 return 0;
3687
3688         flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3689         pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3690
3691         /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
3692         anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
3693         if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
3694                 set_pmd_at(mm, address, pvmw->pmd, pmdval);
3695                 return -EBUSY;
3696         }
3697
3698         if (pmd_dirty(pmdval))
3699                 folio_mark_dirty(folio);
3700         if (pmd_write(pmdval))
3701                 entry = make_writable_migration_entry(page_to_pfn(page));
3702         else if (anon_exclusive)
3703                 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3704         else
3705                 entry = make_readable_migration_entry(page_to_pfn(page));
3706         if (pmd_young(pmdval))
3707                 entry = make_migration_entry_young(entry);
3708         if (pmd_dirty(pmdval))
3709                 entry = make_migration_entry_dirty(entry);
3710         pmdswp = swp_entry_to_pmd(entry);
3711         if (pmd_soft_dirty(pmdval))
3712                 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3713         if (pmd_uffd_wp(pmdval))
3714                 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
3715         set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3716         folio_remove_rmap_pmd(folio, page, vma);
3717         folio_put(folio);
3718         trace_set_migration_pmd(address, pmd_val(pmdswp));
3719
3720         return 0;
3721 }
3722
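/*
 * Counterpart of set_pmd_migration_entry(): rebuild a present PMD for @new
 * from the PMD migration entry, restoring the write, dirty, young,
 * soft-dirty and uffd-wp state, and re-establish the anon or file rmap.
 */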
3723 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3724 {
3725         struct folio *folio = page_folio(new);
3726         struct vm_area_struct *vma = pvmw->vma;
3727         struct mm_struct *mm = vma->vm_mm;
3728         unsigned long address = pvmw->address;
3729         unsigned long haddr = address & HPAGE_PMD_MASK;
3730         pmd_t pmde;
3731         swp_entry_t entry;
3732
3733         if (!(pvmw->pmd && !pvmw->pte))
3734                 return;
3735
3736         entry = pmd_to_swp_entry(*pvmw->pmd);
3737         folio_get(folio);
3738         pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
3739         if (pmd_swp_soft_dirty(*pvmw->pmd))
3740                 pmde = pmd_mksoft_dirty(pmde);
3741         if (is_writable_migration_entry(entry))
3742                 pmde = pmd_mkwrite(pmde, vma);
3743         if (pmd_swp_uffd_wp(*pvmw->pmd))
3744                 pmde = pmd_mkuffd_wp(pmde);
3745         if (!is_migration_entry_young(entry))
3746                 pmde = pmd_mkold(pmde);
3747         /* NOTE: pmd_mkdirty() may also set the soft-dirty bit on some archs */
3748         if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
3749                 pmde = pmd_mkdirty(pmde);
3750
3751         if (folio_test_anon(folio)) {
3752                 rmap_t rmap_flags = RMAP_NONE;
3753
3754                 if (!is_readable_migration_entry(entry))
3755                         rmap_flags |= RMAP_EXCLUSIVE;
3756
3757                 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
3758         } else {
3759                 folio_add_file_rmap_pmd(folio, new, vma);
3760         }
3761         VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
3762         set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3763
3764         /* No need to invalidate - it was non-present before */
3765         update_mmu_cache_pmd(vma, address, pvmw->pmd);
3766         trace_remove_migration_pmd(address, pmd_val(pmde));
3767 }
3768 #endif