/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>
/*
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
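
/*
 * Swapping out transparent huge pages is not supported when MTE is in use:
 * MTE tags are saved and restored per small page (mte_save_tags() /
 * mte_restore_tags()), so a tagged THP would have to be split first.
 */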
static inline bool arch_thp_swp_supported(void)
{
	return !system_supports_mte();
}
#define arch_thp_swp_supported arch_thp_swp_supported
/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_LOW)
#define __phys_to_pte_val(phys)	(phys)
#endif
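
/*
 * With 52-bit physical addresses the top bits of the output address do not
 * fit in the standard pte address field: __phys_to_pte_val() folds them into
 * a spare group of low pte bits and __pte_to_phys() shifts them back up by
 * PTE_ADDR_HIGH_SHIFT when reconstructing the physical address.
 */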
#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). Execute-only mappings,
 * like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits not set), must
 * return false. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}
extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(u64 old, u64 new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *     0      0      |     1          0          0
 *     0      1      |     1          1          0
 *     1      0      |     1          0          1
 *     1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
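
/*
 * Worked example with hardware DBM present: a clean, writable pte is
 * installed in the Dirty=0/Writable=1 state above, i.e. PTE_RDONLY set and
 * PTE_WRITE (the DBM bit) set. The first store through the mapping makes the
 * MMU clear PTE_RDONLY instead of faulting, moving the entry to the
 * Dirty=1/Writable=1 row, which pte_hw_dirty() then reports as dirty.
 */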
static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}
static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted() returns false for exec only mappings, they
	 * don't expose tags (instruction fetches don't check tags).
	 */
	if (system_supports_mte() && pte_access_permitted(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}
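
/*
 * set_ptes() maps 'nr' consecutive pages of a physically contiguous range:
 * the same pte value is written to each slot, with the output address
 * advanced by PAGE_SIZE per entry. 'addr' is unused because no per-address
 * maintenance is required here.
 */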
static inline void set_ptes(struct mm_struct *mm,
			    unsigned long __always_unused addr,
			    pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);
	__sync_cache_and_tags(pte, nr);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pte) += PAGE_SIZE;
	}
}
#define set_ptes set_ptes
/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_present_invalid(pmd)	(!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}
/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
static inline void __set_pte_at(struct mm_struct *mm,
				unsigned long __always_unused addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	__sync_cache_and_tags(pte, nr);
	__check_safe_pte_update(mm, ptep, pte);
	set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
			    PMD_SIZE >> PAGE_SHIFT);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
			    PUD_SIZE >> PAGE_SHIFT);
}
#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from
 * the endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
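
/*
 * Typical usage: MMIO mappings use pgprot_device() or pgprot_noncached(),
 * write-combined mappings such as frame buffers use pgprot_writecombine(),
 * and the DMA API applies pgprot_dmacoherent() when remapping buffers shared
 * with non-coherent devices.
 */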
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif
extern pgd_t init_pg_dir[];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	       ((unsigned long)swapper_pg_dir & PAGE_MASK);
}
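
/*
 * Entries that live in swapper_pg_dir are not written directly: the set_p?d()
 * helpers below detect them via in_swapper_pgdir() and route the update
 * through set_swapper_pgd(), which can rewrite the kernel page-table root
 * even when it is mapped read-only.
 */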
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}
/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}
/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}
/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#if CONFIG_PGTABLE_LEVELS > 4
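
/*
 * With five potential translation levels the final number of levels is only
 * known at boot: pgtable_l5_enabled() reports whether the extra (p4d) level
 * is really in use, i.e. whether the kernel is running with the larger
 * 52-bit virtual address space rather than the minimum VA size it also
 * supports.
 */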
static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(!pgd_none(pgd))
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}
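
/*
 * When pgtable_l5_enabled() is false the p4d level is folded away at
 * runtime: a p4d entry is then the pgd entry itself, so the helpers below
 * recompute the slot within the pgd page instead of walking down a level.
 */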
#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}

static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}
static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}

/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))
#else

static inline bool pgtable_l5_enabled(void) { return false; }

/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)		NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;

	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	/*
	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
	 * dirtiness again.
	 */
	if (pte_sw_dirty(pte))
		pte = pte_mkdirty(pte);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif
/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif
/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		remember PG_anon_exclusive
 *	bits 3-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		5
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
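
/*
 * For example, __swp_entry(1, 2) packs type 1 into bits 3-7 and offset 2
 * into bits 8-57, giving (1 << 3) | (2 << 8) == 0x208; the pte conversions
 * above are plain bit-for-bit copies of that value.
 */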
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
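
/*
 * MTE tags are not part of the data written to the swap device, so when a
 * tagged page is swapped out its tags are stashed separately and later
 * restored (or the stash invalidated) by the hooks below.
 */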
#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
	if (system_supports_mte())
		mte_restore_tags(entry, &folio->page);
}

#endif /* CONFIG_ARM64_MTE */
/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
		unsigned int nr)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with zeroed
 * page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
#define arch_has_hw_pte_young		cpu_has_hw_af

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
#define arch_wants_old_prefaulted_pte	cpu_has_hw_af

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */