From db95ea787bd19be666ba41733259ffea65963bff Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 14 Feb 2024 13:29:15 +0100
Subject: [PATCH] arm64: mm: Wire up TCR.DS bit to PTE shareability fields

When LPA2 is enabled, bits 8 and 9 of page and block descriptors become
part of the output address instead of carrying shareability attributes
for the region in question.

So avoid setting these bits if TCR.DS == 1, which means LPA2 is
enabled.

Signed-off-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20240214122845.2033971-74-ardb+git@google.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/Kconfig                     |  4 ++++
 arch/arm64/include/asm/pgtable-hwdef.h |  1 +
 arch/arm64/include/asm/pgtable-prot.h  | 16 ++++++++++++++--
 arch/arm64/mm/mmap.c                   |  4 ++++
 arch/arm64/mm/proc.S                   |  2 ++
 5 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index aa7c1d435139..8c2c36fffcf5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1377,6 +1377,10 @@ config ARM64_PA_BITS
 	default 48 if ARM64_PA_BITS_48
 	default 52 if ARM64_PA_BITS_52
 
+config ARM64_LPA2
+	def_bool y
+	depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+
 choice
 	prompt "Endianness"
 	default CPU_LITTLE_ENDIAN
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index e4944d517c99..b770f98fc0b5 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -284,6 +284,7 @@
 #define TCR_E0PD1		(UL(1) << 56)
 #define TCR_TCMA0		(UL(1) << 57)
 #define TCR_TCMA1		(UL(1) << 58)
+#define TCR_DS			(UL(1) << 59)
 
 /*
  * TTBR.
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 63ced9ccec21..dd9ee67d1d87 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -30,8 +30,8 @@
 #define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
-#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_MAYBE_NG | PTE_MAYBE_SHARED | PTE_AF)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_MAYBE_NG | PMD_MAYBE_SHARED | PMD_SECT_AF)
 
 #define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -67,7 +67,19 @@ extern bool arm64_use_ng_mappings;
 #define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
 #define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)
 
+#ifndef CONFIG_ARM64_LPA2
 #define lpa2_is_enabled()	false
+#define PTE_MAYBE_SHARED	PTE_SHARED
+#define PMD_MAYBE_SHARED	PMD_SECT_S
+#else
+static inline bool __pure lpa2_is_enabled(void)
+{
+	return read_tcr() & TCR_DS;
+}
+
+#define PTE_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PTE_SHARED)
+#define PMD_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PMD_SECT_S)
+#endif
 
 /*
  * If we have userspace only BTI we don't want to mark kernel pages
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 645fe60d000f..642bdf908b22 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -73,6 +73,10 @@ static int __init adjust_protection_map(void)
 		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
 	}
 
+	if (lpa2_is_enabled())
+		for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
+			pgprot_val(protection_map[i]) &= ~PTE_SHARED;
+
 	return 0;
 }
 arch_initcall(adjust_protection_map);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 6e1b2bc41a9f..7c46f8cfd6ae 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -466,6 +466,7 @@ alternative_else_nop_endif
 	 */
 #define PTE_MAYBE_NG		0
+#define PTE_MAYBE_SHARED	0
 
 	mov_q	x0, PIE_E0
 	msr	REG_PIRE0_EL1, x0
 
@@ -473,6 +474,7 @@ alternative_else_nop_endif
 	msr	REG_PIR_EL1, x0
 
 #undef PTE_MAYBE_NG
+#undef PTE_MAYBE_SHARED
 
 	mov	x0, TCR2_EL1x_PIE
 	msr	REG_TCR2_EL1, x0
-- 
2.34.1
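
For context, the standalone C sketch below (not taken from the kernel tree;
the DESC_* masks and the descriptor_oa() helper are invented for
illustration) shows the descriptor-level effect the commit message
describes: with TCR.DS clear, bits [9:8] of a page or block descriptor hold
the SH[1:0] shareability field, while with TCR.DS set (LPA2) the same bits
contribute OA[51:50] to the output address. This is why PTE_MAYBE_SHARED
and PMD_MAYBE_SHARED must resolve to 0 when lpa2_is_enabled() is true.

/*
 * Standalone sketch, assuming the LPA2 4K-granule descriptor layout:
 * with TCR.DS == 0, bits [9:8] are SH[1:0]; with TCR.DS == 1 they
 * carry OA[51:50]. Mask and helper names are hypothetical.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_BITS_9_8	(UINT64_C(3) << 8)		/* SH[1:0] or OA[51:50] */
#define DESC_OA_47_12	UINT64_C(0x0000fffffffff000)	/* OA[47:12], DS == 0   */
#define DESC_OA_49_12	UINT64_C(0x0003fffffffff000)	/* OA[49:12], DS == 1   */

static uint64_t descriptor_oa(uint64_t desc, bool tcr_ds)
{
	uint64_t oa = desc & (tcr_ds ? DESC_OA_49_12 : DESC_OA_47_12);

	if (tcr_ds)	/* fold descriptor bits [9:8] into OA[51:50] */
		oa |= (desc & DESC_BITS_9_8) << 42;
	return oa;
}

int main(void)
{
	uint64_t desc = UINT64_C(0x0000000080000703);	/* bits [9:8] set */

	printf("DS=0: OA=0x%016" PRIx64 ", SH=%" PRIu64 "\n",
	       descriptor_oa(desc, false), (desc & DESC_BITS_9_8) >> 8);
	printf("DS=1: OA=0x%016" PRIx64 " (no shareability in the descriptor)\n",
	       descriptor_oa(desc, true));
	return 0;
}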