arm64: Revert "mm: provide idmap pointer to cpu_replace_ttbr1()"
author Ard Biesheuvel <ardb@kernel.org>
Wed, 14 Feb 2024 12:29:10 +0000 (13:29 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Fri, 16 Feb 2024 12:42:36 +0000 (12:42 +0000)
This reverts commit 1682c45b920643c, which is no longer needed now that
we create the permanent kernel mapping directly during early boot.

This is a RINO (revert in name only) given that some of the code has
moved around, but the changes are straightforward.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20240214122845.2033971-69-ardb+git@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/mmu_context.h
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c

index a8a89a0f2867cfcc67fdd0f6f4e52081eef4cf8e..c768d16b81a49e5a4718fb9fd7d3075028d92def 100644 (file)
@@ -108,18 +108,13 @@ static inline void cpu_uninstall_idmap(void)
                cpu_switch_mm(mm->pgd, mm);
 }
 
-static inline void __cpu_install_idmap(pgd_t *idmap)
+static inline void cpu_install_idmap(void)
 {
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_idmap_tcr_t0sz();
 
-       cpu_switch_mm(lm_alias(idmap), &init_mm);
-}
-
-static inline void cpu_install_idmap(void)
-{
-       __cpu_install_idmap(idmap_pg_dir);
+       cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }
 
 /*
@@ -146,21 +141,21 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
        isb();
 }
 
-void __cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap, bool cnp);
+void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp);
 
 static inline void cpu_enable_swapper_cnp(void)
 {
-       __cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir, true);
+       __cpu_replace_ttbr1(lm_alias(swapper_pg_dir), true);
 }
 
-static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
 {
        /*
         * Only for early TTBR1 replacement before cpucaps are finalized and
         * before we've decided whether to use CNP.
         */
        WARN_ON(system_capabilities_finalized());
-       __cpu_replace_ttbr1(pgdp, idmap, false);
+       __cpu_replace_ttbr1(pgdp, false);
 }
 
 /*
index 89828ad2bca780c769f572a62efdd56439e19bb8..a86ab99587c9076d29e204209c299e7bdd6b27d8 100644 (file)
@@ -225,7 +225,7 @@ static void __init kasan_init_shadow(void)
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
        dsb(ishst);
-       cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);
+       cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -261,7 +261,7 @@ static void __init kasan_init_shadow(void)
                                PAGE_KERNEL_RO));
 
        memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
-       cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
+       cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 }
 
 static void __init kasan_init_depth(void)
index 3db40b5179474f16a9affc8898e62d8b7ce7d2ca..a3d23da92d87fece0c1ddc10645ae889662d6bb8 100644 (file)
@@ -1445,7 +1445,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-void __cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap, bool cnp)
+void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
 {
        typedef void (ttbr_replace_func)(phys_addr_t);
        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -1460,7 +1460,7 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap, bool cnp)
 
        replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
-       __cpu_install_idmap(idmap);
+       cpu_install_idmap();
 
        /*
         * We really don't want to take *any* exceptions while TTBR1 is