LoongArch: Make {virt, phys, page, pfn} translation work with KFENCE
author Huacai Chen <chenhuacai@loongson.cn>
Wed, 10 Apr 2024 13:08:51 +0000 (21:08 +0800)
committer Huacai Chen <chenhuacai@loongson.cn>
Wed, 10 Apr 2024 13:08:51 +0000 (21:08 +0800)
KFENCE changes virt_to_page() to be able to translate TLB-mapped virtual
addresses, but forgets to change virt_to_phys()/phys_to_virt() and the
other translation functions as well. This patch fixes that; otherwise
some drivers (such as nvme and virtio-blk) cannot work with KFENCE.
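
As a rough, hypothetical driver-side sketch (not part of this patch): KFENCE
occasionally redirects a slab allocation into its pool, which on LoongArch is
TLB-mapped above vm_map_base, so the old __pa()-based virt_to_phys() returned
a wrong physical address for such a buffer:

    void *buf = kmalloc(512, GFP_KERNEL);  /* may be served from the KFENCE pool */
    phys_addr_t pa = virt_to_phys(buf);    /* pre-patch: bogus if buf is KFENCE-backed */
    /* the driver then hands pa to hardware (e.g. as a DMA address) and fails */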

All {virt, phys, page, pfn} translation functions are updated:
1, virt_to_pfn()/pfn_to_virt();
2, virt_to_page()/page_to_virt();
3, virt_to_phys()/phys_to_virt().

DMW/TLB-mapped addresses are distinguished by comparing the virtual
address with vm_map_base in virt_to_xyz(), and we define WANT_PAGE_VIRTUAL
in the KFENCE case for the reverse translations, xyz_to_virt().
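
Read as a plain function, the new forward translation amounts to the sketch
below (example_virt_to_phys() is only an illustrative name for what the
virt_to_phys() macro in io.h does):

    static phys_addr_t example_virt_to_phys(const void *kaddr)
    {
            if ((unsigned long)kaddr < vm_map_base)  /* DMW-mapped: fixed linear offset */
                    return __pa((unsigned long)kaddr);
            /* TLB-mapped (e.g. the KFENCE pool): resolve through the kernel page table */
            return page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) +
                   offset_in_page((unsigned long)kaddr);
    }

For the reverse direction there is no mapping to walk, so with WANT_PAGE_VIRTUAL
each KFENCE pool page records its virtual address via set_page_address() in
arch_kfence_init_pool(), and page_address()/page_to_virt() simply read it back.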

Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/include/asm/io.h
arch/loongarch/include/asm/kfence.h
arch/loongarch/include/asm/page.h
arch/loongarch/mm/pgtable.c

index 4a8adcca329b81e4f289dd7825fb15dbf2f4f7a9..c2f9979b2979e5e92e791e3f8304975db9e929c9 100644 (file)
 #include <asm/pgtable-bits.h>
 #include <asm/string.h>
 
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page)     ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
 extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
 extern void __init early_iounmap(void __iomem *addr, unsigned long size);
 
@@ -73,6 +68,21 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
 
 #define __io_aw() mmiowb()
 
+#ifdef CONFIG_KFENCE
+#define virt_to_phys(kaddr)                                                            \
+({                                                                                     \
+       (likely((unsigned long)kaddr < vm_map_base)) ? __pa((unsigned long)kaddr) :     \
+       page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) + offset_in_page((unsigned long)kaddr);\
+})
+
+#define phys_to_virt(paddr)                                                            \
+({                                                                                     \
+       extern char *__kfence_pool;                                                     \
+       (unlikely(__kfence_pool == NULL)) ? __va((unsigned long)paddr) :                \
+       page_address(phys_to_page((unsigned long)paddr)) + offset_in_page((unsigned long)paddr);\
+})
+#endif
+
 #include <asm-generic/io.h>
 
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
index 6c82aea1c99398c46484a77cc28da1316799affb..a6a5760da3a3323641e3fa422f3da87cdb4b66f8 100644 (file)
@@ -16,6 +16,7 @@
 static inline bool arch_kfence_init_pool(void)
 {
        int err;
+       char *kaddr, *vaddr;
        char *kfence_pool = __kfence_pool;
        struct vm_struct *area;
 
@@ -35,6 +36,14 @@ static inline bool arch_kfence_init_pool(void)
                return false;
        }
 
+       kaddr = kfence_pool;
+       vaddr = __kfence_pool;
+       while (kaddr < kfence_pool + KFENCE_POOL_SIZE) {
+               set_page_address(virt_to_page(kaddr), vaddr);
+               kaddr += PAGE_SIZE;
+               vaddr += PAGE_SIZE;
+       }
+
        return true;
 }
 
index 44027060c54a28bd34a80f538135491e3ebc758a..e85df33f11c77212c2e8ec8e6b3f1dbb955bc622 100644 (file)
@@ -78,7 +78,26 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 struct page *dmw_virt_to_page(unsigned long kaddr);
 struct page *tlb_virt_to_page(unsigned long kaddr);
 
-#define virt_to_pfn(kaddr)     PFN_DOWN(PHYSADDR(kaddr))
+#define pfn_to_phys(pfn)       __pfn_to_phys(pfn)
+#define phys_to_pfn(paddr)     __phys_to_pfn(paddr)
+
+#define page_to_phys(page)     pfn_to_phys(page_to_pfn(page))
+#define phys_to_page(paddr)    pfn_to_page(phys_to_pfn(paddr))
+
+#ifndef CONFIG_KFENCE
+
+#define page_to_virt(page)     __va(page_to_phys(page))
+#define virt_to_page(kaddr)    phys_to_page(__pa(kaddr))
+
+#else
+
+#define WANT_PAGE_VIRTUAL
+
+#define page_to_virt(page)                                                             \
+({                                                                                     \
+       extern char *__kfence_pool;                                                     \
+       (__kfence_pool == NULL) ? __va(page_to_phys(page)) : page_address(page);        \
+})
 
 #define virt_to_page(kaddr)                                                            \
 ({                                                                                     \
@@ -86,6 +105,11 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
        dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
 })
 
+#endif
+
+#define pfn_to_virt(pfn)       page_to_virt(pfn_to_page(pfn))
+#define virt_to_pfn(kaddr)     page_to_pfn(virt_to_page(kaddr))
+
 extern int __virt_addr_valid(volatile void *kaddr);
 #define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
 
index 2aae72e638713a658475e6fb82fc73eae0fc3469..bda018150000e66b906420ea7e3a5f79472ca352 100644 (file)
 
 struct page *dmw_virt_to_page(unsigned long kaddr)
 {
-       return pfn_to_page(virt_to_pfn(kaddr));
+       return phys_to_page(__pa(kaddr));
 }
 EXPORT_SYMBOL(dmw_virt_to_page);
 
 struct page *tlb_virt_to_page(unsigned long kaddr)
 {
-       return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
+       return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
 }
 EXPORT_SYMBOL(tlb_virt_to_page);