// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/page_64.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/init.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

static inline bool check_la57_support(void)
{
	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return false;

	/*
	 * 5-level paging is detected and enabled at kernel decompression
	 * stage. Only check if it has been enabled there.
	 */
	if (!(native_read_cr4() & X86_CR4_LA57))
		return false;

	return true;
}

static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
{
	unsigned long vaddr, vaddr_end;
	int i;

	/* Encrypt the kernel and related (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Clear the memory encryption mask from the .bss..decrypted section.
	 * The bss section will be memset to zero later in the initialization so
	 * there is no need to zero it after changing the memory encryption
	 * attribute.
	 */
	if (sme_get_me_mask()) {
		vaddr = (unsigned long)__start_bss_decrypted;
		vaddr_end = (unsigned long)__end_bss_decrypted;

		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
			/*
			 * On SNP, transition the page to shared in the RMP table so that
			 * it is consistent with the page table attribute change.
			 *
			 * __start_bss_decrypted has a virtual address in the high range
			 * mapping (kernel .text). PVALIDATE, by way of
			 * early_snp_set_memory_shared(), requires a valid virtual
			 * address but the kernel is currently running off of the identity
			 * mapping so use __pa() to get a *currently* valid virtual address.
			 */
			early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);

			i = pmd_index(vaddr);
			pmd[i] -= sme_get_me_mask();
		}
	}

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

/*
 * Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be accessed using RIP_REL_REF().
 */
unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts);
	unsigned long pgtable_flags;
	unsigned long load_delta;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	bool la57;
	int i;

	la57 = check_la57_support();

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
	RIP_REL_REF(phys_base) = load_delta;

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_MASK)
		for (;;);

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */

	pgd = &RIP_REL_REF(early_top_pgt)->pgd;
	pgd[pgd_index(__START_KERNEL_map)] += load_delta;

	if (la57) {
		p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt);
		p4d[MAX_PTRS_PER_P4D - 1] += load_delta;

		pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
	}
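
	/*
	 * The last two level3_kernel_pgt entries carry the kernel text/data
	 * and fixmap mappings set up in head_64.S; fix up their physical
	 * addresses as well, along with the fixmap PMDs below.
	 */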
	RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta;
	RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta;

	for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
		RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta;

	/*
	 * Set up the identity mapping for the switchover. These
	 * entries should *NOT* have the global bit set! This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	pud = &early_pgts[0]->pmd;
	pmd = &early_pgts[1]->pmd;
	RIP_REL_REF(next_early_pgt) = 2;

	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
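
	/*
	 * Two consecutive entries are populated at each level below so that
	 * the identity mapping stays valid even if the kernel image straddles
	 * the corresponding alignment boundary.
	 */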
	if (la57) {
		p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd;

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

		i = physaddr >> P4D_SHIFT;
		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
	}

	i = physaddr >> PUD_SHIFT;
	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	pmd_entry &= RIP_REL_REF(__supported_pte_mask);
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;
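
	/*
	 * Map the whole kernel image, _text through _end, with 2M (PMD-sized)
	 * pages at its current physical load address.
	 */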
	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT);

		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
	}

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 *
	 * Only the region occupied by the kernel image has so far
	 * been checked against the table of usable memory regions
	 * provided by the firmware, so invalidate pages outside that
	 * region. A page table entry that maps to a reserved area of
	 * memory would allow processor speculation into that area,
	 * and on some hardware (particularly the UV platform) even
	 * speculative access to some reserved areas is caught as an
	 * error, causing the BIOS to halt the system.
	 */

	pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd;

	/* invalidate pages before the kernel image */
	for (i = 0; i < pmd_index((unsigned long)_text); i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/* fixup pages that are part of the kernel image */
	for (; i <= pmd_index((unsigned long)_end); i++)
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;

	/* invalidate pages after the kernel image */
	for (; i < PTRS_PER_PMD; i++)
		pmd[i] &= ~_PAGE_PRESENT;

	return sme_postprocess_startup(bp, pmd);
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return false;
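
	/*
	 * Walk the early page table for @address, allocating any missing
	 * intermediate levels from the early_dynamic_pgts pool. If the pool
	 * runs out, the early page tables are reset and the walk restarts
	 * from the label below.
	 */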
again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (!pgtable_l5_enabled())
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd_p[pmd_index(address)] = pmd;

	return true;
}

static bool __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;
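
	/* Build a PMD-level (2M) entry covering the faulting address, using the early PMD flags. */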
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}

void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
	if (trapnr == X86_TRAP_PF &&
	    early_make_pgtable(native_read_cr2()))
		return;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
	    trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
		return;

	if (trapnr == X86_TRAP_VE && tdx_early_handle_ve(regs))
		return;

	early_fixup_exception(regs, trapnr);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
	memset(__brk_base, 0,
	       (unsigned long) __brk_limit - (unsigned long) __brk_base);
}

static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;
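
	/*
	 * The full 64-bit command line pointer is split across the legacy
	 * 32-bit cmd_line_ptr field and ext_cmd_line_ptr, which carries the
	 * upper 32 bits.
	 */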
	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init __noreturn x86_64_start_kernel(char * real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	if (check_la57_support()) {
		__pgtable_l5_enabled = 1;
		pgdir_shift = 48;
		ptrs_per_p4d = 512;
		page_offset_base = __PAGE_OFFSET_BASE_L5;
		vmalloc_base = __VMALLOC_BASE_L5;
		vmemmap_base = __VMEMMAP_BASE_L5;
	}

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	/*
	 * This needs to happen *before* kasan_early_init() because latter maps stuff
	 * into that page.
	 */
	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	/*
	 * Flush global TLB entries which could be left over from the trampoline page
	 * table.
	 *
	 * This needs to happen *after* kasan_early_init() as KASAN-enabled .configs
	 * instrument native_write_cr4() so KASAN must be initialized for that
	 * instrumentation to work.
	 */
	__native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));

	idt_setup_early_handler();

	/* Needed before cc_platform_has() can be used for TDX */
	tdx_early_init();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* set init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init __noreturn x86_64_start_reservations(char *real_mode_data)
{
	/* version is always not zero if it is copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * this happens in the functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and can be instrumented by tracing or KASAN, which both don't work
 * during early CPU bringup. Also the idt_table has the runtime vectors
 * configured which require certain CPU state to be setup already (like TSS),
 * which also hasn't happened yet in early CPU bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

/* This may run while still in the direct mapping */
static void __head startup_64_load_idt(void *vc_handler)
{
	struct desc_ptr desc = {
		.address = (unsigned long)&RIP_REL_REF(bringup_idt_table),
		.size = sizeof(bringup_idt_table) - 1,
	};
	struct idt_data data;
	gate_desc idt_desc;

	/* @vc_handler is set only for a VMM Communication Exception */
	if (vc_handler) {
		init_idt_data(&data, X86_TRAP_VC, vc_handler);
		idt_init_desc(&idt_desc, &data);
		native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
	}

	native_load_idt(&desc);
}

/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
	void *handler = NULL;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		setup_ghcb();
		handler = vc_boot_ghcb;
	}

	startup_64_load_idt(handler);
}

/*
 * Setup boot CPU state needed before kernel switches to virtual addresses.
 */
void __head startup_64_setup_gdt_idt(void)
{
	void *handler = NULL;

	struct desc_ptr startup_gdt_descr = {
		.address = (unsigned long)&RIP_REL_REF(init_per_cpu_var(gdt_page.gdt)),
		.size = GDT_SIZE - 1,
	};

	/* Load GDT */
	native_load_gdt(&startup_gdt_descr);

	/* New GDT is live - reload data segment registers */
	asm volatile("movl %%eax, %%ds\n"
		     "movl %%eax, %%ss\n"
		     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
		handler = &RIP_REL_REF(vc_no_ghcb);

	startup_64_load_idt(handler);
}