x86/cpu: Ensure that CPU info updates are propagated on UP
authorThomas Gleixner <tglx@linutronix.de>
Fri, 22 Mar 2024 18:56:35 +0000 (19:56 +0100)
committerBorislav Petkov (AMD) <bp@alien8.de>
Sat, 23 Mar 2024 11:22:04 +0000 (12:22 +0100)
The boot sequence evaluates CPUID information twice:

  1) During early boot

  2) When finalizing the early setup right before
     mitigations are selected and alternatives are patched.

In both cases the evaluation is stored in boot_cpu_data, but on UP the
copying of boot_cpu_data to the per CPU info of the boot CPU happens
between #1 and #2. So any update which happens in #2 is never propagated to
the per CPU info instance.

Consolidate the whole logic and copy boot_cpu_data right before applying
alternatives as that's the point where boot_cpu_data is in its final
state and not supposed to change anymore.

This also removes the voodoo mb() from smp_prepare_cpus_common() which
had absolutely no purpose.

Fixes: 71eb4893cfaf ("x86/percpu: Cure per CPU madness on UP")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Link: https://lore.kernel.org/r/20240322185305.127642785@linutronix.de
arch/x86/kernel/cpu/common.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c

index ba8cf5e9ce5632aeaa9322899f5b5eaea3aab2e7..5c1e6d6be267af3e7b489e9f71937e7be6b25448 100644 (file)
@@ -2307,6 +2307,8 @@ void arch_smt_update(void)
 
 void __init arch_cpu_finalize_init(void)
 {
+       struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
+
        identify_boot_cpu();
 
        select_idle_routine();
@@ -2345,6 +2347,13 @@ void __init arch_cpu_finalize_init(void)
        fpu__init_system();
        fpu__init_cpu();
 
+       /*
+        * Ensure that access to the per CPU representation has the initial
+        * boot CPU configuration.
+        */
+       *c = boot_cpu_data;
+       c->initialized = true;
+
        alternative_instructions();
 
        if (IS_ENABLED(CONFIG_X86_64)) {
index 3e1e96efadfe7ec8fe5a5499529a9d2913e10722..ef206500ed6f22e11228ebfb6f4537343a064076 100644 (file)
@@ -1206,16 +1206,6 @@ void __init i386_reserve_resources(void)
 
 #endif /* CONFIG_X86_32 */
 
-#ifndef CONFIG_SMP
-void __init smp_prepare_boot_cpu(void)
-{
-       struct cpuinfo_x86 *c = &cpu_data(0);
-
-       *c = boot_cpu_data;
-       c->initialized = true;
-}
-#endif
-
 static struct notifier_block kernel_offset_notifier = {
        .notifier_call = dump_kernel_offset
 };
index fe355c89f6c112a33d17966d8821b1ae20608055..76bb65045c649a2c7d849c8a38c9e5d4ea92140d 100644 (file)
@@ -313,14 +313,6 @@ static void notrace start_secondary(void *unused)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-static void __init smp_store_boot_cpu_info(void)
-{
-       struct cpuinfo_x86 *c = &cpu_data(0);
-
-       *c = boot_cpu_data;
-       c->initialized = true;
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -1039,29 +1031,15 @@ static __init void disable_smp(void)
        cpumask_set_cpu(0, topology_die_cpumask(0));
 }
 
-static void __init smp_cpu_index_default(void)
-{
-       int i;
-       struct cpuinfo_x86 *c;
-
-       for_each_possible_cpu(i) {
-               c = &cpu_data(i);
-               /* mark all to hotplug */
-               c->cpu_index = nr_cpu_ids;
-       }
-}
-
 void __init smp_prepare_cpus_common(void)
 {
        unsigned int i;
 
-       smp_cpu_index_default();
-
-       /*
-        * Setup boot CPU information
-        */
-       smp_store_boot_cpu_info(); /* Final full version of the data */
-       mb();
+       /* Mark all except the boot CPU as hotpluggable */
+       for_each_possible_cpu(i) {
+               if (i)
+                       per_cpu(cpu_info.cpu_index, i) = nr_cpu_ids;
+       }
 
        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);