x86/cpu: Ensure that CPU info updates are propagated on UP
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0b97bcde70c6102a4b82b561c3256ec53b614770..5c1e6d6be267af3e7b489e9f71937e7be6b25448 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -61,6 +61,7 @@
 #include <asm/microcode.h>
 #include <asm/intel-family.h>
 #include <asm/cpu_device_id.h>
+#include <asm/fred.h>
 #include <asm/uv/uv.h>
 #include <asm/ia32.h>
 #include <asm/set_memory.h>
 
 #include "cpu.h"
 
+DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
+
 u32 elf_hwcap2 __read_mostly;
 
 /* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-EXPORT_SYMBOL(smp_num_siblings);
+unsigned int __max_threads_per_core __ro_after_init = 1;
+EXPORT_SYMBOL(__max_threads_per_core);
+
+unsigned int __max_dies_per_package __ro_after_init = 1;
+EXPORT_SYMBOL(__max_dies_per_package);
+
+unsigned int __max_logical_packages __ro_after_init = 1;
+EXPORT_SYMBOL(__max_logical_packages);
+
+unsigned int __num_cores_per_package __ro_after_init = 1;
+EXPORT_SYMBOL(__num_cores_per_package);
+
+unsigned int __num_threads_per_package __ro_after_init = 1;
+EXPORT_SYMBOL(__num_threads_per_package);
 
 static struct ppin_info {
        int     feature;
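With smp_num_siblings gone, read-mostly limits such as __max_logical_packages above become
the stable source for sizing per-package state. A minimal sketch of a consumer follows; the
helper name is hypothetical, and the assumption is that the extern declaration of
__max_logical_packages is visible to the caller (inside common.c itself it trivially is):

    /*
     * Hypothetical consumer, illustration only: size a table once per
     * logical package.  __max_logical_packages is __ro_after_init, so
     * the value is stable after early boot.
     */
    #include <linux/slab.h>

    static void *alloc_per_package_table(size_t entry_size)
    {
            return kcalloc(__max_logical_packages, entry_size, GFP_KERNEL);
    }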
@@ -382,9 +398,8 @@ out:
 }
 
 /* These bits should not change their value after CPU init is finished. */
-static const unsigned long cr4_pinned_mask =
-       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
-       X86_CR4_FSGSBASE | X86_CR4_CET;
+static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
+                                            X86_CR4_FSGSBASE | X86_CR4_CET | X86_CR4_FRED;
 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
 static unsigned long cr4_pinned_bits __ro_after_init;
 
@@ -790,19 +805,6 @@ static void get_model_name(struct cpuinfo_x86 *c)
        *(s + 1) = '\0';
 }
 
-void detect_num_cpu_cores(struct cpuinfo_x86 *c)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       c->x86_max_cores = 1;
-       if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
-               return;
-
-       cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
-       if (eax & 0x1f)
-               c->x86_max_cores = (eax >> 26) + 1;
-}
-
 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
        unsigned int n, dummy, ebx, ecx, edx, l2size;
@@ -864,51 +866,6 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 }
 
-int detect_ht_early(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-       u32 eax, ebx, ecx, edx;
-
-       if (!cpu_has(c, X86_FEATURE_HT))
-               return -1;
-
-       if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-               return -1;
-
-       if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
-               return -1;
-
-       cpuid(1, &eax, &ebx, &ecx, &edx);
-
-       smp_num_siblings = (ebx & 0xff0000) >> 16;
-       if (smp_num_siblings == 1)
-               pr_info_once("CPU0: Hyper-Threading is disabled\n");
-#endif
-       return 0;
-}
-
-void detect_ht(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-       int index_msb, core_bits;
-
-       if (detect_ht_early(c) < 0)
-               return;
-
-       index_msb = get_count_order(smp_num_siblings);
-       c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);
-
-       smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-       index_msb = get_count_order(smp_num_siblings);
-
-       core_bits = get_count_order(c->x86_max_cores);
-
-       c->topo.core_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb) &
-               ((1 << core_bits) - 1);
-#endif
-}
-
 static void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
        char *v = c->x86_vendor_id;
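For reference, the removed detect_ht() derived package and core IDs by slicing the initial
APIC ID; that decomposition is now done by the topology parser. Worked through with assumed
values, and assuming the common case where phys_pkg_id() is a plain right shift of the APIC
ID: with smp_num_siblings = 8 and x86_max_cores = 4, get_count_order(8) = 3 so
pkg_id = apicid >> 3; 8 / 4 = 2 threads per core gives index_msb = 1 and core_bits = 2, so
core_id = (apicid >> 1) & 0x3. For APIC ID 22 (0b10110) that yields pkg_id = 2 and
core_id = 3.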
@@ -1267,6 +1224,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define SRSO           BIT(5)
 /* CPU is affected by GDS */
 #define GDS            BIT(6)
+/* CPU is affected by Register File Data Sampling */
+#define RFDS           BIT(7)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
@@ -1294,9 +1253,18 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(TIGERLAKE,       X86_STEPPING_ANY,               GDS),
        VULNBL_INTEL_STEPPINGS(LAKEFIELD,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ROCKETLAKE,      X86_STEPPING_ANY,               MMIO | RETBLEED | GDS),
-       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,    X86_STEPPING_ANY,               MMIO | MMIO_SBDS),
-       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,  X86_STEPPING_ANY,               MMIO),
-       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,  X86_STEPPING_ANY,               MMIO | MMIO_SBDS),
+       VULNBL_INTEL_STEPPINGS(ALDERLAKE,       X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(ALDERLAKE_L,     X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(RAPTORLAKE,      X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P,    X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S,    X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT,  X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,    X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RFDS),
+       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,  X86_STEPPING_ANY,               MMIO | RFDS),
+       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,  X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RFDS),
+       VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT,   X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D, X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY,            RFDS),
 
        VULNBL_AMD(0x15, RETBLEED),
        VULNBL_AMD(0x16, RETBLEED),
@@ -1330,6 +1298,24 @@ static bool arch_cap_mmio_immune(u64 ia32_cap)
                ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
 }
 
+static bool __init vulnerable_to_rfds(u64 ia32_cap)
+{
+       /* The "immunity" bit trumps everything else: */
+       if (ia32_cap & ARCH_CAP_RFDS_NO)
+               return false;
+
+       /*
+        * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
+        * indicate that mitigation is needed because the guest is running on
+        * vulnerable hardware or may migrate to such hardware:
+        */
+       if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+               return true;
+
+       /* Only consult the blacklist when there is no enumeration: */
+       return cpu_matches(cpu_vuln_blacklist, RFDS);
+}
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
        u64 ia32_cap = x86_read_arch_cap_msr();
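The new helper gives the enumeration bits strict precedence over the model blacklist;
informally, as read from the code above:

    ARCH_CAP_RFDS_NO set                      -> not affected, nothing else is consulted
    ARCH_CAP_RFDS_CLEAR set (RFDS_NO clear)   -> affected; the VMM is asking for VERW clearing
    neither bit enumerated                    -> fall back to the RFDS entries in cpu_vuln_blacklist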
@@ -1355,8 +1341,13 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        /*
         * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
         * flag and protect from vendor-specific bugs via the whitelist.
+        *
+        * Don't use AutoIBRS when SNP is enabled because it degrades host
+        * userspace indirect branch performance.
         */
-       if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+       if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+           (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
+            !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
                if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
                    !(ia32_cap & ARCH_CAP_PBRSB_NO))
@@ -1441,6 +1432,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
            boot_cpu_has(X86_FEATURE_AVX))
                setup_force_cpu_bug(X86_BUG_GDS);
 
+       if (vulnerable_to_rfds(ia32_cap))
+               setup_force_cpu_bug(X86_BUG_RFDS);
+
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;
 
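With X86_BUG_RFDS set here, mitigation selection and sysfs reporting elsewhere can simply
test the bug bit. A minimal, hypothetical consumer (the function name and the policy are
assumptions, not this patch's code) might look like:

    /* Hypothetical consumer, illustration only. */
    static void rfds_report_example(void)
    {
            if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
                    pr_info("RFDS: CPU not affected\n");
                    return;
            }

            /* Affected: a VERW-based clearing mitigation would be chosen here. */
            pr_info("RFDS: CPU affected, mitigation required\n");
    }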
@@ -1589,8 +1583,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                get_cpu_vendor(c);
                get_cpu_cap(c);
                setup_force_cpu_cap(X86_FEATURE_CPUID);
+               get_cpu_address_sizes(c);
                cpu_parse_early_param();
 
+               cpu_init_topology(c);
+
                if (this_cpu->c_early_init)
                        this_cpu->c_early_init(c);
 
@@ -1601,10 +1598,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                        this_cpu->c_bsp_init(c);
        } else {
                setup_clear_cpu_cap(X86_FEATURE_CPUID);
+               get_cpu_address_sizes(c);
+               cpu_init_topology(c);
        }
 
-       get_cpu_address_sizes(c);
-
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
        cpu_set_bug_bits(c);
@@ -1748,18 +1745,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
 
        get_cpu_address_sizes(c);
 
-       if (c->cpuid_level >= 0x00000001) {
-               c->topo.initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
-#ifdef CONFIG_X86_32
-# ifdef CONFIG_SMP
-               c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
-# else
-               c->topo.apicid = c->topo.initial_apicid;
-# endif
-#endif
-               c->topo.pkg_id = c->topo.initial_apicid;
-       }
-
        get_model_name(c); /* Default name */
 
        /*
@@ -1780,29 +1765,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
 #endif
 }
 
-/*
- * Validate that ACPI/mptables have the same information about the
- * effective APIC id and update the package map.
- */
-static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-       unsigned int cpu = smp_processor_id();
-       u32 apicid;
-
-       apicid = apic->cpu_present_to_apicid(cpu);
-
-       if (apicid != c->topo.apicid) {
-               pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
-                      cpu, apicid, c->topo.initial_apicid);
-       }
-       BUG_ON(topology_update_package_map(c->topo.pkg_id, cpu));
-       BUG_ON(topology_update_die_map(c->topo.die_id, cpu));
-#else
-       c->topo.logical_pkg_id = 0;
-#endif
-}
-
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
@@ -1816,11 +1778,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
-       c->x86_max_cores = 1;
-       c->x86_coreid_bits = 0;
-       c->topo.cu_id = 0xff;
-       c->topo.llc_id = BAD_APICID;
-       c->topo.l2c_id = BAD_APICID;
 #ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
@@ -1839,17 +1796,14 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 
        generic_identify(c);
 
+       cpu_parse_topology(c);
+
        if (this_cpu->c_identify)
                this_cpu->c_identify(c);
 
        /* Clear/Set all flags overridden by options, after probe */
        apply_forced_caps(c);
 
-#ifdef CONFIG_X86_64
-       c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
-#endif
-
-
        /*
         * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
         * Hygon will clear it in ->c_init() below.
@@ -1903,10 +1857,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
                                c->x86, c->x86_model);
        }
 
-#ifdef CONFIG_X86_64
-       detect_ht(c);
-#endif
-
        x86_init_rdrand(c);
        setup_pku(c);
        setup_cet(c);
@@ -1938,8 +1888,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        /* Init Machine Check Exception if available. */
        mcheck_cpu_init(c);
 
-       select_idle_routine(c);
-
 #ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
 #endif
@@ -1998,7 +1946,6 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_32
        enable_sep_cpu();
 #endif
-       validate_apic_and_package_id(c);
        x86_spec_ctrl_setup_ap();
        update_srbds_msr();
        if (boot_cpu_has_bug(X86_BUG_GDS))
@@ -2050,6 +1997,7 @@ DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
        .top_of_stack   = TOP_OF_INIT_STACK,
 };
 EXPORT_PER_CPU_SYMBOL(pcpu_hot);
+EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
 
 #ifdef CONFIG_X86_64
 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
@@ -2067,10 +2015,8 @@ static void wrmsrl_cstar(unsigned long val)
                wrmsrl(MSR_CSTAR, val);
 }
 
-/* May not be marked __init: used by software suspend */
-void syscall_init(void)
+static inline void idt_syscall_init(void)
 {
-       wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
        wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
        if (ia32_enabled()) {
@@ -2104,6 +2050,23 @@ void syscall_init(void)
               X86_EFLAGS_AC|X86_EFLAGS_ID);
 }
 
+/* May not be marked __init: used by software suspend */
+void syscall_init(void)
+{
+       /* The default user and kernel segments */
+       wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
+
+       /*
+        * Except the IA32_STAR MSR, there is no need to set up the SYSCALL
+        * and SYSENTER MSRs for FRED, because FRED uses the ring-3 FRED
+        * entry point for SYSCALL and SYSENTER, and ERETU is the only
+        * legitimate instruction to return to ring 3 (both SYSEXIT and
+        * SYSRET cause #UD when FRED is enabled).
+        */
+       if (!cpu_feature_enabled(X86_FEATURE_FRED))
+               idt_syscall_init();
+}
+
 #else  /* CONFIG_X86_64 */
 
 #ifdef CONFIG_STACKPROTECTOR
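Factoring the legacy MSR programming into idt_syscall_init() keeps syscall_init() safe to
call from non-__init contexts, while FRED-enabled CPUs only program MSR_STAR. A hedged
sketch of a call site; the hook name below is hypothetical, everything it calls comes from
the hunk above:

    /*
     * Hypothetical per-CPU re-init hook, illustration only.  Safe on both
     * FRED and non-FRED CPUs because the FRED gate is inside syscall_init().
     */
    static void example_cpu_reinit_entry(void)
    {
            syscall_init();

            if (cpu_feature_enabled(X86_FEATURE_FRED))
                    pr_debug("FRED: legacy SYSCALL/SYSENTER entry MSRs left untouched\n");
    }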
@@ -2207,8 +2170,9 @@ void cpu_init_exception_handling(void)
        /* paranoid_entry() gets the CPU number from the GDT */
        setup_getcpu(cpu);
 
-       /* IST vectors need TSS to be set up. */
-       tss_setup_ist(tss);
+       /* For IDT mode, IST vectors need to be set in TSS. */
+       if (!cpu_feature_enabled(X86_FEATURE_FRED))
+               tss_setup_ist(tss);
        tss_setup_io_bitmap(tss);
        set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 
@@ -2217,8 +2181,10 @@ void cpu_init_exception_handling(void)
        /* GHCB needs to be setup to handle #VC. */
        setup_ghcb();
 
-       /* Finally load the IDT */
-       load_current_idt();
+       if (cpu_feature_enabled(X86_FEATURE_FRED))
+               cpu_init_fred_exceptions();
+       else
+               load_current_idt();
 }
 
 /*
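For the two hunks above: with FRED enabled, exception delivery no longer goes through the
IDT or the TSS IST slots; entry points and stack levels are configured through FRED MSRs,
which is what cpu_init_fred_exceptions() (defined elsewhere in this series) takes care of,
so the IDT and IST setup is skipped on such CPUs.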
@@ -2341,13 +2307,17 @@ void arch_smt_update(void)
 
 void __init arch_cpu_finalize_init(void)
 {
+       struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
+
        identify_boot_cpu();
 
+       select_idle_routine();
+
        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
-       cpu_smt_set_num_threads(smp_num_siblings, smp_num_siblings);
+       cpu_smt_set_num_threads(__max_threads_per_core, __max_threads_per_core);
 
        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
@@ -2377,6 +2347,13 @@ void __init arch_cpu_finalize_init(void)
        fpu__init_system();
        fpu__init_cpu();
 
+       /*
+        * Ensure that access to the per CPU representation has the initial
+        * boot CPU configuration.
+        */
+       *c = boot_cpu_data;
+       c->initialized = true;
+
        alternative_instructions();
 
        if (IS_ENABLED(CONFIG_X86_64)) {
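The final hunk is what the subject line refers to: on UP there is no secondary-CPU bringup
to copy boot_cpu_data into the per-CPU cpu_info, so the copy is done explicitly once
identify_boot_cpu() has finished. A minimal sketch of the kind of reader this protects; the
helper is hypothetical, cpu_data() and cpu_has() are existing macros, and the sketch assumes
cpu_data() resolves to the per-CPU cpu_info (which this series makes the backing store on UP
as well):

    /*
     * Hypothetical reader, illustration only: correct on UP only because
     * arch_cpu_finalize_init() copied boot_cpu_data into the per-CPU
     * cpu_info and set ->initialized.
     */
    static bool example_cpu_has_sse2(unsigned int cpu)
    {
            struct cpuinfo_x86 *c = &cpu_data(cpu);

            return c->initialized && cpu_has(c, X86_FEATURE_XMM2);
    }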