riscv: Use the same CPU operations for all CPUs
author: Samuel Holland <samuel.holland@sifive.com>
Tue, 21 Nov 2023 23:47:26 +0000 (15:47 -0800)
committer: Palmer Dabbelt <palmer@rivosinc.com>
Thu, 4 Jan 2024 23:03:07 +0000 (15:03 -0800)
RISC-V provides no binding (ACPI or DT) to describe per-cpu start/stop
operations, so cpu_set_ops() will always detect the same operations for
every CPU. Replace the cpu_ops array with a single pointer to save space
and reduce boot time.

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Link: https://lore.kernel.org/r/20231121234736.3489608-4-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
arch/riscv/include/asm/cpu_ops.h
arch/riscv/kernel/cpu-hotplug.c
arch/riscv/kernel/cpu_ops.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/smpboot.c

index 18af75e6873c57501c921414301cb47a905a79ab..176b570ef982761bec7810eda1332b105e640623 100644 (file)
@@ -29,7 +29,7 @@ struct cpu_operations {
 };
 
 extern const struct cpu_operations cpu_ops_spinwait;
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
-void __init cpu_set_ops(int cpu);
+extern const struct cpu_operations *cpu_ops;
+void __init cpu_set_ops(void);
 
 #endif /* ifndef __ASM_CPU_OPS_H */
index 934eb64da0d0bdb29c27bb0a220dce1374f15e2d..28b58fc5ad1996112c67b7f6030f9ba81a26b388 100644 (file)
@@ -18,7 +18,7 @@
 
 bool cpu_has_hotplug(unsigned int cpu)
 {
-       if (cpu_ops[cpu]->cpu_stop)
+       if (cpu_ops->cpu_stop)
                return true;
 
        return false;
@@ -31,7 +31,7 @@ int __cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
 
-       if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop)
+       if (!cpu_ops->cpu_stop)
                return -EOPNOTSUPP;
 
        remove_cpu_topology(cpu);
@@ -55,8 +55,8 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
        pr_notice("CPU%u: off\n", cpu);
 
        /* Verify from the firmware if the cpu is really stopped*/
-       if (cpu_ops[cpu]->cpu_is_stopped)
-               ret = cpu_ops[cpu]->cpu_is_stopped(cpu);
+       if (cpu_ops->cpu_is_stopped)
+               ret = cpu_ops->cpu_is_stopped(cpu);
        if (ret)
                pr_warn("CPU%d may not have stopped: %d\n", cpu, ret);
 }
@@ -70,7 +70,7 @@ void __noreturn arch_cpu_idle_dead(void)
 
        cpuhp_ap_report_dead();
 
-       cpu_ops[smp_processor_id()]->cpu_stop();
+       cpu_ops->cpu_stop();
        /* It should never reach here */
        BUG();
 }
index 5540e2880abbbb1c5f5ec1fb472f6960e5042185..6a8bd8f4db0711b7ee02d0f4f7ce3ccb20e36526 100644 (file)
@@ -13,7 +13,7 @@
 #include <asm/sbi.h>
 #include <asm/smp.h>
 
-const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+const struct cpu_operations *cpu_ops __ro_after_init = &cpu_ops_spinwait;
 
 extern const struct cpu_operations cpu_ops_sbi;
 #ifndef CONFIG_RISCV_BOOT_SPINWAIT
@@ -22,14 +22,12 @@ const struct cpu_operations cpu_ops_spinwait = {
 };
 #endif
 
-void __init cpu_set_ops(int cpuid)
+void __init cpu_set_ops(void)
 {
 #if IS_ENABLED(CONFIG_RISCV_SBI)
        if (sbi_probe_extension(SBI_EXT_HSM)) {
-               if (!cpuid)
-                       pr_info("SBI HSM extension detected\n");
-               cpu_ops[cpuid] = &cpu_ops_sbi;
-       } else
+               pr_info("SBI HSM extension detected\n");
+               cpu_ops = &cpu_ops_sbi;
+       }
 #endif
-               cpu_ops[cpuid] = &cpu_ops_spinwait;
 }
index 40420afbb1a09fc90ea00107816adde1002d6479..45dd4035416efdc59eab5dd1092e7f0ff60cfbe6 100644 (file)
@@ -81,7 +81,7 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 
 #ifdef CONFIG_HOTPLUG_CPU
        if (cpu_has_hotplug(cpu))
-               cpu_ops[cpu]->cpu_stop();
+               cpu_ops->cpu_stop();
 #endif
 
        for(;;)
index 5551945255cdcccdfa72cb51bfabdbcccc56f070..519b6bd946e5d1b69edf3379e31b345e38a03deb 100644 (file)
@@ -166,25 +166,22 @@ void __init setup_smp(void)
 {
        int cpuid;
 
-       cpu_set_ops(0);
+       cpu_set_ops();
 
        if (acpi_disabled)
                of_parse_and_init_cpus();
        else
                acpi_parse_and_init_cpus();
 
-       for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
-               if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
-                       cpu_set_ops(cpuid);
+       for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)
+               if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
                        set_cpu_possible(cpuid, true);
-               }
-       }
 }
 
 static int start_secondary_cpu(int cpu, struct task_struct *tidle)
 {
-       if (cpu_ops[cpu]->cpu_start)
-               return cpu_ops[cpu]->cpu_start(cpu, tidle);
+       if (cpu_ops->cpu_start)
+               return cpu_ops->cpu_start(cpu, tidle);
 
        return -EOPNOTSUPP;
 }