x86/resctrl: Allow arch to allocate memory needed in resctrl_arch_rmid_read()
authorJames Morse <james.morse@arm.com>
Tue, 13 Feb 2024 18:44:29 +0000 (18:44 +0000)
committerBorislav Petkov (AMD) <bp@alien8.de>
Fri, 16 Feb 2024 18:18:32 +0000 (19:18 +0100)
Depending on the number of monitors available, Arm's MPAM may need to
allocate a monitor prior to reading the counter value. Allocating a
contended resource may involve sleeping.

__check_limbo() and mon_event_count() each make multiple calls to
resctrl_arch_rmid_read(). To avoid extra work on contended systems,
the allocation should remain valid across multiple invocations of
resctrl_arch_rmid_read().

The memory or hardware allocated is not specific to a domain.

Add arch hooks for this allocation, which need to be called before
resctrl_arch_rmid_read(). The allocated monitor is passed to
resctrl_arch_rmid_read(), then freed again afterwards. The helper
can be called on any CPU, and can sleep.

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Reviewed-by: Babu Moger <babu.moger@amd.com>
Tested-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Tested-by: Peter Newman <peternewman@google.com>
Tested-by: Babu Moger <babu.moger@amd.com>
Tested-by: Carl Worth <carl@os.amperecomputing.com> # arm64
Link: https://lore.kernel.org/r/20240213184438.16675-16-james.morse@arm.com
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
arch/x86/include/asm/resctrl.h
arch/x86/kernel/cpu/resctrl/ctrlmondata.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/monitor.c
include/linux/resctrl.h

index 1d274dbabc44420c25b06ac1fc2febc7114c0a99..29c4cc34378713d851ae53bf4fb4cf4271ccb8e6 100644 (file)
@@ -136,6 +136,17 @@ static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
        return rmid;
 }
 
+/* x86 can always read an rmid, nothing needs allocating */
+struct rdt_resource;
+static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid)
+{
+       might_sleep();
+       return NULL;
+};
+
+static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
+                                            void *ctx) { };
+
 void resctrl_cpu_detect(struct cpuinfo_x86 *c);
 
 #else
index e933e1cdb1c9253c30a07575206427453ae306b5..52fa0e14cb86cb789b0b80c19fad1282794a6459 100644 (file)
@@ -546,6 +546,11 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
        rr->d = d;
        rr->val = 0;
        rr->first = first;
+       rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
+       if (IS_ERR(rr->arch_mon_ctx)) {
+               rr->err = -EINVAL;
+               return;
+       }
 
        cpu = cpumask_any_housekeeping(&d->cpu_mask);
 
@@ -559,6 +564,8 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
                smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
        else
                smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
+
+       resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
 }
 
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
index 81f5de916db8cae2c42ccbb404a3ab3245863160..e089d1a1a05556c28aa6f595b8d3f681f37b5823 100644 (file)
@@ -137,6 +137,7 @@ struct rmid_read {
        bool                    first;
        int                     err;
        u64                     val;
+       void                    *arch_mon_ctx;
 };
 
 extern bool rdt_alloc_capable;
index e8aeff6673ea34e7f9a553c5cf97f84329f28802..9b503e6ac490f6e55bb6c34a15f6e393c1e83df2 100644 (file)
@@ -269,7 +269,7 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
 
 int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
                           u32 unused, u32 rmid, enum resctrl_event_id eventid,
-                          u64 *val)
+                          u64 *val, void *ignored)
 {
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
@@ -324,9 +324,17 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
        u32 idx_limit = resctrl_arch_system_num_rmid_idx();
        struct rmid_entry *entry;
        u32 idx, cur_idx = 1;
+       void *arch_mon_ctx;
        bool rmid_dirty;
        u64 val = 0;
 
+       arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
+       if (IS_ERR(arch_mon_ctx)) {
+               pr_warn_ratelimited("Failed to allocate monitor context: %ld",
+                                   PTR_ERR(arch_mon_ctx));
+               return;
+       }
+
        /*
         * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
         * are marked as busy for occupancy < threshold. If the occupancy
@@ -340,7 +348,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
 
                entry = __rmid_entry(idx);
                if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
-                                          QOS_L3_OCCUP_EVENT_ID, &val)) {
+                                          QOS_L3_OCCUP_EVENT_ID, &val,
+                                          arch_mon_ctx)) {
                        rmid_dirty = true;
                } else {
                        rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
@@ -353,6 +362,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
                }
                cur_idx = idx + 1;
        }
+
+       resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
 }
 
 bool has_busy_rmid(struct rdt_domain *d)
@@ -533,7 +544,7 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
        }
 
        rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid,
-                                        &tval);
+                                        &tval, rr->arch_mon_ctx);
        if (rr->err)
                return rr->err;
 
@@ -722,11 +733,27 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
        if (is_mbm_total_enabled()) {
                rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
                rr.val = 0;
+               rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
+               if (IS_ERR(rr.arch_mon_ctx)) {
+                       pr_warn_ratelimited("Failed to allocate monitor context: %ld",
+                                           PTR_ERR(rr.arch_mon_ctx));
+                       return;
+               }
+
                __mon_event_count(closid, rmid, &rr);
+
+               resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
        }
        if (is_mbm_local_enabled()) {
                rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
                rr.val = 0;
+               rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
+               if (IS_ERR(rr.arch_mon_ctx)) {
+                       pr_warn_ratelimited("Failed to allocate monitor context: %ld",
+                                           PTR_ERR(rr.arch_mon_ctx));
+                       return;
+               }
+
                __mon_event_count(closid, rmid, &rr);
 
                /*
@@ -736,6 +763,8 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
                 */
                if (is_mba_sc(NULL))
                        mbm_bw_count(closid, rmid, &rr);
+
+               resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
        }
 }
 
index 8649fc84aac25290e8295baf87f4660d94ddf4c3..bf460c912bf52213460ea614853945d194d4b975 100644 (file)
@@ -235,6 +235,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
  * @rmid:              rmid of the counter to read.
  * @eventid:           eventid to read, e.g. L3 occupancy.
  * @val:               result of the counter read in bytes.
+ * @arch_mon_ctx:      An architecture specific value from
+ *                     resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
+ *                     the hardware monitor allocated for this read request.
  *
  * Some architectures need to sleep when first programming some of the counters.
  * (specifically: arm64's MPAM cache occupancy counters can return 'not ready'
@@ -248,7 +251,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
  */
 int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
                           u32 closid, u32 rmid, enum resctrl_event_id eventid,
-                          u64 *val);
+                          u64 *val, void *arch_mon_ctx);
 
 /**
  * resctrl_arch_rmid_read_context_check()  - warn about invalid contexts