// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology(RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	"resctrl: " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/resctrl.h>
#include "internal.h"
/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

/*
 * The cached resctrl_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;
static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
	      struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
struct rdt_hw_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.r_resctrl = {
			.rid		= RDT_RESOURCE_L3,
			.name		= "L3",
			.cache_level	= 3,
			.domains	= domain_init(RDT_RESOURCE_L3),
			.parse_ctrlval	= parse_cbm,
			.format_str	= "%d=%0*x",
			.fflags		= RFTYPE_RES_CACHE,
		},
		.msr_base	= MSR_IA32_L3_CBM_BASE,
		.msr_update	= cat_wrmsr,
	},
	[RDT_RESOURCE_L2] =
	{
		.r_resctrl = {
			.rid		= RDT_RESOURCE_L2,
			.name		= "L2",
			.cache_level	= 2,
			.domains	= domain_init(RDT_RESOURCE_L2),
			.parse_ctrlval	= parse_cbm,
			.format_str	= "%d=%0*x",
			.fflags		= RFTYPE_RES_CACHE,
		},
		.msr_base	= MSR_IA32_L2_CBM_BASE,
		.msr_update	= cat_wrmsr,
	},
	[RDT_RESOURCE_MBA] =
	{
		.r_resctrl = {
			.rid		= RDT_RESOURCE_MBA,
			.name		= "MB",
			.cache_level	= 3,
			.domains	= domain_init(RDT_RESOURCE_MBA),
			.parse_ctrlval	= parse_bw,
			.format_str	= "%d=%*u",
			.fflags		= RFTYPE_RES_MB,
		},
	},
	[RDT_RESOURCE_SMBA] =
	{
		.r_resctrl = {
			.rid		= RDT_RESOURCE_SMBA,
			.name		= "SMBA",
			.cache_level	= 3,
			.domains	= domain_init(RDT_RESOURCE_SMBA),
			.parse_ctrlval	= parse_bw,
			.format_str	= "%d=%*u",
			.fflags		= RFTYPE_RES_MB,
		},
	},
};
/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3  @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
	struct rdt_resource *r = &hw_res->r_resctrl;
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
		return;

	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);

	/* If all the bits were set in MSR, return success */
	if (l != max_cbm)
		return;

	hw_res->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->cache.arch_has_sparse_bitmasks = false;
	r->alloc_capable = true;

	rdt_alloc_capable = true;
}
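
/*
 * is_mba_sc() - Report whether the MBA software controller ("mba_MBps"
 * mount option) is in use. With no resource (r == NULL) the global MBA
 * state is reported; only the MBA resource itself can ever return true.
 */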
bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;

	/*
	 * The software controller support is only applicable to MBA resource.
	 * Make sure to check for resource type.
	 */
	if (r->rid != RDT_RESOURCE_MBA)
		return false;

	return r->membw.mba_sc;
}
/*
 * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
 * exposed to user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have the granularity of power of two
 * and also the h/w does not guarantee a curve for configured delay
 * values vs. actual b/w enforced.
 * Hence we need a mapping that is pre-calibrated so the user can
 * express the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now to support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}
static bool __get_mem_config_intel(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx, max_delay;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	r->membw.arch_needs_linear = true;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - max_delay;
		r->membw.bw_gran = MAX_MBA_BW - max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
		r->membw.arch_needs_linear = false;
	}
	r->data_width = 3;

	if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
		r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
	else
		r->membw.throttle_mode = THREAD_THROTTLE_MAX;
	thread_throttle_mode_init();

	r->alloc_capable = true;

	return true;
}
static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx, subleaf;

	/*
	 * Query CPUID_Fn80000020_EDX_x01 for MBA and
	 * CPUID_Fn80000020_EDX_x02 for SMBA
	 */
	subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1;

	cpuid_count(0x80000020, subleaf, &eax.full, &ebx, &ecx, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	r->default_ctrl = MAX_MBA_BW_AMD;

	/* AMD does not use delay */
	r->membw.delay_linear = false;
	r->membw.arch_needs_linear = false;

	/*
	 * AMD does not use memory delay throttle model to control
	 * the allocation like Intel does.
	 */
	r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
	r->membw.min_bw = 0;
	r->membw.bw_gran = 1;
	/* Max value is 2048, Data width should be 4 in decimal */
	r->data_width = 4;

	r->alloc_capable = true;

	return true;
}
static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_ecx ecx;
	union cpuid_0x10_x_edx edx;
	u32 ebx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		r->cache.arch_has_sparse_bitmasks = ecx.split.noncont;
	r->alloc_capable = true;
}

static void rdt_get_cdp_config(int level)
{
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	rdt_resources_all[level].cdp_enabled = false;
	rdt_resources_all[level].r_resctrl.cdp_capable = true;
}

static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2);
}
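
/*
 * mba_wrmsr_amd() - Write the bandwidth values for CLOSIDs [m->low, m->high)
 * of a domain straight into the AMD MBA MSRs. No percentage-to-delay
 * conversion is needed on AMD.
 */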
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}
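
/*
 * Worked example for the linear case above: with MAX_MBA_BW of 100, a
 * requested bandwidth of 80% maps to a delay value of 100 - 80 = 20
 * written to the throttle MSR.
 */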
static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}
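
/*
 * get_domain_from_cpu() - Walk the resource's domain list and return the
 * domain whose cpu_mask contains @cpu, or NULL if the CPU does not belong
 * to any domain of @r.
 */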
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}

u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
{
	return resctrl_to_arch_res(r)->num_closid;
}
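
/*
 * rdt_ctrl_update() - Apply updated control values on the local CPU. Runs
 * on a CPU belonging to the target domain and programs the MSRs for the
 * CLOSID range described by the msr_param via the resource's msr_update()
 * helper.
 */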
void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		hw_res->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}
/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(-ENODEV);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}
static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	int i;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100%
	 */
	for (i = 0; i < hw_res->num_closid; i++, dc++)
		*dc = r->default_ctrl;
}
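
/*
 * domain_free() - Free a domain's arch-private MBM counter arrays and its
 * control value array, then the rdt_hw_domain itself.
 */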
static void domain_free(struct rdt_hw_domain *hw_dom)
{
	kfree(hw_dom->arch_mbm_total);
	kfree(hw_dom->arch_mbm_local);
	kfree(hw_dom->ctrl_val);
	kfree(hw_dom);
}
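
/*
 * domain_setup_ctrlval() - Allocate the per-CLOSID control value array for a
 * new domain, fill it with the resource's default control value and program
 * all of the domain's control MSRs with those defaults.
 */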
static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct msr_param m;
	u32 *dc;

	dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
			   GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	hw_dom->ctrl_val = dc;
	setup_default_ctrlval(r, dc);

	m.low = 0;
	m.high = hw_res->num_closid;
	hw_res->msr_update(d, &m, r);

	return 0;
}

/**
 * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
 * @num_rmid:	The size of the MBM counter array
 * @hw_dom:	The domain that owns the allocated arrays
 */
static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
{
	size_t tsize;

	if (is_mbm_total_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_total);
		hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_total)
			return -ENOMEM;
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_local);
		hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_local) {
			kfree(hw_dom->arch_mbm_total);
			hw_dom->arch_mbm_total = NULL;
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;
	int err;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		if (r->cache.arch_has_per_cpu_cfg)
			rdt_domain_reconfigure_cdp(r);
		return;
	}

	hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
	if (!hw_dom)
		return;

	d = &hw_dom->d_resctrl;
	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	rdt_domain_reconfigure_cdp(r);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		domain_free(hw_dom);
		return;
	}

	if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
		domain_free(hw_dom);
		return;
	}

	list_add_tail(&d->list, add_pos);

	err = resctrl_online_domain(r, d);
	if (err) {
		list_del(&d->list);
		domain_free(hw_dom);
	}
}
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
		return;
	}
	hw_dom = resctrl_to_arch_dom(d);

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		resctrl_offline_domain(r, d);
		list_del(&d->list);

		/*
		 * rdt_domain "d" is going to be freed below, so clear
		 * its pointer from pseudo_lock_region struct.
		 */
		if (d->plr)
			d->plr->d = NULL;
		domain_free(hw_dom);

		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}
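
/*
 * clear_closid_rmid() - Reset the cached per-CPU CLOSID/RMID state and the
 * PQR_ASSOC MSR so the CPU runs with the default (zero) CLOSID and RMID.
 */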
static void clear_closid_rmid(int cpu)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
}

static int resctrl_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
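
/*
 * clear_childcpus() - Remove @cpu from the cpu_mask of the first monitoring
 * child group of @r that still has it set.
 */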
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
			break;
		}
	}
}

static int resctrl_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
/*
 * Choose a width for the resource name and resource data based on the
 * resource that has widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;

	for_each_alloc_capable_rdt_resource(r) {
		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_L2_CDP,
	RDT_FLAG_MBA,
	RDT_FLAG_SMBA,
	RDT_FLAG_BMEC,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char	*name;
	int	flag;
	bool	force_off, force_on;
};
static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal",	X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal",	X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",	X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
	RDT_OPT(RDT_FLAG_SMBA,	    "smba",	X86_FEATURE_SMBA),
	RDT_OPT(RDT_FLAG_BMEC,	    "bmec",	X86_FEATURE_BMEC),
};

#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
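
/*
 * set_rdt_options() - Parse the "rdt=" kernel command line option. Each
 * comma-separated token names one of the features in rdt_options[]; a
 * leading '!' forces that feature off, otherwise it is forced on.
 */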
static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);
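
/*
 * rdt_cpu_has() - Like boot_cpu_has(), but also honours any force-off or
 * force-on request made for the feature via the "rdt=" command line option.
 */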
bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}
static __init bool get_mem_config(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];

	if (!rdt_cpu_has(X86_FEATURE_MBA))
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return __get_mem_config_intel(&hw_res->r_resctrl);
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

	return false;
}

static __init bool get_slow_mem_config(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_SMBA];

	if (!rdt_cpu_has(X86_FEATURE_SMBA))
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

	return false;
}
static __init bool get_rdt_alloc_resources(void)
{
	struct rdt_resource *r;
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		rdt_get_cache_alloc_cfg(1, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are the same format as 0x10.1 */
		r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
		rdt_get_cache_alloc_cfg(2, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (get_mem_config())
		ret = true;

	if (get_slow_mem_config())
		ret = true;

	return ret;
}
static __init bool get_rdt_mon_resources(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(r);
}
static __init void __check_quirks_intel(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
		else
			set_rdt_options("!l3cat");
		fallthrough;
	case INTEL_FAM6_BROADWELL_X:
		intel_rdt_mbm_apply_quirk();
		break;
	}
}

static __init void check_quirks(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		__check_quirks_intel();
}
static __init bool get_rdt_resources(void)
{
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}
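
/*
 * Vendor-specific resource defaults: properties that CPUID does not
 * enumerate (MSR bases, per-CPU configuration behaviour, minimum CBM bits)
 * are filled in below for Intel and AMD respectively.
 */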
static __init void rdt_init_res_defs_intel(void)
{
	struct rdt_hw_resource *hw_res;
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		hw_res = resctrl_to_arch_res(r);

		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L2) {
			r->cache.arch_has_per_cpu_cfg = false;
			r->cache.min_cbm_bits = 1;
		} else if (r->rid == RDT_RESOURCE_MBA) {
			hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
			hw_res->msr_update = mba_wrmsr_intel;
		}
	}
}

static __init void rdt_init_res_defs_amd(void)
{
	struct rdt_hw_resource *hw_res;
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		hw_res = resctrl_to_arch_res(r);

		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L2) {
			r->cache.arch_has_sparse_bitmasks = true;
			r->cache.arch_has_per_cpu_cfg = true;
			r->cache.min_cbm_bits = 0;
		} else if (r->rid == RDT_RESOURCE_MBA) {
			hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
			hw_res->msr_update = mba_wrmsr_amd;
		} else if (r->rid == RDT_RESOURCE_SMBA) {
			hw_res->msr_base = MSR_IA32_SMBA_BW_BASE;
			hw_res->msr_update = mba_wrmsr_amd;
		}
	}
}

static __init void rdt_init_res_defs(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		rdt_init_res_defs_intel();
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		rdt_init_res_defs_amd();
}
static enum cpuhp_state rdt_online;

/* Runs once on the BSP during boot. */
void resctrl_cpu_detect(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
		c->x86_cache_max_rmid = -1;
		c->x86_cache_occ_scale = -1;
		c->x86_cache_mbm_width_offset = -1;
		return;
	}

	/* will be overridden if occupancy monitoring exists */
	c->x86_cache_max_rmid = cpuid_ebx(0xf);

	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
		u32 eax, ebx, ecx, edx;

		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);

		c->x86_cache_max_rmid = ecx;
		c->x86_cache_occ_scale = ebx;
		c->x86_cache_mbm_width_offset = eax & 0xff;

		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
	}
}
static int __init resctrl_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	/*
	 * Initialize functions (or definitions) that are different
	 * between vendors here.
	 */
	rdt_init_res_defs();

	check_quirks();

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/resctrl/cat:online:",
				  resctrl_online_cpu, resctrl_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}
	rdt_online = state;

	for_each_alloc_capable_rdt_resource(r)
		pr_info("%s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("%s monitoring detected\n", r->name);

	return 0;
}

late_initcall(resctrl_late_init);
static void __exit resctrl_exit(void)
{
	cpuhp_remove_state(rdt_online);
}

__exitcall(resctrl_exit);