Merge tag 'x86_cache_for_v6.1_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cache resource control updates from Borislav Petkov:

 - More work by James Morse to disentangle the resctrl filesystem
   generic code from the architectural one, with the end goal of also
   plugging ARM's MPAM implementation into it, so that the user
   interface remains the same

 - Properly restore the MSR_MISC_FEATURE_CONTROL value instead of
   blindly overwriting it with 0

* tag 'x86_cache_for_v6.1_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86/resctrl: Make resctrl_arch_rmid_read() return values in bytes
  x86/resctrl: Add resctrl_rmid_realloc_limit to abstract x86's boot_cpu_data
  x86/resctrl: Rename and change the units of resctrl_cqm_threshold
  x86/resctrl: Move get_corrected_mbm_count() into resctrl_arch_rmid_read()
  x86/resctrl: Move mbm_overflow_count() into resctrl_arch_rmid_read()
  x86/resctrl: Pass the required parameters into resctrl_arch_rmid_read()
  x86/resctrl: Abstract __rmid_read()
  x86/resctrl: Allow per-rmid arch private storage to be reset
  x86/resctrl: Add per-rmid arch private storage for overflow and chunks
  x86/resctrl: Calculate bandwidth from the previous __mon_event_count() chunks
  x86/resctrl: Allow update_mba_bw() to update controls directly
  x86/resctrl: Remove architecture copy of mbps_val
  x86/resctrl: Switch over to the resctrl mbps_val list
  x86/resctrl: Create mba_sc configuration in the rdt_domain
  x86/resctrl: Abstract and use supports_mba_mbps()
  x86/resctrl: Remove set_mba_sc()s control array re-initialisation
  x86/resctrl: Add domain offline callback for resctrl work
  x86/resctrl: Group struct rdt_hw_domain cleanup
  x86/resctrl: Add domain online callback for resctrl work
  x86/resctrl: Merge mon_capable and mon_enabled
  ...
commit 193e2268a3
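
The first bullet centres on arch hooks such as resctrl_arch_rmid_read(): per
the monitor.c and include/linux/resctrl.h hunks below, the hook now returns 0
or a -errno and reports the counter through *val, already scaled to bytes, so
the filesystem side no longer reaches into x86's mon_scale. A condensed sketch
of a consumer, drawn from the rdtgroup_mondata_show() hunk (not the literal
call chain, which goes through mon_event_read() and __mon_event_count()):

    u64 val = 0;
    int err;

    /* Must run on a CPU belonging to domain d; val comes back in bytes. */
    err = resctrl_arch_rmid_read(r, d, rmid, QOS_L3_OCCUP_EVENT_ID, &val);
    if (err == -EIO)                /* hardware set RMID_VAL_ERROR */
        seq_puts(m, "Error\n");
    else if (err == -EINVAL)        /* RMID_VAL_UNAVAIL, or a bad event id */
        seq_puts(m, "Unavailable\n");
    else
        seq_printf(m, "%llu\n", val);   /* no mon_scale multiply needed */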
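
The second bullet corresponds to the pseudo_lock.c hunks below: the
prefetcher-disable paths used to "re-enable" prefetchers by writing 0 to
MSR_MISC_FEATURE_CONTROL, clobbering whatever other bits were set. The fix is
a plain save/restore, condensed here from pseudo_lock_fn():

    u64 saved_msr;

    saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
    __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);

    /* ... populate and measure the pseudo-locked region ... */

    /* Re-enable the hardware prefetcher(s): restore, don't write 0 */
    wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);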
diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
@@ -81,6 +81,15 @@ static void __resctrl_sched_in(void)
 	}
 }
 
+static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
+{
+	unsigned int scale = boot_cpu_data.x86_cache_occ_scale;
+
+	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
+	val /= scale;
+	return val * scale;
+}
+
 static inline void resctrl_sched_in(void)
 {
 	if (static_branch_likely(&rdt_enable_key))
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
@@ -147,7 +147,6 @@ static inline void cache_alloc_hsw_probe(void)
 	r->cache.shareable_bits = 0xc0000;
 	r->cache.min_cbm_bits = 2;
 	r->alloc_capable = true;
-	r->alloc_enabled = true;
 
 	rdt_alloc_capable = true;
 }
@@ -211,7 +210,6 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
 	thread_throttle_mode_init();
 
 	r->alloc_capable = true;
-	r->alloc_enabled = true;
 
 	return true;
 }
@@ -242,7 +240,6 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 	r->data_width = 4;
 
 	r->alloc_capable = true;
-	r->alloc_enabled = true;
 
 	return true;
 }
@@ -261,7 +258,6 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 	r->cache.shareable_bits = ebx & r->default_ctrl;
 	r->data_width = (r->cache.cbm_len + 3) / 4;
 	r->alloc_capable = true;
-	r->alloc_enabled = true;
 }
 
 static void rdt_get_cdp_config(int level)
@@ -300,7 +296,7 @@ mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
  * that can be written to QOS_MSRs.
  * There are currently no SKUs which support non linear delay values.
  */
-u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
+static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
 {
 	if (r->membw.delay_linear)
 		return MAX_MBA_BW - bw;
@@ -401,7 +397,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 	return NULL;
 }
 
-void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
+static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
 {
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 	int i;
@@ -410,12 +406,17 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 	 * Initialize the Control MSRs to having no control.
 	 * For Cache Allocation: Set all bits in cbm
 	 * For Memory Allocation: Set b/w requested to 100%
-	 * and the bandwidth in MBps to U32_MAX
 	 */
-	for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
+	for (i = 0; i < hw_res->num_closid; i++, dc++)
 		*dc = r->default_ctrl;
-		*dm = MBA_MAX_MBPS;
-	}
+}
+
+static void domain_free(struct rdt_hw_domain *hw_dom)
+{
+	kfree(hw_dom->arch_mbm_total);
+	kfree(hw_dom->arch_mbm_local);
+	kfree(hw_dom->ctrl_val);
+	kfree(hw_dom);
 }
 
 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
@@ -423,23 +424,15 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
 	struct msr_param m;
-	u32 *dc, *dm;
+	u32 *dc;
 
 	dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
 			   GFP_KERNEL);
 	if (!dc)
 		return -ENOMEM;
 
-	dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val),
-			   GFP_KERNEL);
-	if (!dm) {
-		kfree(dc);
-		return -ENOMEM;
-	}
-
 	hw_dom->ctrl_val = dc;
-	hw_dom->mbps_val = dm;
-	setup_default_ctrlval(r, dc, dm);
+	setup_default_ctrlval(r, dc);
 
 	m.low = 0;
 	m.high = hw_res->num_closid;
@@ -447,39 +440,31 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 	return 0;
 }
 
-static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
+/**
+ * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
+ * @num_rmid:	The size of the MBM counter array
+ * @hw_dom:	The domain that owns the allocated arrays
+ */
+static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
 {
 	size_t tsize;
 
-	if (is_llc_occupancy_enabled()) {
-		d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
-		if (!d->rmid_busy_llc)
-			return -ENOMEM;
-		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
-	}
 	if (is_mbm_total_enabled()) {
-		tsize = sizeof(*d->mbm_total);
-		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
-		if (!d->mbm_total) {
-			bitmap_free(d->rmid_busy_llc);
+		tsize = sizeof(*hw_dom->arch_mbm_total);
+		hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
+		if (!hw_dom->arch_mbm_total)
 			return -ENOMEM;
-		}
 	}
 	if (is_mbm_local_enabled()) {
-		tsize = sizeof(*d->mbm_local);
-		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
-		if (!d->mbm_local) {
-			bitmap_free(d->rmid_busy_llc);
-			kfree(d->mbm_total);
+		tsize = sizeof(*hw_dom->arch_mbm_local);
+		hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
+		if (!hw_dom->arch_mbm_local) {
+			kfree(hw_dom->arch_mbm_total);
+			hw_dom->arch_mbm_total = NULL;
			return -ENOMEM;
 		}
 	}
 
-	if (is_mbm_enabled()) {
-		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
-		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
-	}
-
 	return 0;
 }
@@ -502,6 +487,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 	struct list_head *add_pos = NULL;
 	struct rdt_hw_domain *hw_dom;
 	struct rdt_domain *d;
+	int err;
 
 	d = rdt_find_domain(r, id, &add_pos);
 	if (IS_ERR(d)) {
@@ -527,25 +513,22 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 	rdt_domain_reconfigure_cdp(r);
 
 	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
-		kfree(hw_dom);
+		domain_free(hw_dom);
 		return;
 	}
 
-	if (r->mon_capable && domain_setup_mon_state(r, d)) {
-		kfree(hw_dom->ctrl_val);
-		kfree(hw_dom->mbps_val);
-		kfree(hw_dom);
+	if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
+		domain_free(hw_dom);
 		return;
 	}
 
 	list_add_tail(&d->list, add_pos);
 
-	/*
-	 * If resctrl is mounted, add
-	 * per domain monitor data directories.
-	 */
-	if (static_branch_unlikely(&rdt_mon_enable_key))
-		mkdir_mondata_subdir_allrdtgrp(r, d);
+	err = resctrl_online_domain(r, d);
+	if (err) {
+		list_del(&d->list);
+		domain_free(hw_dom);
+	}
 }
 
 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
@@ -563,27 +546,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 	cpumask_clear_cpu(cpu, &d->cpu_mask);
 	if (cpumask_empty(&d->cpu_mask)) {
-		/*
-		 * If resctrl is mounted, remove all the
-		 * per domain monitor data directories.
-		 */
-		if (static_branch_unlikely(&rdt_mon_enable_key))
-			rmdir_mondata_subdir_allrdtgrp(r, d->id);
+		resctrl_offline_domain(r, d);
 		list_del(&d->list);
-		if (r->mon_capable && is_mbm_enabled())
-			cancel_delayed_work(&d->mbm_over);
-		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
-			/*
-			 * When a package is going down, forcefully
-			 * decrement rmid->ebusy. There is no way to know
-			 * that the L3 was flushed and hence may lead to
-			 * incorrect counts in rare scenarios, but leaving
-			 * the RMID as busy creates RMID leaks if the
-			 * package never comes back.
-			 */
-			__check_limbo(d, true);
-			cancel_delayed_work(&d->cqm_limbo);
-		}
 
 		/*
 		 * rdt_domain "d" is going to be freed below, so clear
@@ -591,13 +555,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 		 */
 		if (d->plr)
 			d->plr->d = NULL;
+		domain_free(hw_dom);
 
-		kfree(hw_dom->ctrl_val);
-		kfree(hw_dom->mbps_val);
-		bitmap_free(d->rmid_busy_llc);
-		kfree(d->mbm_total);
-		kfree(d->mbm_local);
-		kfree(hw_dom);
 		return;
 	}
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -61,6 +61,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
 	     struct rdt_domain *d)
 {
 	struct resctrl_staged_config *cfg;
+	u32 closid = data->rdtgrp->closid;
 	struct rdt_resource *r = s->res;
 	unsigned long bw_val;
 
@@ -72,6 +73,12 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
 
 	if (!bw_validate(data->buf, &bw_val, r))
 		return -EINVAL;
+
+	if (is_mba_sc(r)) {
+		d->mbps_val[closid] = bw_val;
+		return 0;
+	}
+
 	cfg->new_ctrl = bw_val;
 	cfg->have_new_ctrl = true;
 
@@ -261,14 +268,13 @@ static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
 
 static bool apply_config(struct rdt_hw_domain *hw_dom,
 			 struct resctrl_staged_config *cfg, u32 idx,
-			 cpumask_var_t cpu_mask, bool mba_sc)
+			 cpumask_var_t cpu_mask)
 {
 	struct rdt_domain *dom = &hw_dom->d_resctrl;
-	u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;
 
-	if (cfg->new_ctrl != dc[idx]) {
+	if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
 		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
-		dc[idx] = cfg->new_ctrl;
+		hw_dom->ctrl_val[idx] = cfg->new_ctrl;
 
 		return true;
 	}
@@ -276,6 +282,27 @@ static bool apply_config(struct rdt_hw_domain *hw_dom,
 	return false;
 }
 
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
+{
+	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+	u32 idx = get_config_index(closid, t);
+	struct msr_param msr_param;
+
+	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
+		return -EINVAL;
+
+	hw_dom->ctrl_val[idx] = cfg_val;
+
+	msr_param.res = r;
+	msr_param.low = idx;
+	msr_param.high = idx + 1;
+	hw_res->msr_update(d, &msr_param, r);
+
+	return 0;
+}
+
 int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 {
 	struct resctrl_staged_config *cfg;
@@ -284,14 +311,12 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 	enum resctrl_conf_type t;
 	cpumask_var_t cpu_mask;
 	struct rdt_domain *d;
-	bool mba_sc;
 	int cpu;
 	u32 idx;
 
 	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
 		return -ENOMEM;
 
-	mba_sc = is_mba_sc(r);
 	msr_param.res = NULL;
 	list_for_each_entry(d, &r->domains, list) {
 		hw_dom = resctrl_to_arch_dom(d);
@@ -301,7 +326,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 			continue;
 
 			idx = get_config_index(closid, t);
-			if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
+			if (!apply_config(hw_dom, cfg, idx, cpu_mask))
 				continue;
 
 			if (!msr_param.res) {
@@ -315,11 +340,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 		}
 	}
 
-	/*
-	 * Avoid writing the control msr with control values when
-	 * MBA software controller is enabled
-	 */
-	if (cpumask_empty(cpu_mask) || mba_sc)
+	if (cpumask_empty(cpu_mask))
 		goto done;
 	cpu = get_cpu();
 	/* Update resource control msr on this CPU if it's in cpu_mask. */
@@ -406,6 +427,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
 	list_for_each_entry(s, &resctrl_schema_all, list) {
 		r = s->res;
+
+		/*
+		 * Writes to mba_sc resources update the software controller,
+		 * not the control MSR.
+		 */
+		if (is_mba_sc(r))
+			continue;
+
 		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
 		if (ret)
 			goto out;
@@ -433,9 +462,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
 	u32 idx = get_config_index(closid, type);
 
-	if (!is_mba_sc(r))
-		return hw_dom->ctrl_val[idx];
-	return hw_dom->mbps_val[idx];
+	return hw_dom->ctrl_val[idx];
 }
 
 static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
@@ -450,8 +477,12 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
 		if (sep)
 			seq_puts(s, ";");
 
-		ctrl_val = resctrl_arch_get_config(r, dom, closid,
-						   schema->conf_type);
+		if (is_mba_sc(r))
+			ctrl_val = dom->mbps_val[closid];
+		else
+			ctrl_val = resctrl_arch_get_config(r, dom, closid,
+							   schema->conf_type);
+
 		seq_printf(s, r->format_str, dom->id, max_data_width,
 			   ctrl_val);
 		sep = true;
@@ -518,7 +549,6 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 {
 	struct kernfs_open_file *of = m->private;
-	struct rdt_hw_resource *hw_res;
 	u32 resid, evtid, domid;
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
@@ -538,8 +568,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 	domid = md.u.domid;
 	evtid = md.u.evtid;
 
-	hw_res = &rdt_resources_all[resid];
-	r = &hw_res->r_resctrl;
+	r = &rdt_resources_all[resid].r_resctrl;
 	d = rdt_find_domain(r, domid, NULL);
 	if (IS_ERR_OR_NULL(d)) {
 		ret = -ENOENT;
@@ -548,12 +577,12 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 
 	mon_event_read(&rr, r, d, rdtgrp, evtid, false);
 
-	if (rr.val & RMID_VAL_ERROR)
+	if (rr.err == -EIO)
 		seq_puts(m, "Error\n");
-	else if (rr.val & RMID_VAL_UNAVAIL)
+	else if (rr.err == -EINVAL)
 		seq_puts(m, "Unavailable\n");
 	else
-		seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);
+		seq_printf(m, "%llu\n", rr.val);
 
 out:
 	rdtgroup_kn_unlock(of->kn);
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -22,21 +22,12 @@
 
 #define L2_QOS_CDP_ENABLE		0x01ULL
 
-/*
- * Event IDs are used to program IA32_QM_EVTSEL before reading event
- * counter from IA32_QM_CTR
- */
-#define QOS_L3_OCCUP_EVENT_ID		0x01
-#define QOS_L3_MBM_TOTAL_EVENT_ID	0x02
-#define QOS_L3_MBM_LOCAL_EVENT_ID	0x03
-
 #define CQM_LIMBOCHECK_INTERVAL		1000
 
 #define MBM_CNTR_WIDTH_BASE		24
 #define MBM_OVERFLOW_INTERVAL		1000
 #define MAX_MBA_BW			100u
 #define MBA_IS_LINEAR			0x4
-#define MBA_MAX_MBPS			U32_MAX
 #define MAX_MBA_BW_AMD			0x800
 #define MBM_CNTR_WIDTH_OFFSET_AMD	20
 
@@ -74,7 +65,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 * @list:	entry in &rdt_resource->evt_list
 */
 struct mon_evt {
-	u32			evtid;
+	enum resctrl_event_id	evtid;
 	char			*name;
 	struct list_head	list;
 };
@@ -91,9 +82,9 @@ struct mon_evt {
 union mon_data_bits {
 	void *priv;
 	struct {
-		unsigned int rid	: 10;
-		unsigned int evtid	: 8;
-		unsigned int domid	: 14;
+		unsigned int rid		: 10;
+		enum resctrl_event_id evtid	: 8;
+		unsigned int domid		: 14;
 	} u;
 };
 
@@ -101,12 +92,12 @@ struct rmid_read {
 	struct rdtgroup		*rgrp;
 	struct rdt_resource	*r;
 	struct rdt_domain	*d;
-	int			evtid;
+	enum resctrl_event_id	evtid;
 	bool			first;
+	int			err;
 	u64			val;
 };
 
-extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
@@ -288,35 +279,45 @@ struct rftype {
 
 /**
 * struct mbm_state - status for each MBM counter in each domain
- * @chunks:	Total data moved (multiply by rdt_group.mon_scale to get bytes)
- * @prev_msr:	Value of IA32_QM_CTR for this RMID last time we read it
- * @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting
+ * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
 * @prev_bw:	The most recent bandwidth in MBps
 * @delta_bw:	Difference between the current and previous bandwidth
 * @delta_comp:	Indicates whether to compute the delta_bw
 */
 struct mbm_state {
-	u64	chunks;
-	u64	prev_msr;
-	u64	prev_bw_msr;
+	u64	prev_bw_bytes;
 	u32	prev_bw;
 	u32	delta_bw;
 	bool	delta_comp;
 };
 
+/**
+ * struct arch_mbm_state - values used to compute resctrl_arch_rmid_read()s
+ *			   return value.
+ * @chunks:	Total data moved (multiply by rdt_group.mon_scale to get bytes)
+ * @prev_msr:	Value of IA32_QM_CTR last time it was read for the RMID used to
+ *		find this struct.
+ */
+struct arch_mbm_state {
+	u64	chunks;
+	u64	prev_msr;
+};
+
 /**
 * struct rdt_hw_domain - Arch private attributes of a set of CPUs that share
 *			  a resource
 * @d_resctrl:	Properties exposed to the resctrl file system
 * @ctrl_val:	array of cache or mem ctrl values (indexed by CLOSID)
- * @mbps_val:	When mba_sc is enabled, this holds the bandwidth in MBps
+ * @arch_mbm_total:	arch private state for MBM total bandwidth
+ * @arch_mbm_local:	arch private state for MBM local bandwidth
 *
 * Members of this structure are accessed via helpers that provide abstraction.
 */
 struct rdt_hw_domain {
 	struct rdt_domain		d_resctrl;
 	u32				*ctrl_val;
-	u32				*mbps_val;
+	struct arch_mbm_state		*arch_mbm_total;
+	struct arch_mbm_state		*arch_mbm_local;
 };
 
 static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
@@ -459,14 +460,6 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
	for_each_rdt_resource(r)					      \
		if (r->mon_capable)
 
-#define for_each_alloc_enabled_rdt_resource(r)				      \
-	for_each_rdt_resource(r)					      \
-		if (r->alloc_enabled)
-
-#define for_each_mon_enabled_rdt_resource(r)				      \
-	for_each_rdt_resource(r)					      \
-		if (r->mon_enabled)
-
 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
 union cpuid_0x10_1_eax {
	struct {
@@ -530,10 +523,6 @@ void free_rmid(u32 rmid);
 int rdt_get_mon_l3_config(struct rdt_resource *r);
 void mon_event_count(void *info);
 int rdtgroup_mondata_show(struct seq_file *m, void *arg);
-void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
-				    unsigned int dom_id);
-void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
-				    struct rdt_domain *d);
 void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first);
@@ -542,8 +531,6 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom,
 void mbm_handle_overflow(struct work_struct *work);
 void __init intel_rdt_mbm_apply_quirk(void);
 bool is_mba_sc(struct rdt_resource *r);
-void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm);
-u32 delay_bw_map(unsigned long bw, struct rdt_resource *r);
 void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
 void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -16,8 +16,12 @@
 */
 
 #include <linux/module.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
+
 #include <asm/cpu_device_id.h>
+#include <asm/resctrl.h>
+
 #include "internal.h"
 
 struct rmid_entry {
@@ -37,8 +41,8 @@ static LIST_HEAD(rmid_free_lru);
 * @rmid_limbo_count	count of currently unused but (potentially)
 *			dirty RMIDs.
 *			This counts RMIDs that no one is currently using but that
- *			may have a occupancy value > intel_cqm_threshold. User can change
- *			the threshold occupancy value.
+ *			may have a occupancy value > resctrl_rmid_realloc_threshold. User can
+ *			change the threshold occupancy value.
 */
 static unsigned int rmid_limbo_count;
 
@@ -59,10 +63,15 @@ bool rdt_mon_capable;
 unsigned int rdt_mon_features;
 
 /*
- * This is the threshold cache occupancy at which we will consider an
+ * This is the threshold cache occupancy in bytes at which we will consider an
 * RMID available for re-allocation.
 */
-unsigned int resctrl_cqm_threshold;
+unsigned int resctrl_rmid_realloc_threshold;
+
+/*
+ * This is the maximum value for the reallocation threshold, in bytes.
+ */
+unsigned int resctrl_rmid_realloc_limit;
 
 #define CF(cf)	((unsigned long)(1048576 * (cf) + 0.5))
 
@@ -137,9 +146,54 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
	return entry;
 }
 
-static u64 __rmid_read(u32 rmid, u32 eventid)
+static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
+						 u32 rmid,
+						 enum resctrl_event_id eventid)
 {
-	u64 val;
+	switch (eventid) {
+	case QOS_L3_OCCUP_EVENT_ID:
+		return NULL;
+	case QOS_L3_MBM_TOTAL_EVENT_ID:
+		return &hw_dom->arch_mbm_total[rmid];
+	case QOS_L3_MBM_LOCAL_EVENT_ID:
+		return &hw_dom->arch_mbm_local[rmid];
+	}
+
+	/* Never expect to get here */
+	WARN_ON_ONCE(1);
+
+	return NULL;
+}
+
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+			     u32 rmid, enum resctrl_event_id eventid)
+{
+	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+	struct arch_mbm_state *am;
+
+	am = get_arch_mbm_state(hw_dom, rmid, eventid);
+	if (am)
+		memset(am, 0, sizeof(*am));
+}
+
+static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
+{
+	u64 shift = 64 - width, chunks;
+
+	chunks = (cur_msr << shift) - (prev_msr << shift);
+	return chunks >> shift;
+}
+
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+			   u32 rmid, enum resctrl_event_id eventid, u64 *val)
+{
+	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+	struct arch_mbm_state *am;
+	u64 msr_val, chunks;
+
+	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
+		return -EINVAL;
 
	/*
	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
@@ -150,16 +204,26 @@ static u64 __rmid_read(u32 rmid, u32 eventid)
	 * are error bits.
	 */
	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
-	rdmsrl(MSR_IA32_QM_CTR, val);
+	rdmsrl(MSR_IA32_QM_CTR, msr_val);
 
-	return val;
-}
+	if (msr_val & RMID_VAL_ERROR)
+		return -EIO;
+	if (msr_val & RMID_VAL_UNAVAIL)
+		return -EINVAL;
 
-static bool rmid_dirty(struct rmid_entry *entry)
-{
-	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
+	am = get_arch_mbm_state(hw_dom, rmid, eventid);
+	if (am) {
+		am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
+						 hw_res->mbm_width);
+		chunks = get_corrected_mbm_count(rmid, am->chunks);
+		am->prev_msr = msr_val;
+	} else {
+		chunks = msr_val;
+	}
 
-	return val >= resctrl_cqm_threshold;
+	*val = chunks * hw_res->mon_scale;
+
+	return 0;
 }
 
/*
@@ -170,11 +234,11 @@ static bool rmid_dirty(struct rmid_entry *entry)
 */
 void __check_limbo(struct rdt_domain *d, bool force_free)
 {
+	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	struct rmid_entry *entry;
-	struct rdt_resource *r;
	u32 crmid = 1, nrmid;
-
-	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	bool rmid_dirty;
+	u64 val = 0;
 
	/*
	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
@@ -188,7 +252,15 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
			break;
 
		entry = __rmid_entry(nrmid);
-		if (force_free || !rmid_dirty(entry)) {
+
+		if (resctrl_arch_rmid_read(r, d, entry->rmid,
+					   QOS_L3_OCCUP_EVENT_ID, &val)) {
+			rmid_dirty = true;
+		} else {
+			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
+		}
+
+		if (force_free || !rmid_dirty) {
			clear_bit(entry->rmid, d->rmid_busy_llc);
			if (!--entry->busy) {
				rmid_limbo_count--;
@@ -227,19 +299,19 @@ int alloc_rmid(void)
 
 static void add_rmid_to_limbo(struct rmid_entry *entry)
 {
-	struct rdt_resource *r;
+	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	struct rdt_domain *d;
-	int cpu;
-	u64 val;
-
-	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	int cpu, err;
+	u64 val = 0;
 
	entry->busy = 0;
	cpu = get_cpu();
	list_for_each_entry(d, &r->domains, list) {
		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
-			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-			if (val <= resctrl_cqm_threshold)
+			err = resctrl_arch_rmid_read(r, d, entry->rmid,
+						     QOS_L3_OCCUP_EVENT_ID,
+						     &val);
+			if (err || val <= resctrl_rmid_realloc_threshold)
				continue;
		}
 
@@ -277,24 +349,18 @@ void free_rmid(u32 rmid)
	list_add_tail(&entry->list, &rmid_free_lru);
 }
 
-static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
+static int __mon_event_count(u32 rmid, struct rmid_read *rr)
 {
-	u64 shift = 64 - width, chunks;
-
-	chunks = (cur_msr << shift) - (prev_msr << shift);
-	return chunks >> shift;
-}
-
-static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
-{
-	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
	struct mbm_state *m;
-	u64 chunks, tval;
+	u64 tval = 0;
+
+	if (rr->first)
+		resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid);
+
+	rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval);
+	if (rr->err)
+		return rr->err;
 
-	tval = __rmid_read(rmid, rr->evtid);
-	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
-		return tval;
-	}
	switch (rr->evtid) {
	case QOS_L3_OCCUP_EVENT_ID:
		rr->val += tval;
@@ -308,48 +374,47 @@ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
	default:
		/*
		 * Code would never reach here because an invalid
-		 * event id would fail the __rmid_read.
+		 * event id would fail in resctrl_arch_rmid_read().
		 */
-		return RMID_VAL_ERROR;
+		return -EINVAL;
	}
 
	if (rr->first) {
		memset(m, 0, sizeof(struct mbm_state));
-		m->prev_bw_msr = m->prev_msr = tval;
		return 0;
	}
 
-	chunks = mbm_overflow_count(m->prev_msr, tval, hw_res->mbm_width);
-	m->chunks += chunks;
-	m->prev_msr = tval;
-
-	rr->val += get_corrected_mbm_count(rmid, m->chunks);
+	rr->val += tval;
 
	return 0;
 }
 
 /*
+ * mbm_bw_count() - Update bw count from values previously read by
+ *		    __mon_event_count().
+ * @rmid:	The rmid used to identify the cached mbm_state.
+ * @rr:		The struct rmid_read populated by __mon_event_count().
+ *
 * Supporting function to calculate the memory bandwidth
- * and delta bandwidth in MBps.
+ * and delta bandwidth in MBps. The chunks value previously read by
+ * __mon_event_count() is compared with the chunks value from the previous
+ * invocation. This must be called once per second to maintain values in MBps.
 */
 static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 {
-	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
	struct mbm_state *m = &rr->d->mbm_local[rmid];
-	u64 tval, cur_bw, chunks;
+	u64 cur_bw, bytes, cur_bytes;
 
-	tval = __rmid_read(rmid, rr->evtid);
-	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
-		return;
+	cur_bytes = rr->val;
+	bytes = cur_bytes - m->prev_bw_bytes;
+	m->prev_bw_bytes = cur_bytes;
 
-	chunks = mbm_overflow_count(m->prev_bw_msr, tval, hw_res->mbm_width);
-	cur_bw = (get_corrected_mbm_count(rmid, chunks) * hw_res->mon_scale) >> 20;
+	cur_bw = bytes / SZ_1M;
 
	if (m->delta_comp)
		m->delta_bw = abs(cur_bw - m->prev_bw);
	m->delta_comp = false;
	m->prev_bw = cur_bw;
-	m->prev_bw_msr = tval;
 }
 
 /*
@@ -361,11 +426,11 @@ void mon_event_count(void *info)
	struct rdtgroup *rdtgrp, *entry;
	struct rmid_read *rr = info;
	struct list_head *head;
-	u64 ret_val;
+	int ret;
 
	rdtgrp = rr->rgrp;
 
-	ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
+	ret = __mon_event_count(rdtgrp->mon.rmid, rr);
 
	/*
	 * For Ctrl groups read data from child monitor groups and
@@ -377,13 +442,17 @@ void mon_event_count(void *info)
	if (rdtgrp->type == RDTCTRL_GROUP) {
		list_for_each_entry(entry, head, mon.crdtgrp_list) {
			if (__mon_event_count(entry->mon.rmid, rr) == 0)
-				ret_val = 0;
+				ret = 0;
		}
	}
 
-	/* Report error if none of rmid_reads are successful */
-	if (ret_val)
-		rr->val = ret_val;
+	/*
+	 * __mon_event_count() calls for newly created monitor groups may
+	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
+	 * Discard error if any of the monitor event reads succeeded.
+	 */
+	if (ret == 0)
+		rr->err = 0;
 }
 
 /*
@@ -420,10 +489,8 @@ void mon_event_count(void *info)
 */
 static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
-	u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
+	u32 closid, rmid, cur_msr_val, new_msr_val;
	struct mbm_state *pmbm_data, *cmbm_data;
-	struct rdt_hw_resource *hw_r_mba;
-	struct rdt_hw_domain *hw_dom_mba;
	u32 cur_bw, delta_bw, user_bw;
	struct rdt_resource *r_mba;
	struct rdt_domain *dom_mba;
@@ -433,8 +500,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
	if (!is_mbm_local_enabled())
		return;
 
-	hw_r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
-	r_mba = &hw_r_mba->r_resctrl;
+	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
 
	closid = rgrp->closid;
	rmid = rgrp->mon.rmid;
	pmbm_data = &dom_mbm->mbm_local[rmid];
@@ -444,16 +511,13 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
		pr_warn_once("Failure to get domain for MBA update\n");
		return;
	}
-	hw_dom_mba = resctrl_to_arch_dom(dom_mba);
 
	cur_bw = pmbm_data->prev_bw;
-	user_bw = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
+	user_bw = dom_mba->mbps_val[closid];
	delta_bw = pmbm_data->delta_bw;
-	/*
-	 * resctrl_arch_get_config() chooses the mbps/ctrl value to return
-	 * based on is_mba_sc(). For now, reach into the hw_dom.
-	 */
-	cur_msr_val = hw_dom_mba->ctrl_val[closid];
+
+	/* MBA resource doesn't support CDP */
+	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
 
	/*
	 * For Ctrl groups read data from child monitor groups.
@@ -488,9 +552,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
		return;
	}
 
-	cur_msr = hw_r_mba->msr_base + closid;
-	wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
-	hw_dom_mba->ctrl_val[closid] = new_msr_val;
+	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
 
	/*
	 * Delta values are updated dynamically package wise for each
@@ -523,10 +585,12 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
	 */
	if (is_mbm_total_enabled()) {
		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
+		rr.val = 0;
		__mon_event_count(rmid, &rr);
	}
	if (is_mbm_local_enabled()) {
		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
+		rr.val = 0;
		__mon_event_count(rmid, &rr);
 
		/*
@@ -686,9 +750,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
	unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
-	unsigned int cl_size = boot_cpu_data.x86_cache_size;
+	unsigned int threshold;
	int ret;
 
+	resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
	hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
	hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
@@ -705,10 +770,14 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
	 *
	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
	 */
-	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
+	threshold = resctrl_rmid_realloc_limit / r->num_rmid;
 
-	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-	resctrl_cqm_threshold /= hw_res->mon_scale;
+	/*
+	 * Because num_rmid may not be a power of two, round the value
+	 * to the nearest multiple of hw_res->mon_scale so it matches a
+	 * value the hardware will measure. mon_scale may not be a power of 2.
+	 */
+	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold);
 
	ret = dom_data_init(r);
	if (ret)
@@ -717,7 +786,6 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
	l3_mon_evt_init(r);
 
	r->mon_capable = true;
-	r->mon_enabled = true;
 
	return 0;
 }
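
[Note: the mbm_overflow_count() helper retained above computes the delta
between two reads of a hardware counter that is only `width` bits wide.
Shifting both readings into the top bits makes the 64-bit subtraction wrap
exactly as the hardware counter does. A worked example, assuming the 24-bit
MBM_CNTR_WIDTH_BASE width:

    /* prev = 0xfffffe, cur = 0x000001: the 24-bit counter wrapped. */
    u64 shift = 64 - 24;
    u64 chunks = (0x000001ULL << shift) - (0xfffffeULL << shift);

    chunks >>= shift;   /* chunks == 3, the true number of new chunks */
]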
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -420,6 +420,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
	struct pseudo_lock_region *plr = rdtgrp->plr;
	u32 rmid_p, closid_p;
	unsigned long i;
+	u64 saved_msr;
 #ifdef CONFIG_KASAN
	/*
	 * The registers used for local register variables are also used
@@ -463,6 +464,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
	 * the buffer and evict pseudo-locked memory read earlier from the
	 * cache.
	 */
+	saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
	__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
	closid_p = this_cpu_read(pqr_state.cur_closid);
	rmid_p = this_cpu_read(pqr_state.cur_rmid);
@@ -514,7 +516,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
	__wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
 
	/* Re-enable the hardware prefetcher(s) */
-	wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+	wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
	local_irq_enable();
 
	plr->thread_done = 1;
@@ -835,7 +837,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
	 * First determine which cpus have pseudo-locked regions
	 * associated with them.
	 */
-	for_each_alloc_enabled_rdt_resource(r) {
+	for_each_alloc_capable_rdt_resource(r) {
		list_for_each_entry(d_i, &r->domains, list) {
			if (d_i->plr)
				cpumask_or(cpu_with_psl, cpu_with_psl,
@@ -871,6 +873,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
 static int measure_cycles_lat_fn(void *_plr)
 {
	struct pseudo_lock_region *plr = _plr;
+	u32 saved_low, saved_high;
	unsigned long i;
	u64 start, end;
	void *mem_r;
@@ -879,6 +882,7 @@ static int measure_cycles_lat_fn(void *_plr)
	/*
	 * Disable hardware prefetchers.
	 */
+	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
	mem_r = READ_ONCE(plr->kmem);
	/*
@@ -895,7 +899,7 @@ static int measure_cycles_lat_fn(void *_plr)
		end = rdtsc_ordered();
		trace_pseudo_lock_mem_latency((u32)(end - start));
	}
-	wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	local_irq_enable();
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
@@ -940,6 +944,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
	u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
	struct perf_event *miss_event, *hit_event;
	int hit_pmcnum, miss_pmcnum;
+	u32 saved_low, saved_high;
	unsigned int line_size;
	unsigned int size;
	unsigned long i;
@@ -973,6 +978,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
	/*
	 * Disable hardware prefetchers.
	 */
+	rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
 
	/* Initialize rest of local variables */
@@ -1031,7 +1037,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
	 */
	rmb();
	/* Re-enable hardware prefetchers */
-	wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+	wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
	local_irq_enable();
 out_hit:
	perf_event_release_kernel(hit_event);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1030,10 +1030,7 @@ static int rdt_delay_linear_show(struct kernfs_open_file *of,
 static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
 {
-	struct rdt_resource *r = of->kn->parent->priv;
-	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
-
-	seq_printf(seq, "%u\n", resctrl_cqm_threshold * hw_res->mon_scale);
+	seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);
 
	return 0;
 }
@@ -1055,7 +1052,6 @@ static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
 {
-	struct rdt_hw_resource *hw_res;
	unsigned int bytes;
	int ret;
 
@@ -1063,11 +1059,10 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
	if (ret)
		return ret;
 
-	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
+	if (bytes > resctrl_rmid_realloc_limit)
		return -EINVAL;
 
-	hw_res = resctrl_to_arch_res(of->kn->parent->priv);
-	resctrl_cqm_threshold = bytes / hw_res->mon_scale;
+	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);
 
	return nbytes;
 }
@@ -1356,11 +1351,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
 {
	struct resctrl_schema *schema;
+	enum resctrl_conf_type type;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
+	u32 closid;
	bool sep;
	u32 ctrl;
 
@@ -1386,8 +1383,11 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
		goto out;
	}
 
+	closid = rdtgrp->closid;
+
	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
+		type = schema->conf_type;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->domains, list) {
@@ -1396,9 +1396,12 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
-				ctrl = resctrl_arch_get_config(r, d,
-							       rdtgrp->closid,
-							       schema->conf_type);
+				if (is_mba_sc(r))
+					ctrl = d->mbps_val[closid];
+				else
+					ctrl = resctrl_arch_get_config(r, d,
								       closid,
								       type);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
@@ -1756,7 +1759,7 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
	if (ret)
		goto out_destroy;
 
-	/* loop over enabled controls, these are all alloc_enabled */
+	/* loop over enabled controls, these are all alloc_capable */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		fflags = r->fflags | RF_CTRL_INFO;
@@ -1765,7 +1768,7 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
			goto out_destroy;
	}
 
-	for_each_mon_enabled_rdt_resource(r) {
+	for_each_mon_capable_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
@@ -1889,26 +1892,61 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
	l3_qos_cfg_update(&hw_res->cdp_enabled);
 }
 
+static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d)
+{
+	u32 num_closid = resctrl_arch_get_num_closid(r);
+	int cpu = cpumask_any(&d->cpu_mask);
+	int i;
+
+	d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val),
+				   GFP_KERNEL, cpu_to_node(cpu));
+	if (!d->mbps_val)
+		return -ENOMEM;
+
+	for (i = 0; i < num_closid; i++)
+		d->mbps_val[i] = MBA_MAX_MBPS;
+
+	return 0;
+}
+
+static void mba_sc_domain_destroy(struct rdt_resource *r,
+				  struct rdt_domain *d)
+{
+	kfree(d->mbps_val);
+	d->mbps_val = NULL;
+}
+
+/*
+ * MBA software controller is supported only if
+ * MBM is supported and MBA is in linear scale.
+ */
+static bool supports_mba_mbps(void)
+{
+	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
+
+	return (is_mbm_local_enabled() &&
+		r->alloc_capable && is_mba_linear());
+}
+
 /*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
- * MBA software controller is supported only if
- * MBM is supported and MBA is in linear scale.
 */
 static int set_mba_sc(bool mba_sc)
 {
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
-	struct rdt_hw_domain *hw_dom;
+	u32 num_closid = resctrl_arch_get_num_closid(r);
	struct rdt_domain *d;
+	int i;
 
-	if (!is_mbm_enabled() || !is_mba_linear() ||
-	    mba_sc == is_mba_sc(r))
+	if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
		return -EINVAL;
 
	r->membw.mba_sc = mba_sc;
 
	list_for_each_entry(d, &r->domains, list) {
-		hw_dom = resctrl_to_arch_dom(d);
-		setup_default_ctrlval(r, hw_dom->ctrl_val, hw_dom->mbps_val);
+		for (i = 0; i < num_closid; i++)
+			d->mbps_val[i] = MBA_MAX_MBPS;
	}
 
	return 0;
@@ -2106,7 +2144,7 @@ static int schemata_list_create(void)
	struct rdt_resource *r;
	int ret = 0;
 
-	for_each_alloc_enabled_rdt_resource(r) {
+	for_each_alloc_capable_rdt_resource(r) {
		if (resctrl_arch_get_cdp_enabled(r->rid)) {
			ret = schemata_list_add(r, CDP_CODE);
			if (ret)
@@ -2261,7 +2299,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
-		if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		if (!supports_mba_mbps())
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
@@ -2452,7 +2490,7 @@ static void rdt_kill_sb(struct super_block *sb)
	set_mba_sc(false);
 
	/*Put everything back to default values. */
-	for_each_alloc_enabled_rdt_resource(r)
+	for_each_alloc_capable_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
@@ -2499,14 +2537,12 @@ static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
-void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
+static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+					   unsigned int dom_id)
 {
	struct rdtgroup *prgrp, *crgrp;
	char name[32];
 
-	if (!r->mon_enabled)
-		return;
-
	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
@@ -2565,16 +2601,13 @@ out_destroy:
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with given domain id.
 */
-void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
-				    struct rdt_domain *d)
+static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+					   struct rdt_domain *d)
 {
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;
 
-	if (!r->mon_enabled)
-		return;
-
	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);
@@ -2642,7 +2675,7 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn,
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3
	 */
-	for_each_mon_enabled_rdt_resource(r) {
+	for_each_mon_capable_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
@@ -2786,14 +2819,19 @@ static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
 }
 
 /* Initialize MBA resource with default values. */
-static void rdtgroup_init_mba(struct rdt_resource *r)
+static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
 {
	struct resctrl_staged_config *cfg;
	struct rdt_domain *d;
 
	list_for_each_entry(d, &r->domains, list) {
+		if (is_mba_sc(r)) {
+			d->mbps_val[closid] = MBA_MAX_MBPS;
+			continue;
+		}
+
		cfg = &d->staged_config[CDP_NONE];
-		cfg->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+		cfg->new_ctrl = r->default_ctrl;
		cfg->have_new_ctrl = true;
	}
 }
@@ -2808,7 +2846,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA) {
-			rdtgroup_init_mba(r);
+			rdtgroup_init_mba(r, rdtgrp->closid);
+			if (is_mba_sc(r))
+				continue;
		} else {
			ret = rdtgroup_init_cat(s, rdtgrp->closid);
			if (ret < 0)
@@ -3236,6 +3276,110 @@ out:
	return ret;
 }
 
+static void domain_destroy_mon_state(struct rdt_domain *d)
+{
+	bitmap_free(d->rmid_busy_llc);
+	kfree(d->mbm_total);
+	kfree(d->mbm_local);
+}
+
+void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
+{
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
+		mba_sc_domain_destroy(r, d);
+
+	if (!r->mon_capable)
+		return;
+
+	/*
+	 * If resctrl is mounted, remove all the
+	 * per domain monitor data directories.
+	 */
+	if (static_branch_unlikely(&rdt_mon_enable_key))
+		rmdir_mondata_subdir_allrdtgrp(r, d->id);
+
+	if (is_mbm_enabled())
+		cancel_delayed_work(&d->mbm_over);
+	if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
+		/*
+		 * When a package is going down, forcefully
+		 * decrement rmid->ebusy. There is no way to know
+		 * that the L3 was flushed and hence may lead to
+		 * incorrect counts in rare scenarios, but leaving
+		 * the RMID as busy creates RMID leaks if the
+		 * package never comes back.
+		 */
+		__check_limbo(d, true);
+		cancel_delayed_work(&d->cqm_limbo);
+	}
+
+	domain_destroy_mon_state(d);
+}
+
+static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
+{
+	size_t tsize;
+
+	if (is_llc_occupancy_enabled()) {
+		d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
+		if (!d->rmid_busy_llc)
+			return -ENOMEM;
+	}
+	if (is_mbm_total_enabled()) {
+		tsize = sizeof(*d->mbm_total);
+		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
+		if (!d->mbm_total) {
+			bitmap_free(d->rmid_busy_llc);
+			return -ENOMEM;
+		}
+	}
+	if (is_mbm_local_enabled()) {
+		tsize = sizeof(*d->mbm_local);
+		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
+		if (!d->mbm_local) {
+			bitmap_free(d->rmid_busy_llc);
+			kfree(d->mbm_total);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
+{
+	int err;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
+		/* RDT_RESOURCE_MBA is never mon_capable */
+		return mba_sc_domain_allocate(r, d);
+
+	if (!r->mon_capable)
+		return 0;
+
+	err = domain_setup_mon_state(r, d);
+	if (err)
+		return err;
+
+	if (is_mbm_enabled()) {
+		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
+		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
+	}
+
+	if (is_llc_occupancy_enabled())
+		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
+
+	/* If resctrl is mounted, add per domain monitor data directories. */
+	if (static_branch_unlikely(&rdt_mon_enable_key))
+		mkdir_mondata_subdir_allrdtgrp(r, d);
+
+	return 0;
+}
+
 /*
 * rdtgroup_init - rdtgroup initialization
 *
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
@@ -15,6 +15,9 @@ int proc_resctrl_show(struct seq_file *m,
 
 #endif
 
+/* max value for struct rdt_domain's mbps_val */
+#define MBA_MAX_MBPS	U32_MAX
+
 /**
 * enum resctrl_conf_type - The type of configuration.
 * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
@@ -29,6 +32,16 @@ enum resctrl_conf_type {
 
 #define CDP_NUM_TYPES	(CDP_DATA + 1)
 
+/*
+ * Event IDs, the values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
+enum resctrl_event_id {
+	QOS_L3_OCCUP_EVENT_ID		= 0x01,
+	QOS_L3_MBM_TOTAL_EVENT_ID	= 0x02,
+	QOS_L3_MBM_LOCAL_EVENT_ID	= 0x03,
+};
+
 /**
 * struct resctrl_staged_config - parsed configuration to be applied
 * @new_ctrl:		new ctrl value to be loaded
@@ -53,6 +66,9 @@ struct resctrl_staged_config {
 * @cqm_work_cpu:	worker CPU for CQM h/w counters
 * @plr:		pseudo-locked region (if any) associated with domain
 * @staged_config:	parsed configuration to be applied
+ * @mbps_val:		When mba_sc is enabled, this holds the array of user
+ *			specified control values for mba_sc in MBps, indexed
+ *			by closid
 */
 struct rdt_domain {
	struct list_head		list;
@@ -67,6 +83,7 @@ struct rdt_domain {
	int				cqm_work_cpu;
	struct pseudo_lock_region	*plr;
	struct resctrl_staged_config	staged_config[CDP_NUM_TYPES];
+	u32				*mbps_val;
 };
 
 /**
@@ -130,8 +147,6 @@ struct resctrl_schema;
 /**
 * struct rdt_resource - attributes of a resctrl resource
 * @rid:		The index of the resource
- * @alloc_enabled:	Is allocation enabled on this machine
- * @mon_enabled:	Is monitoring enabled for this feature
 * @alloc_capable:	Is allocation available on this machine
 * @mon_capable:	Is monitor feature available on this machine
 * @num_rmid:		Number of RMIDs available
@@ -150,8 +165,6 @@ struct resctrl_schema;
 */
 struct rdt_resource {
	int			rid;
-	bool			alloc_enabled;
-	bool			mon_enabled;
	bool			alloc_capable;
	bool			mon_capable;
	int			num_rmid;
@@ -194,7 +207,50 @@ struct resctrl_schema {
 /* The number of closid supported by this resource regardless of CDP */
 u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
 int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+
+/*
+ * Update the ctrl_val and apply this config right now.
+ * Must be called on one of the domain's CPUs.
+ */
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+			    u32 closid, enum resctrl_conf_type t, u32 cfg_val);
+
 u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type);
+int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
+
+/**
+ * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
+ *			      for this resource and domain.
+ * @r:		resource that the counter should be read from.
+ * @d:		domain that the counter should be read from.
+ * @rmid:	rmid of the counter to read.
+ * @eventid:	eventid to read, e.g. L3 occupancy.
+ * @val:	result of the counter read in bytes.
+ *
+ * Call from process context on a CPU that belongs to domain @d.
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+			   u32 rmid, enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
+ *			       and eventid.
+ * @r:		The domain's resource.
+ * @d:		The rmid's domain.
+ * @rmid:	The rmid whose counter values should be reset.
+ * @eventid:	The eventid whose counter values should be reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+			     u32 rmid, enum resctrl_event_id eventid);
+
+extern unsigned int resctrl_rmid_realloc_threshold;
+extern unsigned int resctrl_rmid_realloc_limit;
+
 #endif /* _RESCTRL_H */