x86 / msr: add 64bit _on_cpu access functions
Having 64-bit MSR access methods for a given CPU avoids shifting and simplifies MSR content manipulation. We already have the other rdmsrl_xxx and wrmsrl_xxx combinations, but the _on_cpu variants were missing.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Reviewed-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
commit 1a6b991a98
parent 12cc4b3827
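To illustrate the simplification the changelog describes, here is a minimal sketch (not part of the commit): a caller that needs a full 64-bit MSR value from another CPU previously had to read two 32-bit halves and merge them by hand, whereas the new helpers hand back the 64-bit value directly. MSR_PLATFORM_INFO is used purely as an example register, and the read_pkg_info_*() helpers are hypothetical.

/* Illustrative sketch only -- not part of this commit.
 * Assumes kernel context; MSR_PLATFORM_INFO is just an example MSR and
 * read_pkg_info_*() are hypothetical driver helpers.
 */
#include <linux/types.h>
#include <asm/msr.h>

/* Before: read two u32 halves and reassemble the 64-bit value by hand. */
static int read_pkg_info_old(unsigned int cpu, u64 *val)
{
	u32 lo, hi;
	int err;

	err = rdmsr_safe_on_cpu(cpu, MSR_PLATFORM_INFO, &lo, &hi);
	if (!err)
		*val = ((u64)hi << 32) | lo;
	return err;
}

/* After: the new 64-bit helper hides the shift/merge entirely. */
static int read_pkg_info_new(unsigned int cpu, u64 *val)
{
	return rdmsrl_safe_on_cpu(cpu, MSR_PLATFORM_INFO, val);
}

As with the existing 32-bit helpers, the _safe variants return a nonzero error either when the cross-CPU call fails or when the MSR access itself faults, so caller error handling is unchanged.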
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -218,10 +218,14 @@ void msrs_free(struct msr *msrs);
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 #else /* CONFIG_SMP */
@@ -235,6 +239,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 	wrmsr(msr_no, l, h);
 	return 0;
 }
+static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	rdmsrl(msr_no, *q);
+	return 0;
+}
+static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	wrmsrl(msr_no, q);
+	return 0;
+}
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
 				struct msr *msrs)
 {
@@ -254,6 +268,14 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	return wrmsr_safe(msr_no, l, h);
 }
+static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	return rdmsrl_safe(msr_no, q);
+}
+static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	return wrmsrl_safe(msr_no, q);
+}
 static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
 	return rdmsr_safe_regs(regs);
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -47,6 +47,21 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 }
 EXPORT_SYMBOL(rdmsr_on_cpu);
 
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+	*q = rv.reg.q;
+
+	return err;
+}
+EXPORT_SYMBOL(rdmsrl_on_cpu);
+
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
@@ -63,6 +78,22 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 }
 EXPORT_SYMBOL(wrmsr_on_cpu);
 
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	rv.reg.q = q;
+
+	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+	return err;
+}
+EXPORT_SYMBOL(wrmsrl_on_cpu);
+
 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
 			    struct msr *msrs,
 			    void (*msr_func) (void *info))
@@ -159,6 +190,37 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 }
 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
 
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	rv.reg.q = q;
+
+	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
+
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+	*q = rv.reg.q;
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
+
 /*
  * These variants are significantly slower, but allows control over
  * the entire 32-bit GPR set.