KVM: x86: Copy kvm_pmu_ops by value to eliminate layer of indirection
Replace the kvm_pmu_ops pointer in common x86 with an instance of the
struct to save one pointer dereference when invoking functions. Copy the
struct by value to set the ops during kvm_init().

Signed-off-by: Like Xu <likexu@tencent.com>
[sean: Move pmc_is_enabled(), make kvm_pmu_ops static]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220329235054.3534728-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 8f969c0c34 (parent fdc298da86)
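For context, the win is one pointer dereference per call: kvm_x86_ops.pmu_ops->fn() costs two dependent loads (the pmu_ops pointer, then the function pointer), while kvm_pmu_ops.fn() costs one. Below is a minimal, self-contained userspace sketch of the same ops-by-value pattern; the struct layout, the stub callback, and main() are illustrative stand-ins, not the kernel's actual kvm_pmu_ops.

/* pmu_ops_sketch.c - illustrative only; build with: cc pmu_ops_sketch.c */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct pmc;	/* opaque counter handle; a forward declaration suffices */

struct pmu_ops {
	bool (*pmc_is_enabled)(struct pmc *pmc);	/* one hypothetical hook */
};

/* The ops live here by value: invoking a hook loads only the function
 * pointer itself, instead of first loading a pointer to the ops struct. */
static struct pmu_ops pmu_ops;

/* Set once at init time, mirroring kvm_pmu_ops_update() during kvm_init(). */
static void pmu_ops_update(const struct pmu_ops *ops)
{
	memcpy(&pmu_ops, ops, sizeof(pmu_ops));
}

/* Stand-in for a vendor (Intel/AMD-style) implementation. */
static bool stub_pmc_is_enabled(struct pmc *pmc)
{
	(void)pmc;
	return true;
}

static const struct pmu_ops stub_ops = {
	.pmc_is_enabled = stub_pmc_is_enabled,
};

int main(void)
{
	pmu_ops_update(&stub_ops);	/* one-time by-value copy */
	printf("enabled: %d\n", pmu_ops.pmc_is_enabled(NULL));	/* single load */
	return 0;
}

The trade-off is a single memcpy() at init in exchange for a cheaper call on every hot path, which is what the hunks below do for kvm_pmu_ops.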
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -49,6 +49,18 @@
  *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
  */
 
+static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;
+
+void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
+{
+	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));
+}
+
+static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
+{
+	return kvm_pmu_ops.pmc_is_enabled(pmc);
+}
+
 static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 {
 	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
@@ -213,7 +225,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 			  ARCH_PERFMON_EVENTSEL_CMASK |
 			  HSW_IN_TX |
 			  HSW_IN_TX_CHECKPOINTED))) {
-		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
+		config = kvm_pmu_ops.pmc_perf_hw_id(pmc);
 		if (config != PERF_COUNT_HW_MAX)
 			type = PERF_TYPE_HARDWARE;
 	}
@@ -263,7 +275,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 
 	pmc->current_config = (u64)ctrl;
 	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			      kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
+			      kvm_pmu_ops.pmc_perf_hw_id(pmc),
 			      !(en_field & 0x2), /* exclude user */
 			      !(en_field & 0x1), /* exclude kernel */
 			      pmi);
@@ -272,7 +284,7 @@ EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
 {
-	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
+	struct kvm_pmc *pmc = kvm_pmu_ops.pmc_idx_to_pmc(pmu, pmc_idx);
 
 	if (!pmc)
 		return;
@@ -294,7 +306,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 	int bit;
 
 	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);
+		struct kvm_pmc *pmc = kvm_pmu_ops.pmc_idx_to_pmc(pmu, bit);
 
 		if (unlikely(!pmc || !pmc->perf_event)) {
 			clear_bit(bit, pmu->reprogram_pmi);
@@ -316,7 +328,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 /* check if idx is a valid index to access PMU */
 bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
-	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
+	return kvm_pmu_ops.is_valid_rdpmc_ecx(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -366,7 +378,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
+	pmc = kvm_pmu_ops.rdpmc_ecx_to_pmc(vcpu, idx, &mask);
 	if (!pmc)
 		return 1;
 
@@ -382,22 +394,22 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
 	if (lapic_in_kernel(vcpu)) {
-		if (kvm_x86_ops.pmu_ops->deliver_pmi)
-			kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
+		if (kvm_pmu_ops.deliver_pmi)
+			kvm_pmu_ops.deliver_pmi(vcpu);
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 	}
 }
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
-	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
-		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
+	return kvm_pmu_ops.msr_idx_to_pmc(vcpu, msr) ||
+		kvm_pmu_ops.is_valid_msr(vcpu, msr);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);
+	struct kvm_pmc *pmc = kvm_pmu_ops.msr_idx_to_pmc(vcpu, msr);
 
 	if (pmc)
 		__set_bit(pmc->idx, pmu->pmc_in_use);
@@ -405,13 +417,13 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
+	return kvm_pmu_ops.get_msr(vcpu, msr_info);
 }
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
+	return kvm_pmu_ops.set_msr(vcpu, msr_info);
 }
 
 /* refresh PMU settings. This function generally is called when underlying
@@ -420,7 +432,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops.pmu_ops->refresh(vcpu);
+	kvm_pmu_ops.refresh(vcpu);
 }
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
@@ -428,7 +440,7 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	irq_work_sync(&pmu->irq_work);
-	kvm_x86_ops.pmu_ops->reset(vcpu);
+	kvm_pmu_ops.reset(vcpu);
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
@@ -436,7 +448,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	memset(pmu, 0, sizeof(*pmu));
-	kvm_x86_ops.pmu_ops->init(vcpu);
+	kvm_pmu_ops.init(vcpu);
 	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
 	pmu->event_count = 0;
 	pmu->need_cleanup = false;
@@ -468,14 +480,14 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 		      pmu->pmc_in_use, X86_PMC_IDX_MAX);
 
 	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
-		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);
+		pmc = kvm_pmu_ops.pmc_idx_to_pmc(pmu, i);
 
 		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
 			pmc_stop_counter(pmc);
 	}
 
-	if (kvm_x86_ops.pmu_ops->cleanup)
-		kvm_x86_ops.pmu_ops->cleanup(vcpu);
+	if (kvm_pmu_ops.cleanup)
+		kvm_pmu_ops.cleanup(vcpu);
 
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }
@@ -505,7 +517,7 @@ static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
 	unsigned int config;
 
 	pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
-	config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
+	config = kvm_pmu_ops.pmc_perf_hw_id(pmc);
 	pmc->eventsel = old_eventsel;
 	return config == perf_hw_id;
 }
@@ -533,7 +545,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
 	int i;
 
 	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
-		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);
+		pmc = kvm_pmu_ops.pmc_idx_to_pmc(pmu, i);
 
 		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
 			continue;
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -39,6 +39,8 @@ struct kvm_pmu_ops {
 	void (*cleanup)(struct kvm_vcpu *vcpu);
 };
 
+void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
+
 static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -86,11 +88,6 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
 	return pmc->type == KVM_PMC_FIXED;
 }
 
-static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
-{
-	return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
-}
-
 static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
 						 u64 data)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11632,6 +11632,8 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
 					   (void *)__static_call_return0);
 #include <asm/kvm-x86-ops.h>
 #undef __KVM_X86_OP
+
+	kvm_pmu_ops_update(ops->runtime_ops->pmu_ops);
 }
 
 int kvm_arch_hardware_setup(void *opaque)