Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Misc fixes: an ABI fix for a reserved field, AMD IBS fixes, an Intel
  uncore PMU driver fix and a header typo fix"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/headers: Fix spelling s/EACCESS/EACCES/, s/privilidge/privilege/
  perf/x86/uncore: Fix event group support
  perf/x86/amd/ibs: Handle erratum #420 only on the affected CPU family (10h)
  perf/x86/amd/ibs: Fix reading of the IBS OpData register and thus precise RIP validity
  perf/core: Start rejecting the syscall with attr.__reserved_2 set
commit 355f83c1d0
arch/x86/events/amd/ibs.c
@@ -377,7 +377,8 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
 					  struct hw_perf_event *hwc, u64 config)
 {
 	config &= ~perf_ibs->cnt_mask;
-	wrmsrl(hwc->config_base, config);
+	if (boot_cpu_data.x86 == 0x10)
+		wrmsrl(hwc->config_base, config);
 	config &= ~perf_ibs->enable_mask;
 	wrmsrl(hwc->config_base, config);
 }
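For context, the post-patch function reads as follows (a commented sketch reconstructed from the hunk above, nothing beyond it):

	static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
						  struct hw_perf_event *hwc, u64 config)
	{
		config &= ~perf_ibs->cnt_mask;

		/*
		 * Erratum #420 workaround: issue the extra clearing write
		 * only on family 10h, the only family affected; every
		 * other family gets just the single combined write below.
		 */
		if (boot_cpu_data.x86 == 0x10)
			wrmsrl(hwc->config_base, config);

		config &= ~perf_ibs->enable_mask;
		wrmsrl(hwc->config_base, config);
	}

The next two hunks are in the same file, arch/x86/events/amd/ibs.c.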
@@ -553,7 +554,8 @@ static struct perf_ibs perf_ibs_op = {
 	},
 	.msr			= MSR_AMD64_IBSOPCTL,
 	.config_mask		= IBS_OP_CONFIG_MASK,
-	.cnt_mask		= IBS_OP_MAX_CNT,
+	.cnt_mask		= IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
+				  IBS_OP_CUR_CNT_RAND,
 	.enable_mask		= IBS_OP_ENABLE,
 	.valid_mask		= IBS_OP_VAL,
 	.max_period		= IBS_OP_MAX_CNT << 4,
@@ -614,7 +616,7 @@ fail:
 	if (event->attr.sample_type & PERF_SAMPLE_RAW)
 		offset_max = perf_ibs->offset_max;
 	else if (check_rip)
-		offset_max = 2;
+		offset_max = 3;
 	else
 		offset_max = 1;
 	do {
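Why 3 rather than 2: offset_max is an exclusive bound on the MSR walk in the do/while loop above, so the old value of 2 stopped after reading IbsOpRip at offset 1 and never fetched IbsOpData at offset 2, which carries the bit that says whether the sampled RIP is valid. A simplified sketch of that loop (reconstructed from memory of the surrounding code, not part of this diff):

	offset = 1;				/* first MSR after IBSOPCTL: IbsOpRip */
	do {
		rdmsrl(msr + offset, *buf++);	/* offset_max = 3 now covers
						 * IbsOpRip (1) and IbsOpData (2) */
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);		/* exclusive upper bound */

The remaining x86 hunks are in the Intel uncore driver.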
arch/x86/events/intel/uncore.c
@@ -502,10 +502,8 @@ void uncore_pmu_event_start(struct perf_event *event, int flags)
 	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
 	uncore_enable_event(box, event);
 
-	if (box->n_active == 1) {
-		uncore_enable_box(box);
+	if (box->n_active == 1)
 		uncore_pmu_start_hrtimer(box);
-	}
 }
 
 void uncore_pmu_event_stop(struct perf_event *event, int flags)
@@ -529,10 +527,8 @@ void uncore_pmu_event_stop(struct perf_event *event, int flags)
 		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 		hwc->state |= PERF_HES_STOPPED;
 
-		if (box->n_active == 0) {
-			uncore_disable_box(box);
+		if (box->n_active == 0)
 			uncore_pmu_cancel_hrtimer(box);
-		}
 	}
 
 	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
@@ -778,6 +774,40 @@ static int uncore_pmu_event_init(struct perf_event *event)
 	return ret;
 }
 
+static void uncore_pmu_enable(struct pmu *pmu)
+{
+	struct intel_uncore_pmu *uncore_pmu;
+	struct intel_uncore_box *box;
+
+	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+	if (!uncore_pmu)
+		return;
+
+	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+	if (!box)
+		return;
+
+	if (uncore_pmu->type->ops->enable_box)
+		uncore_pmu->type->ops->enable_box(box);
+}
+
+static void uncore_pmu_disable(struct pmu *pmu)
+{
+	struct intel_uncore_pmu *uncore_pmu;
+	struct intel_uncore_box *box;
+
+	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+	if (!uncore_pmu)
+		return;
+
+	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+	if (!box)
+		return;
+
+	if (uncore_pmu->type->ops->disable_box)
+		uncore_pmu->type->ops->disable_box(box);
+}
+
 static ssize_t uncore_get_attr_cpumask(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -803,6 +833,8 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
 	pmu->pmu = (struct pmu) {
 		.attr_groups	= pmu->type->attr_groups,
 		.task_ctx_nr	= perf_invalid_context,
+		.pmu_enable	= uncore_pmu_enable,
+		.pmu_disable	= uncore_pmu_disable,
 		.event_init	= uncore_pmu_event_init,
 		.add		= uncore_pmu_event_add,
 		.del		= uncore_pmu_event_del,
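These two callbacks are the heart of the group fix: perf core already brackets scheduling of an event group with pmu_disable()/pmu_enable(), so enabling the box there, instead of in per-event start/stop, lets all counters of a group start and stop together. A simplified sketch of the core-side wrappers in kernel/events/core.c that invoke them (from memory, locking and details trimmed):

	void perf_pmu_disable(struct pmu *pmu)
	{
		int *count = this_cpu_ptr(pmu->pmu_disable_count);

		if (!(*count)++)		/* first disabler on this CPU */
			pmu->pmu_disable(pmu);	/* -> uncore_pmu_disable() */
	}

	void perf_pmu_enable(struct pmu *pmu)
	{
		int *count = this_cpu_ptr(pmu->pmu_disable_count);

		if (!--(*count))		/* last enabler on this CPU */
			pmu->pmu_enable(pmu);	/* -> uncore_pmu_enable() */
	}

With the box handled here, the old uncore_enable_box()/uncore_disable_box() helpers lose their last callers and are removed from the header below.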
arch/x86/events/intel/uncore.h
@@ -441,18 +441,6 @@ static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
 	return -EINVAL;
 }
 
-static inline void uncore_disable_box(struct intel_uncore_box *box)
-{
-	if (box->pmu->type->ops->disable_box)
-		box->pmu->type->ops->disable_box(box);
-}
-
-static inline void uncore_enable_box(struct intel_uncore_box *box)
-{
-	if (box->pmu->type->ops->enable_box)
-		box->pmu->type->ops->enable_box(box);
-}
-
 static inline void uncore_disable_event(struct intel_uncore_box *box,
 				        struct perf_event *event)
 {
include/linux/perf_event.h
@@ -292,7 +292,7 @@ struct pmu {
 	 * -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
 	 * -EINVAL	-- @event is for this PMU but @event is not valid
 	 * -EOPNOTSUPP  -- @event is for this PMU, @event is valid, but not supported
-	 * -EACCESS	-- @event is for this PMU, @event is valid, but no privilidges
+	 * -EACCES	-- @event is for this PMU, @event is valid, but no privileges
 	 *
 	 * 0		-- @event is for this PMU and valid
 	 *
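A hypothetical userspace probe of the error code documented above (the event choice and the privilege scenario are illustrative assumptions, not part of this patch):

	/* Hypothetical probe: an unprivileged per-CPU event typically
	 * fails with EACCES (the code documented above), not "EACCESS". */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr = {
			.type	= PERF_TYPE_HARDWARE,
			.size	= sizeof(attr),
			.config	= PERF_COUNT_HW_CPU_CYCLES,
		};
		/* pid = -1, cpu = 0: monitor everything on CPU 0; this
		 * needs privileges, so unprivileged callers get EACCES. */
		int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);

		if (fd < 0)
			printf("perf_event_open: %s (EACCES? %d)\n",
			       strerror(errno), errno == EACCES);
		else
			close(fd);
		return 0;
	}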
kernel/events/core.c
@@ -10635,7 +10635,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 
 	attr->size = size;
 
-	if (attr->__reserved_1)
+	if (attr->__reserved_1 || attr->__reserved_2)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
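To see the ABI change from userspace, a minimal sketch (assuming a post-fix kernel; the field name comes from the uapi perf_event_attr definition):

	/* Sketch: a nonzero attr.__reserved_2 is now rejected with
	 * EINVAL; pre-fix kernels silently accepted it. */
	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr = {
			.type	      = PERF_TYPE_SOFTWARE,
			.size	      = sizeof(attr),
			.config	      = PERF_COUNT_SW_CPU_CLOCK,
			.__reserved_2 = 1,	/* must be zero per the ABI */
		};
		int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

		if (fd < 0 && errno == EINVAL)
			puts("rejected: reserved fields must be zero");
		else if (fd >= 0)
			close(fd);	/* an older kernel ignored the field */
		return 0;
	}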