Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Two hw-enablement patches, two race fixes, three fixes for regressions
  of semantics, plus a number of tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: Add proper condition to run sched_task callbacks
  perf/core: Fix locking for children siblings group read
  perf/core: Fix scheduling regression of pinned groups
  perf/x86/intel: Fix debug_store reset field for freq events
  perf/x86/intel: Add Goldmont Plus CPU PMU support
  perf/x86/intel: Enable C-state residency events for Apollo Lake
  perf symbols: Accept zero as the kernel base address
  Revert "perf/core: Drop kernel samples even though :u is specified"
  perf annotate: Fix broken arrow at row 0 connecting jmp instruction to its target
  perf evsel: State in the default event name if attr.exclude_kernel is set
  perf evsel: Fix attr.exclude_kernel setting for default cycles:p
Linus Torvalds 2017-07-21 11:12:48 -07:00
commit bbcdea658f
9 changed files with 221 additions and 42 deletions

arch/x86/events/intel/core.c

@@ -1708,6 +1708,120 @@ static __initconst const u64 glm_hw_cache_extra_regs
},
};
static __initconst const u64 glp_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
[C(RESULT_MISS)] = 0x0,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
[C(RESULT_MISS)] = 0x0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x0,
[C(RESULT_MISS)] = 0x0,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
[C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x0,
[C(RESULT_MISS)] = 0x0,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
[C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
[C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x0,
[C(RESULT_MISS)] = 0x0,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
[C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
[C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x0,
[C(RESULT_MISS)] = 0x0,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
[C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
[C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
};
static __initconst const u64 glp_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = GLM_DEMAND_READ|
GLM_LLC_ACCESS,
[C(RESULT_MISS)] = GLM_DEMAND_READ|
GLM_LLC_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
GLM_LLC_ACCESS,
[C(RESULT_MISS)] = GLM_DEMAND_WRITE|
GLM_LLC_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0x0,
[C(RESULT_MISS)] = 0x0,
},
},
};
#define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL BIT_ULL(21)
@@ -3016,6 +3130,9 @@ static int hsw_hw_config(struct perf_event *event)
return 0;
}
static struct event_constraint counter0_constraint =
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
static struct event_constraint counter2_constraint =
EVENT_CONSTRAINT(0, 0x4, 0);
@@ -3037,6 +3154,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
return c;
}
static struct event_constraint *
glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
struct event_constraint *c;
/* :ppp means to do reduced skid PEBS which is PMC0 only. */
if (event->attr.precise_ip == 3)
return &counter0_constraint;
c = intel_get_event_constraints(cpuc, idx, event);
return c;
}
/*
* Broadwell:
*
@@ -3265,10 +3397,8 @@ static void intel_pmu_cpu_dying(int cpu)
static void intel_pmu_sched_task(struct perf_event_context *ctx,
bool sched_in)
{
- if (x86_pmu.pebs_active)
-         intel_pmu_pebs_sched_task(ctx, sched_in);
- if (x86_pmu.lbr_nr)
-         intel_pmu_lbr_sched_task(ctx, sched_in);
+ intel_pmu_pebs_sched_task(ctx, sched_in);
+ intel_pmu_lbr_sched_task(ctx, sched_in);
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
@@ -3838,6 +3968,32 @@ __init int intel_pmu_init(void)
pr_cont("Goldmont events, ");
break;
case INTEL_FAM6_ATOM_GEMINI_LAKE:
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_skl();
x86_pmu.event_constraints = intel_slm_event_constraints;
x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
x86_pmu.extra_regs = intel_glm_extra_regs;
/*
* It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
* for precise cycles.
*/
x86_pmu.pebs_aliases = NULL;
x86_pmu.pebs_prec_dist = true;
x86_pmu.lbr_pt_coexist = true;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.get_event_constraints = glp_get_event_constraints;
x86_pmu.cpu_events = glm_events_attrs;
/* Goldmont Plus has 4-wide pipeline */
event_attr_td_total_slots_scale_glm.event_str = "4";
pr_cont("Goldmont plus events, ");
break;
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_WESTMERE_EP:
case INTEL_FAM6_WESTMERE_EX:
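
The new glp_get_event_constraints() above pins reduced-skid PEBS (precise_ip == 3, i.e. the :ppp event modifier) to PMC0. As a hedged illustration only, not part of this patch, here is a minimal userspace request for such an event via perf_event_open(); the sample period is arbitrary and the kernel may reject a precise_ip level the hardware cannot provide:

#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100003;    /* arbitrary */
        attr.precise_ip = 3;            /* ":ppp", reduced-skid PEBS */
        attr.exclude_kernel = 1;        /* sample user space only */

        fd = syscall(__NR_perf_event_open, &attr, 0 /* this thread */,
                     -1 /* any CPU */, -1 /* no group */, 0);
        if (fd < 0)
                perror("perf_event_open");
        else
                close(fd);
        return 0;
}

perf record -e cycles:ppp builds an equivalent attribute.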

arch/x86/events/intel/cstate.c

@@ -40,16 +40,16 @@
* Model specific counters:
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
- * Available model: SLM,AMT
+ * Available model: SLM,AMT,GLM
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
- * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- * SKL,KNL
+ * SKL,KNL,GLM
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -57,16 +57,17 @@
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
- * Available model: SNB,IVB,HSW,BDW,SKL,KNL
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
+ * GLM
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- * SKL,KNL
+ * SKL,KNL,GLM
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -82,7 +83,7 @@
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
- * Available model: HSW ULT only
+ * Available model: HSW ULT, GLM
* Scope: Package (physical package)
*
*/
@@ -504,6 +505,17 @@ static const struct cstate_model knl_cstates __initconst = {
};
static const struct cstate_model glm_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
BIT(PERF_CSTATE_CORE_C3_RES) |
BIT(PERF_CSTATE_CORE_C6_RES),
.pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
BIT(PERF_CSTATE_PKG_C3_RES) |
BIT(PERF_CSTATE_PKG_C6_RES) |
BIT(PERF_CSTATE_PKG_C10_RES),
};
#define X86_CSTATES_MODEL(model, states) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
@@ -546,6 +558,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
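
The glm_cstates model above exposes the listed residency MSRs through the cstate_core and cstate_pkg PMUs. A hedged sketch of reading one from userspace; the sysfs path, the choice of CPU 0 and perf code 0x00 (MSR_CORE_C1_RES, per the comment block above) are illustrative assumptions, and the open typically needs root or a permissive perf_event_paranoid setting:

#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int type, fd;
        FILE *f;

        /* the cstate PMU type is assigned dynamically, so read it from sysfs */
        f = fopen("/sys/bus/event_source/devices/cstate_core/type", "r");
        if (!f)
                return 1;
        if (fscanf(f, "%d", &type) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = 0x00;     /* C1 residency, the "perf code" above */

        /* c-state residency is a per-CPU counter, not a per-task one */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0 /* cpu 0 */, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        sleep(1);
        if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                printf("CPU0 C1 residency since open: %llu\n",
                       (unsigned long long)count);
        close(fd);
        return 0;
}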

arch/x86/events/intel/ds.c

@@ -606,12 +606,6 @@ static inline void intel_pmu_drain_pebs_buffer(void)
x86_pmu.drain_pebs(&regs);
}
- void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
- {
-         if (!sched_in)
-                 intel_pmu_drain_pebs_buffer();
- }
/*
* PEBS
*/
@@ -651,6 +645,12 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
struct event_constraint intel_glp_pebs_event_constraints[] = {
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
};
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
@@ -816,6 +816,14 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}
+ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+ {
+         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+         if (!sched_in && pebs_needs_sched_cb(cpuc))
+                 intel_pmu_drain_pebs_buffer();
+ }
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
@@ -889,6 +897,8 @@ void intel_pmu_pebs_enable(struct perf_event *event)
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
ds->pebs_event_reset[hwc->idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
} else {
ds->pebs_event_reset[hwc->idx] = 0;
}
}

arch/x86/events/intel/lbr.c

@@ -380,8 +380,12 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx;
if (!cpuc->lbr_users)
return;
/*
* If LBR callstack feature is enabled and the stack was saved when
* the task was scheduled out, restore the stack. Otherwise flush

arch/x86/events/perf_event.h

@@ -879,6 +879,8 @@ extern struct event_constraint intel_slm_pebs_event_constraints[];
extern struct event_constraint intel_glm_pebs_event_constraints[];
extern struct event_constraint intel_glp_pebs_event_constraints[];
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];

kernel/events/core.c

@@ -1452,6 +1452,13 @@ static enum event_type_t get_event_type(struct perf_event *event)
lockdep_assert_held(&ctx->lock);
/*
* It's 'group type', really, because if our group leader is
* pinned, so are we.
*/
if (event->group_leader != event)
event = event->group_leader;
event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
if (!ctx->task)
event_type |= EVENT_CPU;
@@ -4378,7 +4385,9 @@ EXPORT_SYMBOL_GPL(perf_event_read_value);
static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
struct perf_event *sub;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;
@@ -4408,12 +4417,15 @@ static int __perf_read_group_add(struct perf_event *leader,
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
raw_spin_lock_irqsave(&ctx->lock, flags);
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
values[n++] += perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
}
raw_spin_unlock_irqrestore(&ctx->lock, flags);
return 0;
}
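
The hunk above serializes the sibling_list walk in __perf_read_group_add(), and the get_event_type() hunk earlier in this file makes a group take its pinned/flexible type from the leader. A hedged userspace sketch of the case both changes cover, a pinned leader with one sibling read back via PERF_FORMAT_GROUP | PERF_FORMAT_ID; the event choices and minimal error handling are illustrative only:

#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static int open_event(struct perf_event_attr *attr, int group_fd)
{
        return syscall(__NR_perf_event_open, attr, 0 /* this thread */,
                       -1 /* any CPU */, group_fd, 0);
}

int main(void)
{
        struct perf_event_attr attr;
        /* layout for this read_format: nr, then {value, id} per event */
        uint64_t buf[1 + 2 * 2];
        int leader, sibling;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.pinned = 1;        /* pinned leader => pinned group */
        attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;
        leader = open_event(&attr, -1);

        attr.pinned = 0;        /* siblings follow the leader's group type */
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        sibling = open_event(&attr, leader);

        if (leader < 0 || sibling < 0) {
                perror("perf_event_open");
                return 1;
        }
        if (read(leader, buf, sizeof(buf)) > 0)
                printf("%llu events: cycles=%llu instructions=%llu\n",
                       (unsigned long long)buf[0],
                       (unsigned long long)buf[1],
                       (unsigned long long)buf[3]);
        close(sibling);
        close(leader);
        return 0;
}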
@@ -7321,21 +7333,6 @@ int perf_event_account_interrupt(struct perf_event *event)
return __perf_event_account_interrupt(event, 1);
}
- static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
- {
-         /*
-          * Due to interrupt latency (AKA "skid"), we may enter the
-          * kernel before taking an overflow, even if the PMU is only
-          * counting user events.
-          * To avoid leaking information to userspace, we must always
-          * reject kernel samples when exclude_kernel is set.
-          */
-         if (event->attr.exclude_kernel && !user_mode(regs))
-                 return false;
-         return true;
- }
/*
* Generic event overflow handling, sampling.
*/
@@ -7356,12 +7353,6 @@ static int __perf_event_overflow(struct perf_event *event,
ret = __perf_event_account_interrupt(event, throttle);
-         /*
-          * For security, drop the skid kernel samples if necessary.
-          */
-         if (!sample_is_allowed(event, regs))
-                 return ret;
/*
* XXX event_limit might not quite work as expected on inherited
* events

tools/perf/ui/browser.c

@@ -704,7 +704,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser,
ui_browser__gotorc(browser, row, column + 1);
SLsmg_draw_hline(2);
- if (row++ == 0)
+ if (++row == 0)
goto out;
} else
row = 0;

tools/perf/util/evsel.c

@@ -273,7 +273,7 @@ struct perf_evsel *perf_evsel__new_cycles(void)
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
- .exclude_kernel = 1,
+ .exclude_kernel = geteuid() != 0,
};
struct perf_evsel *evsel;
@@ -298,8 +298,10 @@ struct perf_evsel *perf_evsel__new_cycles(void)
goto out;
/* use asprintf() because free(evsel) assumes name is allocated */
- if (asprintf(&evsel->name, "cycles%.*s",
-              attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
+ if (asprintf(&evsel->name, "cycles%s%s%.*s",
+              (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
+              attr.exclude_kernel ? "u" : "",
+              attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
goto error_free;
out:
return evsel;
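
With the change above, the default event name carries a :u modifier whenever attr.exclude_kernel is set, in addition to the precision suffix. A small standalone sketch (not perf code) mirroring the asprintf() format to show the resulting names; the precise_ip values are made up:

#define _GNU_SOURCE             /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

/* mirror of the name format used above */
static char *cycles_name(int exclude_kernel, int precise_ip)
{
        char *name = NULL;

        if (asprintf(&name, "cycles%s%s%.*s",
                     (precise_ip || exclude_kernel) ? ":" : "",
                     exclude_kernel ? "u" : "",
                     precise_ip ? precise_ip + 1 : 0, "ppp") < 0)
                return NULL;
        return name;
}

int main(void)
{
        char *root = cycles_name(0, 3);         /* "cycles:ppp"  */
        char *user = cycles_name(1, 3);         /* "cycles:uppp" */
        char *nopebs = cycles_name(1, 0);       /* "cycles:u"    */

        if (!root || !user || !nopebs)
                return 1;
        printf("%s\n%s\n%s\n", root, user, nopebs);
        free(root);
        free(user);
        free(nopebs);
        return 0;
}

So a non-root run reports the default event as cycles:uppp (or cycles:u without PEBS), while root keeps cycles:ppp.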

tools/perf/util/machine.c

@@ -2209,7 +2209,7 @@ int machine__get_kernel_start(struct machine *machine)
machine->kernel_start = 1ULL << 63;
if (map) {
err = map__load(map);
- if (map->start)
+ if (!err)
machine->kernel_start = map->start;
}
return err;