
x86/intel_rdt/cqm: Add sched_in support

The OS associates an RMID/CLOSid with a task by writing the per-CPU
IA32_PQR_ASSOC MSR when the task is scheduled in.

The sched_in code stays a no-op unless we are running on an Intel SKU
which supports either resource control or monitoring, and they are
enabled by mounting the resctrl fs.  The per-CPU CLOSid/RMID values are
cached, and the MSR write is performed only when a task with a different
CLOSid/RMID is scheduled in.

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: davidcc@google.com
Cc: reinette.chatre@intel.com
Link: http://lkml.kernel.org/r/1501017287-28083-25-git-send-email-vikas.shivappa@linux.intel.com
hifive-unleashed-5.1
Vikas Shivappa 2017-07-25 14:14:43 -07:00 committed by Thomas Gleixner
parent 4be6c07842
commit 748b6b881c
2 changed files with 29 additions and 25 deletions
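Before the diff itself, a minimal, self-contained C sketch of the pattern the commit message describes may help: the CLOSID occupies the upper 32 bits of IA32_PQR_ASSOC and the RMID the low bits, and the MSR write is skipped when the incoming task resolves to the CLOSID/RMID pair already programmed on this CPU. The struct and function names below are illustrative stand-ins, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the per-CPU PQR cache; not the kernel's struct. */
struct pqr_cache {
        uint32_t rmid;
        uint32_t closid;
};

/* Stand-in for wrmsr(IA32_PQR_ASSOC, rmid, closid): EDX=closid, EAX=rmid. */
static void write_pqr_assoc(uint32_t rmid, uint32_t closid)
{
        uint64_t val = ((uint64_t)closid << 32) | rmid;

        printf("IA32_PQR_ASSOC <- 0x%016llx\n", (unsigned long long)val);
}

/*
 * Called on every "context switch" in this model. A task closid/rmid of 0
 * means "use the CPU's default", mirroring the logic in the patch.
 */
static void sched_in(struct pqr_cache *cpu, uint32_t def_closid, uint32_t def_rmid,
                     uint32_t task_closid, uint32_t task_rmid)
{
        uint32_t closid = task_closid ? task_closid : def_closid;
        uint32_t rmid = task_rmid ? task_rmid : def_rmid;

        /* Skip the MSR write if nothing changed - the hot-path optimization. */
        if (closid != cpu->closid || rmid != cpu->rmid) {
                cpu->closid = closid;
                cpu->rmid = rmid;
                write_pqr_assoc(rmid, closid);
        }
}

int main(void)
{
        struct pqr_cache cpu = { 0, 0 };

        sched_in(&cpu, 0, 0, 3, 7);     /* task with its own CLOSID/RMID: write */
        sched_in(&cpu, 0, 0, 3, 7);     /* same pair scheduled in again: no write */
        sched_in(&cpu, 1, 2, 0, 0);     /* falls back to the CPU defaults: write */
        return 0;
}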


@@ -15,7 +15,8 @@
  *
  * The upper 32 bits of IA32_PQR_ASSOC contain closid and the
  * lower 10 bits rmid. The update to IA32_PQR_ASSOC always
- * contains both parts, so we need to cache them.
+ * contains both parts, so we need to cache them. This also
+ * stores the user configured per cpu CLOSID and RMID.
  *
  * The cache also helps to avoid pointless updates if the value does
  * not change.
@@ -30,38 +31,45 @@ DECLARE_PER_CPU_READ_MOSTLY(struct intel_pqr_state, rdt_cpu_default);
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
+DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);

 /*
- * __intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
+ * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
  * Following considerations are made so that this has minimal impact
  * on scheduler hot path:
  * - This will stay as no-op unless we are running on an Intel SKU
- *   which supports resource control and we enable by mounting the
- *   resctrl file system.
- * - Caches the per cpu CLOSid values and does the MSR write only
- *   when a task with a different CLOSid is scheduled in.
- *
+ *   which supports resource control or monitoring and we enable by
+ *   mounting the resctrl file system.
+ * - Caches the per cpu CLOSid/RMID values and does the MSR write only
+ *   when a task with a different CLOSid/RMID is scheduled in.
+ * - We allocate RMIDs/CLOSids globally in order to keep this as
+ *   simple as possible.
  * Must be called with preemption disabled.
  */
-static inline void __intel_rdt_sched_in(void)
+static void __intel_rdt_sched_in(void)
 {
+        struct intel_pqr_state newstate = this_cpu_read(rdt_cpu_default);
+        struct intel_pqr_state *curstate = this_cpu_ptr(&pqr_state);
+
+        /*
+         * If this task has a closid/rmid assigned, use it.
+         * Else use the closid/rmid assigned to this cpu.
+         */
         if (static_branch_likely(&rdt_alloc_enable_key)) {
-                struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
-                u32 closid;
+                if (current->closid)
+                        newstate.closid = current->closid;
+        }

-                /*
-                 * If this task has a closid assigned, use it.
-                 * Else use the closid assigned to this cpu.
-                 */
-                closid = current->closid;
-                if (closid == 0)
-                        closid = this_cpu_read(rdt_cpu_default.closid);
+        if (static_branch_likely(&rdt_mon_enable_key)) {
+                if (current->rmid)
+                        newstate.rmid = current->rmid;
+        }

-                if (closid != state->closid) {
-                        state->closid = closid;
-                        wrmsr(IA32_PQR_ASSOC, state->rmid, closid);
-                }
+        if (newstate.closid != curstate->closid ||
+            newstate.rmid != curstate->rmid) {
+                *curstate = newstate;
+                wrmsr(IA32_PQR_ASSOC, newstate.rmid, newstate.closid);
         }
 }
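For context only (not part of this patch): the scheduler side is expected to reach __intel_rdt_sched_in() through a small wrapper gated on rdt_enable_key, roughly like the sketch below, so the whole path compiles down to a no-op until resctrl is mounted. It is invoked from the context-switch path, which already runs with preemption disabled, which is why the comment above requires it.

/* Sketch of the caller side; not part of this diff. */
static inline void intel_rdt_sched_in(void)
{
        /* rdt_enable_key is flipped when the resctrl filesystem is mounted. */
        if (static_branch_likely(&rdt_enable_key))
                __intel_rdt_sched_in();
}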


@@ -22,8 +22,6 @@
 #define RMID_VAL_ERROR BIT_ULL(63)
 #define RMID_VAL_UNAVAIL BIT_ULL(62)

-DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
-
 /**
  * struct mon_evt - Entry in the event list of a resource
  * @evtid: event id
@@ -61,8 +59,6 @@ extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;

-DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
-
 enum rdt_group_type {
         RDTCTRL_GROUP = 0,
         RDTMON_GROUP,
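The declarations moved in the two hunks above are jump-label static keys; the sched_in path stays a no-op until they are enabled, which happens when the resctrl filesystem is mounted. A hedged sketch of that enable/disable step is shown below: the helper names are hypothetical, and the real enabling is done from the kernel's resctrl mount/unmount code rather than from functions like these.

#include <linux/jump_label.h>
#include <linux/types.h>

/*
 * Illustrative only: how the keys declared in the hunks above might be
 * flipped at resctrl mount/unmount time. Hypothetical helpers, not the
 * kernel's actual mount callbacks.
 */
static void example_rdt_keys_enable(bool alloc_capable, bool mon_capable)
{
        if (alloc_capable)
                static_branch_enable(&rdt_alloc_enable_key);
        if (mon_capable)
                static_branch_enable(&rdt_mon_enable_key);
        if (alloc_capable || mon_capable)
                static_branch_enable(&rdt_enable_key);
}

static void example_rdt_keys_disable(void)
{
        static_branch_disable(&rdt_alloc_enable_key);
        static_branch_disable(&rdt_mon_enable_key);
        static_branch_disable(&rdt_enable_key);
}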