Merge tag 'x86-urgent-2020-12-06' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "A set of fixes for x86:

   - Make the AMD L3 QoS code and data prioritization enable/disable
     mechanism work correctly.

     The control bit was only set/cleared on one of the CPUs in an L3
     domain, but it has to be modified on all CPUs in the domain. The
     initial documentation was not clear about this, but the updated one
     from Oct 2020 spells it out.

   - Fix an off-by-one in the UV platform detection code which causes
     the UV hubs to be identified wrongly.

     The chip revisions start at 1, not at 0.

   - Fix a long-standing bug in the evaluation of prefixes in the
     uprobes code which failed to handle repeated prefixes properly.

     The aggregate size of the prefixes can be larger than the bytes
     array, but the code blindly iterated over the aggregate size beyond
     the array boundary. Add a macro to handle this case properly and
     use it at the affected places"

* tag 'x86-urgent-2020-12-06' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev-es: Use new for_each_insn_prefix() macro to loop over prefixes bytes
  x86/insn-eval: Use new for_each_insn_prefix() macro to loop over prefixes bytes
  x86/uprobes: Do not use prefixes.nbytes when looping over prefixes.bytes
  x86/platform/uv: Fix UV4 hub revision adjustment
  x86/resctrl: Fix AMD L3 QOS CDP enable/disable
Linus Torvalds 2020-12-06 11:22:39 -08:00
commit 8100a58044
9 changed files with 58 additions and 15 deletions

arch/x86/boot/compressed/sev-es.c

@@ -32,13 +32,12 @@ struct ghcb *boot_ghcb;
  */
 static bool insn_has_rep_prefix(struct insn *insn)
 {
+        insn_byte_t p;
         int i;
 
         insn_get_prefixes(insn);
 
-        for (i = 0; i < insn->prefixes.nbytes; i++) {
-                insn_byte_t p = insn->prefixes.bytes[i];
-
+        for_each_insn_prefix(insn, i, p) {
                 if (p == 0xf2 || p == 0xf3)
                         return true;
         }

arch/x86/include/asm/insn.h

@@ -201,6 +201,21 @@ static inline int insn_offset_immediate(struct insn *insn)
         return insn_offset_displacement(insn) + insn->displacement.nbytes;
 }
 
+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
+ * and the index is stored in @idx (note that this @idx is just for a cursor,
+ * do not change it.)
+ * Since prefixes.nbytes can be bigger than 4 if some prefixes
+ * are repeated, it cannot be used for looping over the prefixes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+        for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
 #define POP_SS_OPCODE 0x1f
 #define MOV_SREG_OPCODE 0x8e
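
For readers unfamiliar with the decoder internals, here is a stand-alone sketch of the problem the macro guards against. It is user-space C, and "struct fake_prefixes" and "for_each_prefix()" are simplified stand-ins for the kernel's prefix field and macro, not the real definitions: nbytes counts every prefix byte consumed, repeats included, while bytes[] only stores distinct legacy prefixes and has room for four, so a loop bounded by nbytes can run off the end of the array. The bounded pattern stops at the array end or at the first zero byte.

/*
 * Illustrative user-space model only -- "struct fake_prefixes" and
 * for_each_prefix() are simplified stand-ins, not the kernel types.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct fake_prefixes {
        unsigned char bytes[4]; /* distinct legacy prefixes, zero padded */
        int nbytes;             /* every prefix byte seen, repeats included */
};

/* Bounded iteration: stop at the array end or at the first zero byte. */
#define for_each_prefix(p, idx, b) \
        for (idx = 0; idx < (int)ARRAY_SIZE((p)->bytes) && \
             ((b) = (p)->bytes[idx]) != 0; idx++)

int main(void)
{
        /* e.g. an insn with six repeated 0x66 prefixes: nbytes = 6, one slot used */
        struct fake_prefixes p = { .bytes = { 0x66, 0, 0, 0 }, .nbytes = 6 };
        unsigned char b;
        int i;

        /* Looping with i < p.nbytes would read bytes[4] and bytes[5]: out of bounds. */
        for_each_prefix(&p, i, b)
                printf("prefix[%d] = 0x%02x\n", i, b);

        return 0;
}

Compiled as a normal program this prints the single stored prefix and stops, whereas the old i < nbytes pattern would have read two bytes past the end of the array.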

arch/x86/kernel/apic/x2apic_uv_x.c

@@ -161,7 +161,7 @@ static int __init early_set_hub_type(void)
         /* UV4/4A only have a revision difference */
         case UV4_HUB_PART_NUMBER:
                 uv_min_hub_revision_id = node_id.s.revision
-                                         + UV4_HUB_REVISION_BASE;
+                                         + UV4_HUB_REVISION_BASE - 1;
                 uv_hub_type_set(UV4);
                 if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE)
                         uv_hub_type_set(UV4|UV4A);
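
As a sanity check of the off-by-one, a tiny arithmetic sketch. The base values below are assumptions picked for illustration; the only relationship that matters is the assumed one that UV4A silicon is revision 2 of UV4, so its base sits one above the UV4 base. Since revisions start at 1, the old "revision + base" mapping pushed a plain revision-1 UV4 onto the UV4A base, while "revision + base - 1" keeps revision 1 on the UV4 base and revision 2 on UV4A.

/* Illustration only: both base values are assumed, not taken from the kernel. */
#include <assert.h>

#define UV4_HUB_REVISION_BASE   7                               /* assumed for the example */
#define UV4A_HUB_REVISION_BASE  (UV4_HUB_REVISION_BASE + 1)     /* assumed: UV4A is UV4 rev 2 */

int main(void)
{
        /* Old mapping: the first UV4 part (revision 1) already lands on the UV4A base. */
        assert(1 + UV4_HUB_REVISION_BASE == UV4A_HUB_REVISION_BASE);

        /* Fixed mapping: revision 1 -> UV4 base, revision 2 -> UV4A base. */
        assert(1 + UV4_HUB_REVISION_BASE - 1 == UV4_HUB_REVISION_BASE);
        assert(2 + UV4_HUB_REVISION_BASE - 1 == UV4A_HUB_REVISION_BASE);

        return 0;
}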

arch/x86/kernel/cpu/resctrl/core.c

@@ -570,6 +570,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
         if (d) {
                 cpumask_set_cpu(cpu, &d->cpu_mask);
+                if (r->cache.arch_has_per_cpu_cfg)
+                        rdt_domain_reconfigure_cdp(r);
                 return;
         }
@@ -923,6 +925,7 @@ static __init void rdt_init_res_defs_intel(void)
                   r->rid == RDT_RESOURCE_L2CODE) {
                         r->cache.arch_has_sparse_bitmaps = false;
                         r->cache.arch_has_empty_bitmaps = false;
+                        r->cache.arch_has_per_cpu_cfg = false;
                 } else if (r->rid == RDT_RESOURCE_MBA) {
                         r->msr_base = MSR_IA32_MBA_THRTL_BASE;
                         r->msr_update = mba_wrmsr_intel;
@@ -943,6 +946,7 @@ static __init void rdt_init_res_defs_amd(void)
                   r->rid == RDT_RESOURCE_L2CODE) {
                         r->cache.arch_has_sparse_bitmaps = true;
                         r->cache.arch_has_empty_bitmaps = true;
+                        r->cache.arch_has_per_cpu_cfg = true;
                 } else if (r->rid == RDT_RESOURCE_MBA) {
                         r->msr_base = MSR_IA32_MBA_BW_BASE;
                         r->msr_update = mba_wrmsr_amd;

arch/x86/kernel/cpu/resctrl/internal.h

@@ -360,6 +360,8 @@ struct msr_param {
  *                      executing entities
  * @arch_has_sparse_bitmaps:    True if a bitmap like f00f is valid.
  * @arch_has_empty_bitmaps:     True if the '0' bitmap is valid.
+ * @arch_has_per_cpu_cfg:       True if QOS_CFG register for this cache
+ *                              level has CPU scope.
  */
 struct rdt_cache {
         unsigned int    cbm_len;
@@ -369,6 +371,7 @@ struct rdt_cache {
         unsigned int    shareable_bits;
         bool            arch_has_sparse_bitmaps;
         bool            arch_has_empty_bitmaps;
+        bool            arch_has_per_cpu_cfg;
 };
 
 /**

arch/x86/kernel/cpu/resctrl/rdtgroup.c

@@ -1909,8 +1909,13 @@ static int set_cache_qos_cfg(int level, bool enable)
 
         r_l = &rdt_resources_all[level];
         list_for_each_entry(d, &r_l->domains, list) {
-                /* Pick one CPU from each domain instance to update MSR */
-                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+                if (r_l->cache.arch_has_per_cpu_cfg)
+                        /* Pick all the CPUs in the domain instance */
+                        for_each_cpu(cpu, &d->cpu_mask)
+                                cpumask_set_cpu(cpu, cpu_mask);
+                else
+                        /* Pick one CPU from each domain instance to update MSR */
+                        cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
         }
         cpu = get_cpu();
         /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
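
To spell out the intent of that hunk, here is a small user-space model; "struct domain" and the mask helpers below are simplified stand-ins, not the kernel's rdt_domain or cpumask API. When QOS_CFG has CPU scope (arch_has_per_cpu_cfg, the AMD case set up in core.c above), every CPU of every L3 domain must end up in the update mask so each one rewrites its own copy of the register; when the register is shared across the domain, any single CPU per domain is enough.

/*
 * User-space model only: "struct domain" and the mask helpers are
 * simplified stand-ins for the kernel's rdt_domain and cpumask API.
 */
#include <stdio.h>
#include <stdbool.h>

struct domain {
        unsigned long cpu_mask;         /* bit n set => CPU n belongs to this domain */
};

/* Lowest-numbered CPU in the domain (stand-in for cpumask_any()). */
static int any_cpu(unsigned long mask)
{
        int cpu;

        for (cpu = 0; cpu < 64; cpu++)
                if (mask & (1UL << cpu))
                        return cpu;
        return -1;
}

static unsigned long cpus_to_update(const struct domain *doms, int ndoms,
                                    bool per_cpu_cfg)
{
        unsigned long update_mask = 0;
        int i;

        for (i = 0; i < ndoms; i++) {
                if (per_cpu_cfg)
                        /* Register has CPU scope: every CPU must write its own copy. */
                        update_mask |= doms[i].cpu_mask;
                else
                        /* Register is shared per domain: one CPU per domain suffices. */
                        update_mask |= 1UL << any_cpu(doms[i].cpu_mask);
        }
        return update_mask;
}

int main(void)
{
        /* Two L3 domains: CPUs 0-3 and CPUs 4-7. */
        struct domain doms[2] = { { 0x0fUL }, { 0xf0UL } };

        printf("per-domain scope: %#lx\n", cpus_to_update(doms, 2, false)); /* 0x11 */
        printf("per-CPU scope:    %#lx\n", cpus_to_update(doms, 2, true));  /* 0xff */
        return 0;
}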

arch/x86/kernel/uprobes.c

@@ -255,12 +255,13 @@ static volatile u32 good_2byte_insns[256 / 32] = {
 
 static bool is_prefix_bad(struct insn *insn)
 {
+        insn_byte_t p;
         int i;
 
-        for (i = 0; i < insn->prefixes.nbytes; i++) {
+        for_each_insn_prefix(insn, i, p) {
                 insn_attr_t attr;
 
-                attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+                attr = inat_get_opcode_attribute(p);
                 switch (attr) {
                 case INAT_MAKE_PREFIX(INAT_PFX_ES):
                 case INAT_MAKE_PREFIX(INAT_PFX_CS):
@@ -715,6 +716,7 @@ static const struct uprobe_xol_ops push_xol_ops = {
 static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
 {
         u8 opc1 = OPCODE1(insn);
+        insn_byte_t p;
         int i;
 
         switch (opc1) {
@@ -746,8 +748,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
          * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
          * No one uses these insns, reject any branch insns with such prefix.
          */
-        for (i = 0; i < insn->prefixes.nbytes; i++) {
-                if (insn->prefixes.bytes[i] == 0x66)
+        for_each_insn_prefix(insn, i, p) {
+                if (p == 0x66)
                         return -ENOTSUPP;
         }

arch/x86/lib/insn-eval.c

@@ -63,13 +63,12 @@ static bool is_string_insn(struct insn *insn)
  */
 bool insn_has_rep_prefix(struct insn *insn)
 {
+        insn_byte_t p;
         int i;
 
         insn_get_prefixes(insn);
 
-        for (i = 0; i < insn->prefixes.nbytes; i++) {
-                insn_byte_t p = insn->prefixes.bytes[i];
-
+        for_each_insn_prefix(insn, i, p) {
                 if (p == 0xf2 || p == 0xf3)
                         return true;
         }
@@ -95,14 +94,15 @@ static int get_seg_reg_override_idx(struct insn *insn)
 {
         int idx = INAT_SEG_REG_DEFAULT;
         int num_overrides = 0, i;
+        insn_byte_t p;
 
         insn_get_prefixes(insn);
 
         /* Look for any segment override prefixes. */
-        for (i = 0; i < insn->prefixes.nbytes; i++) {
+        for_each_insn_prefix(insn, i, p) {
                 insn_attr_t attr;
 
-                attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+                attr = inat_get_opcode_attribute(p);
                 switch (attr) {
                 case INAT_MAKE_PREFIX(INAT_PFX_CS):
                         idx = INAT_SEG_REG_CS;

tools/arch/x86/include/asm/insn.h

@@ -201,6 +201,21 @@ static inline int insn_offset_immediate(struct insn *insn)
         return insn_offset_displacement(insn) + insn->displacement.nbytes;
 }
 
+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
+ * and the index is stored in @idx (note that this @idx is just for a cursor,
+ * do not change it.)
+ * Since prefixes.nbytes can be bigger than 4 if some prefixes
+ * are repeated, it cannot be used for looping over the prefixes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+        for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
 #define POP_SS_OPCODE 0x1f
 #define MOV_SREG_OPCODE 0x8e