
arm64: cpufeature: Rework ptr auth hwcaps using multi_entry_cap_matches

Open-coding the pointer-auth HWCAPs is a mess and can be avoided by
reusing the multi-cap logic from the CPU errata framework.

Move the multi_entry_cap_matches code to cpufeature.h and reuse it for
the pointer auth HWCAPs.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Will Deacon 2018-12-12 15:53:54 +00:00
parent a56005d321
commit 1e013d0612
3 changed files with 99 additions and 89 deletions
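
In short: each open-coded has_*_auth() matcher read ID_AA64ISAR1_EL1 and ORed two fields together; the rework expresses each field check as its own match_list entry and lets the generic helper provide the OR. Condensed from the diff below (both the before and after forms appear in full in the cpufeature.c hunks):

/* Before: a bespoke matcher per hwcap, ORing two ID register fields. */
static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
			     int __unused)
{
	u64 isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);

	return cpuid_feature_extract_unsigned_field(isar1, ID_AA64ISAR1_APA_SHIFT) > 0 ||
	       cpuid_feature_extract_unsigned_field(isar1, ID_AA64ISAR1_API_SHIFT) > 0;
}

/* After: one table entry per ID field; the shared matcher supplies the OR. */
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA),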

arch/arm64/include/asm/cpufeature.h

@@ -321,19 +321,20 @@ struct arm64_cpu_capabilities {
bool sign;
unsigned long hwcap;
};
/*
* A list of "matches/cpu_enable" pair for the same
* "capability" of the same "type" as described by the parent.
* Only matches(), cpu_enable() and fields relevant to these
* methods are significant in the list. The cpu_enable is
* invoked only if the corresponding entry "matches()".
* However, if a cpu_enable() method is associated
* with multiple matches(), care should be taken that either
* the match criteria are mutually exclusive, or that the
* method is robust against being called multiple times.
*/
const struct arm64_cpu_capabilities *match_list;
};
/*
* An optional list of "matches/cpu_enable" pair for the same
* "capability" of the same "type" as described by the parent.
* Only matches(), cpu_enable() and fields relevant to these
* methods are significant in the list. The cpu_enable is
* invoked only if the corresponding entry "matches()".
* However, if a cpu_enable() method is associated
* with multiple matches(), care should be taken that either
* the match criteria are mutually exclusive, or that the
* method is robust against being called multiple times.
*/
const struct arm64_cpu_capabilities *match_list;
};
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
@@ -353,6 +354,39 @@ cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
}
/*
* Generic helper for handling capabilities with multiple (match, enable) pairs
* of callbacks, sharing the same capability bit.
* Iterate over each entry to see if at least one matches.
*/
static inline bool
cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
int scope)
{
const struct arm64_cpu_capabilities *caps;
for (caps = entry->match_list; caps->matches; caps++)
if (caps->matches(caps, scope))
return true;
return false;
}
/*
* Take appropriate action for all matching entries in the shared capability
* entry.
*/
static inline void
cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
const struct arm64_cpu_capabilities *caps;
for (caps = entry->match_list; caps->matches; caps++)
if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
caps->cpu_enable)
caps->cpu_enable(caps);
}
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
@@ -476,7 +510,6 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);
u64 read_sanitised_ftr_reg(u32 id);
static inline bool cpu_supports_mixed_endian_el0(void)

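Both helpers above walk the parent's match_list until they reach the empty terminator entry, whose ->matches is NULL. A minimal sketch of a consumer, using hypothetical names (ARM64_HAS_WIDGET and the widget_* callbacks are illustrative, not real kernel symbols):

static const struct arm64_cpu_capabilities widget_match_list[] = {
	{ .matches = widget_check_a, .cpu_enable = widget_enable_a },
	{ .matches = widget_check_b, .cpu_enable = widget_enable_b },
	{},	/* terminator: both iterators stop at !caps->matches */
};

static const struct arm64_cpu_capabilities widget_cap = {
	.desc		= "widget",
	.capability	= ARM64_HAS_WIDGET,	/* hypothetical capability bit */
	.type		= ARM64_CPUCAP_SYSTEM_FEATURE,
	/* True if at least one list entry matches... */
	.matches	= cpucap_multi_entry_cap_matches,
	/* ...and cpu_enable() runs for every locally matching entry. */
	.cpu_enable	= cpucap_multi_entry_cap_cpu_enable,
	.match_list	= widget_match_list,
};

Per the comment on match_list, if one cpu_enable() callback is shared between entries, the match criteria must be mutually exclusive or the callback must tolerate being invoked more than once.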
arch/arm64/kernel/cpu_errata.c

@@ -507,38 +507,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)
/*
* Generic helper for handling capabilities with multiple (match, enable) pairs
* of callbacks, sharing the same capability bit.
* Iterate over each entry to see if at least one matches.
*/
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
const struct arm64_cpu_capabilities *caps;
for (caps = entry->match_list; caps->matches; caps++)
if (caps->matches(caps, scope))
return true;
return false;
}
/*
* Take appropriate action for all matching entries in the shared capability
* entry.
*/
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
const struct arm64_cpu_capabilities *caps;
for (caps = entry->match_list; caps->matches; caps++)
if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
caps->cpu_enable)
caps->cpu_enable(caps);
}
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/*
@@ -700,7 +668,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
.matches = multi_entry_cap_matches,
.matches = cpucap_multi_entry_cap_matches,
.match_list = qcom_erratum_1003_list,
},
#endif

arch/arm64/kernel/cpufeature.c

@@ -1196,34 +1196,6 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
}
static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
int __unused)
{
u64 isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
bool api, apa;
apa = cpuid_feature_extract_unsigned_field(isar1,
ID_AA64ISAR1_APA_SHIFT) > 0;
api = cpuid_feature_extract_unsigned_field(isar1,
ID_AA64ISAR1_API_SHIFT) > 0;
return apa || api;
}
static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
int __unused)
{
u64 isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
bool gpi, gpa;
gpa = cpuid_feature_extract_unsigned_field(isar1,
ID_AA64ISAR1_GPA_SHIFT) > 0;
gpi = cpuid_feature_extract_unsigned_field(isar1,
ID_AA64ISAR1_GPI_SHIFT) > 0;
return gpa || gpi;
}
#endif /* CONFIG_ARM64_PTR_AUTH */
static const struct arm64_cpu_capabilities arm64_features[] = {
@@ -1506,19 +1478,58 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{},
};
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
{ \
.desc = #cap, \
.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
.matches = has_cpuid_feature, \
.sys_reg = reg, \
.field_pos = field, \
.sign = s, \
.min_field_value = min_value, \
.hwcap_type = cap_type, \
.hwcap = cap, \
#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
.matches = has_cpuid_feature, \
.sys_reg = reg, \
.field_pos = field, \
.sign = s, \
.min_field_value = min_value,
#define __HWCAP_CAP(name, cap_type, cap) \
.desc = name, \
.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
.hwcap_type = cap_type, \
.hwcap = cap, \
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
{ \
__HWCAP_CAP(#cap, cap_type, cap) \
HWCAP_CPUID_MATCH(reg, field, s, min_value) \
}
#define HWCAP_MULTI_CAP(list, cap_type, cap) \
{ \
__HWCAP_CAP(#cap, cap_type, cap) \
.matches = cpucap_multi_entry_cap_matches, \
.match_list = list, \
}
#ifdef CONFIG_ARM64_PTR_AUTH
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
{
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
},
{
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
},
{},
};
static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
{
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
},
{
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
},
{},
};
#endif
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
@@ -1551,10 +1562,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#endif
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
#ifdef CONFIG_ARM64_PTR_AUTH
{ .desc = "HWCAP_PACA", .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_address_auth,
.hwcap_type = CAP_HWCAP, .hwcap = HWCAP_PACA },
{ .desc = "HWCAP_PACG", .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_generic_auth,
.hwcap_type = CAP_HWCAP, .hwcap = HWCAP_PACG },
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA),
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG),
#endif
{},
};
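
Expanding one of the new entries by hand shows that it reproduces the open-coded entry it replaces, differing only in how .matches is provided. HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA) expands to roughly:

{
	.desc		= "HWCAP_PACA",	/* #cap stringified by __HWCAP_CAP() */
	.type		= ARM64_CPUCAP_SYSTEM_FEATURE,
	.hwcap_type	= CAP_HWCAP,
	.hwcap		= HWCAP_PACA,
	.matches	= cpucap_multi_entry_cap_matches,
	.match_list	= ptr_auth_hwcap_addr_matches,
},

The match_list entries carry only the HWCAP_CPUID_MATCH() fields, so has_cpuid_feature() evaluates the APA and API fields independently and HWCAP_PACA is advertised if either check passes.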