From 1e0c08e3034de0659367393bfa825188462f22e6 Mon Sep 17 00:00:00 2001 From: Gayatri Kammela Date: Wed, 17 Jul 2019 16:46:31 -0700 Subject: [PATCH 01/25] cpu/cpuid-deps: Add a tab to cpuid dependent features Improve code readability by adding a tab between the elements of each structure in an array of cpuid-dep struct so longer feature names will fit. Signed-off-by: Gayatri Kammela Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190717234632.32673-2-gayatri.kammela@intel.com --- arch/x86/kernel/cpu/cpuid-deps.c | 96 ++++++++++++++++---------------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index b5353244749b..630a9f77fb6b 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -20,54 +20,54 @@ struct cpuid_dep { * but it's difficult to tell that to the init reference checker. */ static const struct cpuid_dep cpuid_deps[] = { - { X86_FEATURE_FXSR, X86_FEATURE_FPU }, - { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, - { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, - { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, - { X86_FEATURE_AVX, X86_FEATURE_XSAVE }, - { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, - { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, - { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE }, - { X86_FEATURE_CMOV, X86_FEATURE_FXSR }, - { X86_FEATURE_MMX, X86_FEATURE_FXSR }, - { X86_FEATURE_MMXEXT, X86_FEATURE_MMX }, - { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR }, - { X86_FEATURE_XSAVE, X86_FEATURE_FXSR }, - { X86_FEATURE_XMM, X86_FEATURE_FXSR }, - { X86_FEATURE_XMM2, X86_FEATURE_XMM }, - { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, - { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 }, - { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 }, - { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, - { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 }, - { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, }, - { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, - { X86_FEATURE_AES, X86_FEATURE_XMM2 }, - { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, - { X86_FEATURE_FMA, X86_FEATURE_AVX }, - { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, - { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, - { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, - { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, - { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, - { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, - { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, - { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, - { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, - { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC }, - { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, - { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, - { X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL }, + { X86_FEATURE_FXSR, X86_FEATURE_FPU }, + { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, + { X86_FEATURE_AVX, X86_FEATURE_XSAVE }, + { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, + { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, + { X86_FEATURE_XGETBV1, 
X86_FEATURE_XSAVE }, + { X86_FEATURE_CMOV, X86_FEATURE_FXSR }, + { X86_FEATURE_MMX, X86_FEATURE_FXSR }, + { X86_FEATURE_MMXEXT, X86_FEATURE_MMX }, + { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR }, + { X86_FEATURE_XSAVE, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM2, X86_FEATURE_XMM }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 }, + { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, }, + { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, + { X86_FEATURE_AES, X86_FEATURE_XMM2 }, + { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, + { X86_FEATURE_FMA, X86_FEATURE_AVX }, + { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, + { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL }, {} }; From 018ebca8bd704f18d56f8fff38e2c3d76d7d39fb Mon Sep 17 00:00:00 2001 From: Gayatri Kammela Date: Wed, 17 Jul 2019 16:46:32 -0700 Subject: [PATCH 02/25] x86/cpufeatures: Enable a new AVX512 CPU feature Add a new AVX512 instruction group/feature for enumeration in /proc/cpuinfo: AVX512_VP2INTERSECT. CPUID.(EAX=7,ECX=0):EDX[bit 8] AVX512_VP2INTERSECT Detailed information of CPUID bits for this feature can be found in the Intel Architecture Intsruction Set Extensions Programming Reference document (refer to Table 1-2). A copy of this document is available at https://bugzilla.kernel.org/show_bug.cgi?id=204215. 
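[ Editor's note: for readers who want to check the enumeration described above, here is a minimal user-space sketch — not part of this patch — that probes CPUID.(EAX=7,ECX=0):EDX[bit 8]. It assumes a GCC or Clang toolchain that provides the <cpuid.h> helper __get_cpuid_count(). ]

    #include <stdio.h>
    #include <cpuid.h>   /* GCC/Clang wrapper around the CPUID instruction */

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Leaf 7, subleaf 0: structured extended feature flags */
            if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
                (edx & (1u << 8)))
                    printf("AVX512_VP2INTERSECT enumerated\n");
            else
                    printf("AVX512_VP2INTERSECT not enumerated\n");
            return 0;
    }

[ Once this patch is applied, the same bit also shows up as "avx512_vp2intersect" in /proc/cpuinfo on hardware that supports it. ]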
Signed-off-by: Gayatri Kammela Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190717234632.32673-3-gayatri.kammela@intel.com --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kernel/cpu/cpuid-deps.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 998c2cc08363..56f53bf3bbbf 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -353,6 +353,7 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index 630a9f77fb6b..3cbe24ca80ab 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -64,6 +64,7 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VP2INTERSECT, X86_FEATURE_AVX512VL }, { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC }, { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, From 69732102426b1c55a257386841fb80ec1f425d32 Mon Sep 17 00:00:00 2001 From: Pingfan Liu Date: Tue, 16 Jul 2019 16:40:24 +0800 Subject: [PATCH 03/25] x86/realmode: Remove trampoline_status There is no reader of trampoline_status, it's only written. It turns out that after commit ce4b1b16502b ("x86/smpboot: Initialize secondary CPU only if master CPU will wait for it"), trampoline_status is not needed any more. Signed-off-by: Pingfan Liu Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/1563266424-3472-1-git-send-email-kernelfans@gmail.com --- arch/x86/include/asm/realmode.h | 1 - arch/x86/kernel/smpboot.c | 5 ----- arch/x86/realmode/rm/header.S | 1 - arch/x86/realmode/rm/trampoline_32.S | 3 --- arch/x86/realmode/rm/trampoline_64.S | 3 --- arch/x86/realmode/rm/trampoline_common.S | 4 ---- 6 files changed, 17 deletions(-) diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index c53682303c9c..09ecc32f6524 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -20,7 +20,6 @@ struct real_mode_header { u32 ro_end; /* SMP trampoline */ u32 trampoline_start; - u32 trampoline_status; u32 trampoline_header; #ifdef CONFIG_X86_64 u32 trampoline_pgd; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index fdbd47ceb84d..497e9b7077c1 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1023,8 +1023,6 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle) static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, int *cpu0_nmi_registered) { - volatile u32 *trampoline_status = - (volatile u32 *) __va(real_mode_header->trampoline_status); /* start_ip had better be page-aligned! 
*/ unsigned long start_ip = real_mode_header->trampoline_start; @@ -1116,9 +1114,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, } } - /* mark "stuck" area as not stuck */ - *trampoline_status = 0; - if (x86_platform.legacy.warm_reset) { /* * Cleanup possible dangling ends... diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S index 30b0d30d861a..6363761cc74c 100644 --- a/arch/x86/realmode/rm/header.S +++ b/arch/x86/realmode/rm/header.S @@ -19,7 +19,6 @@ GLOBAL(real_mode_header) .long pa_ro_end /* SMP trampoline */ .long pa_trampoline_start - .long pa_trampoline_status .long pa_trampoline_header #ifdef CONFIG_X86_64 .long pa_trampoline_pgd; diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S index 2dd866c9e21e..1868b158480d 100644 --- a/arch/x86/realmode/rm/trampoline_32.S +++ b/arch/x86/realmode/rm/trampoline_32.S @@ -41,9 +41,6 @@ ENTRY(trampoline_start) movl tr_start, %eax # where we need to go - movl $0xA5A5A5A5, trampoline_status - # write marker for master knows we're running - /* * GDT tables in non default location kernel can be beyond 16MB and * lgdt will not be able to load the address as in real mode default diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index 24bb7598774e..aee2b45d83b8 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -49,9 +49,6 @@ ENTRY(trampoline_start) mov %ax, %es mov %ax, %ss - movl $0xA5A5A5A5, trampoline_status - # write marker for master knows we're running - # Setup stack movl $rm_stack_end, %esp diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S index 7c706772ab59..8d8208dcca24 100644 --- a/arch/x86/realmode/rm/trampoline_common.S +++ b/arch/x86/realmode/rm/trampoline_common.S @@ -2,7 +2,3 @@ .section ".rodata","a" .balign 16 tr_idt: .fill 1, 6, 0 - - .bss - .balign 4 -GLOBAL(trampoline_status) .space 4 From 48febc03e6c239d96f46d8b38d91863769fc18c8 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 5 Jul 2019 10:53:18 -0700 Subject: [PATCH 04/25] x86/mpx: Remove selftests Makefile entry MPX is being removed from the kernel due to a lack of support in the toolchain going forward (gcc). This is the smallest possible patch to fix some issues that have been reported around running the MPX selftests. It it would also have been part of any removal series, it is offered first. 
Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190705175318.784C233E@viggo.jf.intel.com --- tools/testing/selftests/x86/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index fa07d526fe39..3bc5b744e644 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -11,7 +11,7 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh $(CC) trivial_program.c -no-pie) TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ - check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \ + check_initial_reg_state sigreturn iopl ioperm \ protection_keys test_vdso test_vsyscall mov_ss_trap \ syscall_arg_fault TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \ From e28df79ae2dfebf18e08dc66c0948b7950e4368a Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 5 Jul 2019 10:53:20 -0700 Subject: [PATCH 05/25] x86/mpx: Remove selftests themselves MPX is being removed from the kernel due to a lack of support in the toolchain going forward (gcc). Remove the x86 selftests since they have been causing some issues because of their propensity to do some debug-aiding tracepoint mucking. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190705175320.6542F8AE@viggo.jf.intel.com --- tools/testing/selftests/x86/mpx-debug.h | 15 - tools/testing/selftests/x86/mpx-dig.c | 497 ------ tools/testing/selftests/x86/mpx-hw.h | 124 -- tools/testing/selftests/x86/mpx-mini-test.c | 1613 ------------------- tools/testing/selftests/x86/mpx-mm.h | 10 - 5 files changed, 2259 deletions(-) delete mode 100644 tools/testing/selftests/x86/mpx-debug.h delete mode 100644 tools/testing/selftests/x86/mpx-dig.c delete mode 100644 tools/testing/selftests/x86/mpx-hw.h delete mode 100644 tools/testing/selftests/x86/mpx-mini-test.c delete mode 100644 tools/testing/selftests/x86/mpx-mm.h diff --git a/tools/testing/selftests/x86/mpx-debug.h b/tools/testing/selftests/x86/mpx-debug.h deleted file mode 100644 index 7546eba7f17a..000000000000 --- a/tools/testing/selftests/x86/mpx-debug.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _MPX_DEBUG_H -#define _MPX_DEBUG_H - -#ifndef DEBUG_LEVEL -#define DEBUG_LEVEL 0 -#endif -#define dprintf_level(level, args...) do { if(level <= DEBUG_LEVEL) printf(args); } while(0) -#define dprintf1(args...) dprintf_level(1, args) -#define dprintf2(args...) dprintf_level(2, args) -#define dprintf3(args...) dprintf_level(3, args) -#define dprintf4(args...) dprintf_level(4, args) -#define dprintf5(args...) 
dprintf_level(5, args) - -#endif /* _MPX_DEBUG_H */ diff --git a/tools/testing/selftests/x86/mpx-dig.c b/tools/testing/selftests/x86/mpx-dig.c deleted file mode 100644 index 880fbf676968..000000000000 --- a/tools/testing/selftests/x86/mpx-dig.c +++ /dev/null @@ -1,497 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Written by Dave Hansen - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "mpx-debug.h" -#include "mpx-mm.h" -#include "mpx-hw.h" - -unsigned long bounds_dir_global; - -#define mpx_dig_abort() __mpx_dig_abort(__FILE__, __func__, __LINE__) -static void inline __mpx_dig_abort(const char *file, const char *func, int line) -{ - fprintf(stderr, "MPX dig abort @ %s::%d in %s()\n", file, line, func); - printf("MPX dig abort @ %s::%d in %s()\n", file, line, func); - abort(); -} - -/* - * run like this (BDIR finds the probably bounds directory): - * - * BDIR="$(cat /proc/$pid/smaps | grep -B1 2097152 \ - * | head -1 | awk -F- '{print $1}')"; - * ./mpx-dig $pid 0x$BDIR - * - * NOTE: - * assumes that the only 2097152-kb VMA is the bounds dir - */ - -long nr_incore(void *ptr, unsigned long size_bytes) -{ - int i; - long ret = 0; - long vec_len = size_bytes / PAGE_SIZE; - unsigned char *vec = malloc(vec_len); - int incore_ret; - - if (!vec) - mpx_dig_abort(); - - incore_ret = mincore(ptr, size_bytes, vec); - if (incore_ret) { - printf("mincore ret: %d\n", incore_ret); - perror("mincore"); - mpx_dig_abort(); - } - for (i = 0; i < vec_len; i++) - ret += vec[i]; - free(vec); - return ret; -} - -int open_proc(int pid, char *file) -{ - static char buf[100]; - int fd; - - snprintf(&buf[0], sizeof(buf), "/proc/%d/%s", pid, file); - fd = open(&buf[0], O_RDONLY); - if (fd < 0) - perror(buf); - - return fd; -} - -struct vaddr_range { - unsigned long start; - unsigned long end; -}; -struct vaddr_range *ranges; -int nr_ranges_allocated; -int nr_ranges_populated; -int last_range = -1; - -int __pid_load_vaddrs(int pid) -{ - int ret = 0; - int proc_maps_fd = open_proc(pid, "maps"); - char linebuf[10000]; - unsigned long start; - unsigned long end; - char rest[1000]; - FILE *f = fdopen(proc_maps_fd, "r"); - - if (!f) - mpx_dig_abort(); - nr_ranges_populated = 0; - while (!feof(f)) { - char *readret = fgets(linebuf, sizeof(linebuf), f); - int parsed; - - if (readret == NULL) { - if (feof(f)) - break; - mpx_dig_abort(); - } - - parsed = sscanf(linebuf, "%lx-%lx%s", &start, &end, rest); - if (parsed != 3) - mpx_dig_abort(); - - dprintf4("result[%d]: %lx-%lx<->%s\n", parsed, start, end, rest); - if (nr_ranges_populated >= nr_ranges_allocated) { - ret = -E2BIG; - break; - } - ranges[nr_ranges_populated].start = start; - ranges[nr_ranges_populated].end = end; - nr_ranges_populated++; - } - last_range = -1; - fclose(f); - close(proc_maps_fd); - return ret; -} - -int pid_load_vaddrs(int pid) -{ - int ret; - - dprintf2("%s(%d)\n", __func__, pid); - if (!ranges) { - nr_ranges_allocated = 4; - ranges = malloc(nr_ranges_allocated * sizeof(ranges[0])); - dprintf2("%s(%d) allocated %d ranges @ %p\n", __func__, pid, - nr_ranges_allocated, ranges); - assert(ranges != NULL); - } - do { - ret = __pid_load_vaddrs(pid); - if (!ret) - break; - if (ret == -E2BIG) { - dprintf2("%s(%d) need to realloc\n", __func__, pid); - nr_ranges_allocated *= 2; - ranges = realloc(ranges, - nr_ranges_allocated * sizeof(ranges[0])); - dprintf2("%s(%d) allocated %d ranges @ %p\n", __func__, - pid, nr_ranges_allocated, ranges); - assert(ranges != NULL); - dprintf1("reallocating to hold 
%d ranges\n", nr_ranges_allocated); - } - } while (1); - - dprintf2("%s(%d) done\n", __func__, pid); - - return ret; -} - -static inline int vaddr_in_range(unsigned long vaddr, struct vaddr_range *r) -{ - if (vaddr < r->start) - return 0; - if (vaddr >= r->end) - return 0; - return 1; -} - -static inline int vaddr_mapped_by_range(unsigned long vaddr) -{ - int i; - - if (last_range > 0 && vaddr_in_range(vaddr, &ranges[last_range])) - return 1; - - for (i = 0; i < nr_ranges_populated; i++) { - struct vaddr_range *r = &ranges[i]; - - if (vaddr_in_range(vaddr, r)) - continue; - last_range = i; - return 1; - } - return 0; -} - -const int bt_entry_size_bytes = sizeof(unsigned long) * 4; - -void *read_bounds_table_into_buf(unsigned long table_vaddr) -{ -#ifdef MPX_DIG_STANDALONE - static char bt_buf[MPX_BOUNDS_TABLE_SIZE_BYTES]; - off_t seek_ret = lseek(fd, table_vaddr, SEEK_SET); - if (seek_ret != table_vaddr) - mpx_dig_abort(); - - int read_ret = read(fd, &bt_buf, sizeof(bt_buf)); - if (read_ret != sizeof(bt_buf)) - mpx_dig_abort(); - return &bt_buf; -#else - return (void *)table_vaddr; -#endif -} - -int dump_table(unsigned long table_vaddr, unsigned long base_controlled_vaddr, - unsigned long bde_vaddr) -{ - unsigned long offset_inside_bt; - int nr_entries = 0; - int do_abort = 0; - char *bt_buf; - - dprintf3("%s() base_controlled_vaddr: 0x%012lx bde_vaddr: 0x%012lx\n", - __func__, base_controlled_vaddr, bde_vaddr); - - bt_buf = read_bounds_table_into_buf(table_vaddr); - - dprintf4("%s() read done\n", __func__); - - for (offset_inside_bt = 0; - offset_inside_bt < MPX_BOUNDS_TABLE_SIZE_BYTES; - offset_inside_bt += bt_entry_size_bytes) { - unsigned long bt_entry_index; - unsigned long bt_entry_controls; - unsigned long this_bt_entry_for_vaddr; - unsigned long *bt_entry_buf; - int i; - - dprintf4("%s() offset_inside_bt: 0x%lx of 0x%llx\n", __func__, - offset_inside_bt, MPX_BOUNDS_TABLE_SIZE_BYTES); - bt_entry_buf = (void *)&bt_buf[offset_inside_bt]; - if (!bt_buf) { - printf("null bt_buf\n"); - mpx_dig_abort(); - } - if (!bt_entry_buf) { - printf("null bt_entry_buf\n"); - mpx_dig_abort(); - } - dprintf4("%s() reading *bt_entry_buf @ %p\n", __func__, - bt_entry_buf); - if (!bt_entry_buf[0] && - !bt_entry_buf[1] && - !bt_entry_buf[2] && - !bt_entry_buf[3]) - continue; - - nr_entries++; - - bt_entry_index = offset_inside_bt/bt_entry_size_bytes; - bt_entry_controls = sizeof(void *); - this_bt_entry_for_vaddr = - base_controlled_vaddr + bt_entry_index*bt_entry_controls; - /* - * We sign extend vaddr bits 48->63 which effectively - * creates a hole in the virtual address space. - * This calculation corrects for the hole. 
- */ - if (this_bt_entry_for_vaddr > 0x00007fffffffffffUL) - this_bt_entry_for_vaddr |= 0xffff800000000000; - - if (!vaddr_mapped_by_range(this_bt_entry_for_vaddr)) { - printf("bt_entry_buf: %p\n", bt_entry_buf); - printf("there is a bte for %lx but no mapping\n", - this_bt_entry_for_vaddr); - printf(" bde vaddr: %016lx\n", bde_vaddr); - printf("base_controlled_vaddr: %016lx\n", base_controlled_vaddr); - printf(" table_vaddr: %016lx\n", table_vaddr); - printf(" entry vaddr: %016lx @ offset %lx\n", - table_vaddr + offset_inside_bt, offset_inside_bt); - do_abort = 1; - mpx_dig_abort(); - } - if (DEBUG_LEVEL < 4) - continue; - - printf("table entry[%lx]: ", offset_inside_bt); - for (i = 0; i < bt_entry_size_bytes; i += sizeof(unsigned long)) - printf("0x%016lx ", bt_entry_buf[i]); - printf("\n"); - } - if (do_abort) - mpx_dig_abort(); - dprintf4("%s() done\n", __func__); - return nr_entries; -} - -int search_bd_buf(char *buf, int len_bytes, unsigned long bd_offset_bytes, - int *nr_populated_bdes) -{ - unsigned long i; - int total_entries = 0; - - dprintf3("%s(%p, %x, %lx, ...) buf end: %p\n", __func__, buf, - len_bytes, bd_offset_bytes, buf + len_bytes); - - for (i = 0; i < len_bytes; i += sizeof(unsigned long)) { - unsigned long bd_index = (bd_offset_bytes + i) / sizeof(unsigned long); - unsigned long *bounds_dir_entry_ptr = (unsigned long *)&buf[i]; - unsigned long bounds_dir_entry; - unsigned long bd_for_vaddr; - unsigned long bt_start; - unsigned long bt_tail; - int nr_entries; - - dprintf4("%s() loop i: %ld bounds_dir_entry_ptr: %p\n", __func__, i, - bounds_dir_entry_ptr); - - bounds_dir_entry = *bounds_dir_entry_ptr; - if (!bounds_dir_entry) { - dprintf4("no bounds dir at index 0x%lx / 0x%lx " - "start at offset:%lx %lx\n", bd_index, bd_index, - bd_offset_bytes, i); - continue; - } - dprintf3("found bounds_dir_entry: 0x%lx @ " - "index 0x%lx buf ptr: %p\n", bounds_dir_entry, i, - &buf[i]); - /* mask off the enable bit: */ - bounds_dir_entry &= ~0x1; - (*nr_populated_bdes)++; - dprintf4("nr_populated_bdes: %p\n", nr_populated_bdes); - dprintf4("*nr_populated_bdes: %d\n", *nr_populated_bdes); - - bt_start = bounds_dir_entry; - bt_tail = bounds_dir_entry + MPX_BOUNDS_TABLE_SIZE_BYTES - 1; - if (!vaddr_mapped_by_range(bt_start)) { - printf("bounds directory 0x%lx points to nowhere\n", - bounds_dir_entry); - mpx_dig_abort(); - } - if (!vaddr_mapped_by_range(bt_tail)) { - printf("bounds directory end 0x%lx points to nowhere\n", - bt_tail); - mpx_dig_abort(); - } - /* - * Each bounds directory entry controls 1MB of virtual address - * space. This variable is the virtual address in the process - * of the beginning of the area controlled by this bounds_dir. - */ - bd_for_vaddr = bd_index * (1UL<<20); - - nr_entries = dump_table(bounds_dir_entry, bd_for_vaddr, - bounds_dir_global+bd_offset_bytes+i); - total_entries += nr_entries; - dprintf5("dir entry[%4ld @ %p]: 0x%lx %6d entries " - "total this buf: %7d bd_for_vaddrs: 0x%lx -> 0x%lx\n", - bd_index, buf+i, - bounds_dir_entry, nr_entries, total_entries, - bd_for_vaddr, bd_for_vaddr + (1UL<<20)); - } - dprintf3("%s(%p, %x, %lx, ...) 
done\n", __func__, buf, len_bytes, - bd_offset_bytes); - return total_entries; -} - -int proc_pid_mem_fd = -1; - -void *fill_bounds_dir_buf_other(long byte_offset_inside_bounds_dir, - long buffer_size_bytes, void *buffer) -{ - unsigned long seekto = bounds_dir_global + byte_offset_inside_bounds_dir; - int read_ret; - off_t seek_ret = lseek(proc_pid_mem_fd, seekto, SEEK_SET); - - if (seek_ret != seekto) - mpx_dig_abort(); - - read_ret = read(proc_pid_mem_fd, buffer, buffer_size_bytes); - /* there shouldn't practically be short reads of /proc/$pid/mem */ - if (read_ret != buffer_size_bytes) - mpx_dig_abort(); - - return buffer; -} -void *fill_bounds_dir_buf_self(long byte_offset_inside_bounds_dir, - long buffer_size_bytes, void *buffer) - -{ - unsigned char vec[buffer_size_bytes / PAGE_SIZE]; - char *dig_bounds_dir_ptr = - (void *)(bounds_dir_global + byte_offset_inside_bounds_dir); - /* - * use mincore() to quickly find the areas of the bounds directory - * that have memory and thus will be worth scanning. - */ - int incore_ret; - - int incore = 0; - int i; - - dprintf4("%s() dig_bounds_dir_ptr: %p\n", __func__, dig_bounds_dir_ptr); - - incore_ret = mincore(dig_bounds_dir_ptr, buffer_size_bytes, &vec[0]); - if (incore_ret) { - printf("mincore ret: %d\n", incore_ret); - perror("mincore"); - mpx_dig_abort(); - } - for (i = 0; i < sizeof(vec); i++) - incore += vec[i]; - dprintf4("%s() total incore: %d\n", __func__, incore); - if (!incore) - return NULL; - dprintf3("%s() total incore: %d\n", __func__, incore); - return dig_bounds_dir_ptr; -} - -int inspect_pid(int pid) -{ - static int dig_nr; - long offset_inside_bounds_dir; - char bounds_dir_buf[sizeof(unsigned long) * (1UL << 15)]; - char *dig_bounds_dir_ptr; - int total_entries = 0; - int nr_populated_bdes = 0; - int inspect_self; - - if (getpid() == pid) { - dprintf4("inspecting self\n"); - inspect_self = 1; - } else { - dprintf4("inspecting pid %d\n", pid); - mpx_dig_abort(); - } - - for (offset_inside_bounds_dir = 0; - offset_inside_bounds_dir < MPX_BOUNDS_TABLE_SIZE_BYTES; - offset_inside_bounds_dir += sizeof(bounds_dir_buf)) { - static int bufs_skipped; - int this_entries; - - if (inspect_self) { - dig_bounds_dir_ptr = - fill_bounds_dir_buf_self(offset_inside_bounds_dir, - sizeof(bounds_dir_buf), - &bounds_dir_buf[0]); - } else { - dig_bounds_dir_ptr = - fill_bounds_dir_buf_other(offset_inside_bounds_dir, - sizeof(bounds_dir_buf), - &bounds_dir_buf[0]); - } - if (!dig_bounds_dir_ptr) { - bufs_skipped++; - continue; - } - this_entries = search_bd_buf(dig_bounds_dir_ptr, - sizeof(bounds_dir_buf), - offset_inside_bounds_dir, - &nr_populated_bdes); - total_entries += this_entries; - } - printf("mpx dig (%3d) complete, SUCCESS (%8d / %4d)\n", ++dig_nr, - total_entries, nr_populated_bdes); - return total_entries + nr_populated_bdes; -} - -#ifdef MPX_DIG_REMOTE -int main(int argc, char **argv) -{ - int err; - char *c; - unsigned long bounds_dir_entry; - int pid; - - printf("mpx-dig starting...\n"); - err = sscanf(argv[1], "%d", &pid); - printf("parsing: '%s', err: %d\n", argv[1], err); - if (err != 1) - mpx_dig_abort(); - - err = sscanf(argv[2], "%lx", &bounds_dir_global); - printf("parsing: '%s': %d\n", argv[2], err); - if (err != 1) - mpx_dig_abort(); - - proc_pid_mem_fd = open_proc(pid, "mem"); - if (proc_pid_mem_fd < 0) - mpx_dig_abort(); - - inspect_pid(pid); - return 0; -} -#endif - -long inspect_me(struct mpx_bounds_dir *bounds_dir) -{ - int pid = getpid(); - - pid_load_vaddrs(pid); - bounds_dir_global = (unsigned long)bounds_dir; - 
dprintf4("enter %s() bounds dir: %p\n", __func__, bounds_dir); - return inspect_pid(pid); -} diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h deleted file mode 100644 index d1b61ab870f8..000000000000 --- a/tools/testing/selftests/x86/mpx-hw.h +++ /dev/null @@ -1,124 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _MPX_HW_H -#define _MPX_HW_H - -#include - -/* Describe the MPX Hardware Layout in here */ - -#define NR_MPX_BOUNDS_REGISTERS 4 - -#ifdef __i386__ - -#define MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES 16 /* 4 * 32-bits */ -#define MPX_BOUNDS_TABLE_SIZE_BYTES (1ULL << 14) /* 16k */ -#define MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES 4 -#define MPX_BOUNDS_DIR_SIZE_BYTES (1ULL << 22) /* 4MB */ - -#define MPX_BOUNDS_TABLE_BOTTOM_BIT 2 -#define MPX_BOUNDS_TABLE_TOP_BIT 11 -#define MPX_BOUNDS_DIR_BOTTOM_BIT 12 -#define MPX_BOUNDS_DIR_TOP_BIT 31 - -#else - -/* - * Linear Address of "pointer" (LAp) - * 0 -> 2: ignored - * 3 -> 19: index in to bounds table - * 20 -> 47: index in to bounds directory - * 48 -> 63: ignored - */ - -#define MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES 32 -#define MPX_BOUNDS_TABLE_SIZE_BYTES (1ULL << 22) /* 4MB */ -#define MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES 8 -#define MPX_BOUNDS_DIR_SIZE_BYTES (1ULL << 31) /* 2GB */ - -#define MPX_BOUNDS_TABLE_BOTTOM_BIT 3 -#define MPX_BOUNDS_TABLE_TOP_BIT 19 -#define MPX_BOUNDS_DIR_BOTTOM_BIT 20 -#define MPX_BOUNDS_DIR_TOP_BIT 47 - -#endif - -#define MPX_BOUNDS_DIR_NR_ENTRIES \ - (MPX_BOUNDS_DIR_SIZE_BYTES/MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES) -#define MPX_BOUNDS_TABLE_NR_ENTRIES \ - (MPX_BOUNDS_TABLE_SIZE_BYTES/MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES) - -#define MPX_BOUNDS_TABLE_ENTRY_VALID_BIT 0x1 - -struct mpx_bd_entry { - union { - char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES]; - void *contents[0]; - }; -} __attribute__((packed)); - -struct mpx_bt_entry { - union { - char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES]; - unsigned long contents[0]; - }; -} __attribute__((packed)); - -struct mpx_bounds_dir { - struct mpx_bd_entry entries[MPX_BOUNDS_DIR_NR_ENTRIES]; -} __attribute__((packed)); - -struct mpx_bounds_table { - struct mpx_bt_entry entries[MPX_BOUNDS_TABLE_NR_ENTRIES]; -} __attribute__((packed)); - -static inline unsigned long GET_BITS(unsigned long val, int bottombit, int topbit) -{ - int total_nr_bits = topbit - bottombit; - unsigned long mask = (1UL << total_nr_bits)-1; - return (val >> bottombit) & mask; -} - -static inline unsigned long __vaddr_bounds_table_index(void *vaddr) -{ - return GET_BITS((unsigned long)vaddr, MPX_BOUNDS_TABLE_BOTTOM_BIT, - MPX_BOUNDS_TABLE_TOP_BIT); -} - -static inline unsigned long __vaddr_bounds_directory_index(void *vaddr) -{ - return GET_BITS((unsigned long)vaddr, MPX_BOUNDS_DIR_BOTTOM_BIT, - MPX_BOUNDS_DIR_TOP_BIT); -} - -static inline struct mpx_bd_entry *mpx_vaddr_to_bd_entry(void *vaddr, - struct mpx_bounds_dir *bounds_dir) -{ - unsigned long index = __vaddr_bounds_directory_index(vaddr); - return &bounds_dir->entries[index]; -} - -static inline int bd_entry_valid(struct mpx_bd_entry *bounds_dir_entry) -{ - unsigned long __bd_entry = (unsigned long)bounds_dir_entry->contents; - return (__bd_entry & MPX_BOUNDS_TABLE_ENTRY_VALID_BIT); -} - -static inline struct mpx_bounds_table * -__bd_entry_to_bounds_table(struct mpx_bd_entry *bounds_dir_entry) -{ - unsigned long __bd_entry = (unsigned long)bounds_dir_entry->contents; - assert(__bd_entry & MPX_BOUNDS_TABLE_ENTRY_VALID_BIT); - __bd_entry &= ~MPX_BOUNDS_TABLE_ENTRY_VALID_BIT; - return (struct mpx_bounds_table *)__bd_entry; -} - -static 
inline struct mpx_bt_entry * -mpx_vaddr_to_bt_entry(void *vaddr, struct mpx_bounds_dir *bounds_dir) -{ - struct mpx_bd_entry *bde = mpx_vaddr_to_bd_entry(vaddr, bounds_dir); - struct mpx_bounds_table *bt = __bd_entry_to_bounds_table(bde); - unsigned long index = __vaddr_bounds_table_index(vaddr); - return &bt->entries[index]; -} - -#endif /* _MPX_HW_H */ diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c deleted file mode 100644 index 23ddd453f362..000000000000 --- a/tools/testing/selftests/x86/mpx-mini-test.c +++ /dev/null @@ -1,1613 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * mpx-mini-test.c: routines to test Intel MPX (Memory Protection eXtentions) - * - * Written by: - * "Ren, Qiaowei" - * "Wei, Gang" - * "Hansen, Dave" - */ - -/* - * 2014-12-05: Dave Hansen: fixed all of the compiler warnings, and made sure - * it works on 32-bit. - */ - -int inspect_every_this_many_mallocs = 100; -int zap_all_every_this_many_mallocs = 1000; - -#define _GNU_SOURCE -#define _LARGEFILE64_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mpx-hw.h" -#include "mpx-debug.h" -#include "mpx-mm.h" - -#ifndef __always_inline -#define __always_inline inline __attribute__((always_inline) -#endif - -#ifndef TEST_DURATION_SECS -#define TEST_DURATION_SECS 3 -#endif - -void write_int_to(char *prefix, char *file, int int_to_write) -{ - char buf[100]; - int fd = open(file, O_RDWR); - int len; - int ret; - - assert(fd >= 0); - len = snprintf(buf, sizeof(buf), "%s%d", prefix, int_to_write); - assert(len >= 0); - assert(len < sizeof(buf)); - ret = write(fd, buf, len); - assert(ret == len); - ret = close(fd); - assert(!ret); -} - -void write_pid_to(char *prefix, char *file) -{ - write_int_to(prefix, file, getpid()); -} - -void trace_me(void) -{ -/* tracing events dir */ -#define TED "/sys/kernel/debug/tracing/events/" -/* - write_pid_to("common_pid=", TED "signal/filter"); - write_pid_to("common_pid=", TED "exceptions/filter"); - write_int_to("", TED "signal/enable", 1); - write_int_to("", TED "exceptions/enable", 1); -*/ - write_pid_to("", "/sys/kernel/debug/tracing/set_ftrace_pid"); - write_int_to("", "/sys/kernel/debug/tracing/trace", 0); -} - -#define test_failed() __test_failed(__FILE__, __LINE__) -static void __test_failed(char *f, int l) -{ - fprintf(stderr, "abort @ %s::%d\n", f, l); - abort(); -} - -/* Error Printf */ -#define eprintf(args...) fprintf(stderr, args) - -#ifdef __i386__ - -/* i386 directory size is 4MB */ -#define REG_IP_IDX REG_EIP -#define REX_PREFIX - -#define XSAVE_OFFSET_IN_FPMEM sizeof(struct _libc_fpstate) - -/* - * __cpuid() is from the Linux Kernel: - */ -static inline void __cpuid(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - /* ecx is often an input as well as an output. */ - asm volatile( - "push %%ebx;" - "cpuid;" - "mov %%ebx, %1;" - "pop %%ebx" - : "=a" (*eax), - "=g" (*ebx), - "=c" (*ecx), - "=d" (*edx) - : "0" (*eax), "2" (*ecx)); -} - -#else /* __i386__ */ - -#define REG_IP_IDX REG_RIP -#define REX_PREFIX "0x48, " - -#define XSAVE_OFFSET_IN_FPMEM 0 - -/* - * __cpuid() is from the Linux Kernel: - */ -static inline void __cpuid(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - /* ecx is often an input as well as an output. 
*/ - asm volatile( - "cpuid;" - : "=a" (*eax), - "=b" (*ebx), - "=c" (*ecx), - "=d" (*edx) - : "0" (*eax), "2" (*ecx)); -} - -#endif /* !__i386__ */ - -struct xsave_hdr_struct { - uint64_t xstate_bv; - uint64_t reserved1[2]; - uint64_t reserved2[5]; -} __attribute__((packed)); - -struct bndregs_struct { - uint64_t bndregs[8]; -} __attribute__((packed)); - -struct bndcsr_struct { - uint64_t cfg_reg_u; - uint64_t status_reg; -} __attribute__((packed)); - -struct xsave_struct { - uint8_t fpu_sse[512]; - struct xsave_hdr_struct xsave_hdr; - uint8_t ymm[256]; - uint8_t lwp[128]; - struct bndregs_struct bndregs; - struct bndcsr_struct bndcsr; -} __attribute__((packed)); - -uint8_t __attribute__((__aligned__(64))) buffer[4096]; -struct xsave_struct *xsave_buf = (struct xsave_struct *)buffer; - -uint8_t __attribute__((__aligned__(64))) test_buffer[4096]; -struct xsave_struct *xsave_test_buf = (struct xsave_struct *)test_buffer; - -uint64_t num_bnd_chk; - -static __always_inline void xrstor_state(struct xsave_struct *fx, uint64_t mask) -{ - uint32_t lmask = mask; - uint32_t hmask = mask >> 32; - - asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) - : "memory"); -} - -static __always_inline void xsave_state_1(void *_fx, uint64_t mask) -{ - uint32_t lmask = mask; - uint32_t hmask = mask >> 32; - unsigned char *fx = _fx; - - asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) - : "memory"); -} - -static inline uint64_t xgetbv(uint32_t index) -{ - uint32_t eax, edx; - - asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */ - : "=a" (eax), "=d" (edx) - : "c" (index)); - return eax + ((uint64_t)edx << 32); -} - -static uint64_t read_mpx_status_sig(ucontext_t *uctxt) -{ - memset(buffer, 0, sizeof(buffer)); - memcpy(buffer, - (uint8_t *)uctxt->uc_mcontext.fpregs + XSAVE_OFFSET_IN_FPMEM, - sizeof(struct xsave_struct)); - - return xsave_buf->bndcsr.status_reg; -} - -#include - -static uint8_t *get_next_inst_ip(uint8_t *addr) -{ - uint8_t *ip = addr; - uint8_t sib; - uint8_t rm; - uint8_t mod; - uint8_t base; - uint8_t modrm; - - /* determine the prefix. */ - switch(*ip) { - case 0xf2: - case 0xf3: - case 0x66: - ip++; - break; - } - - /* look for rex prefix */ - if ((*ip & 0x40) == 0x40) - ip++; - - /* Make sure we have a MPX instruction. */ - if (*ip++ != 0x0f) - return addr; - - /* Skip the op code byte. */ - ip++; - - /* Get the modrm byte. */ - modrm = *ip++; - - /* Break it down into parts. */ - rm = modrm & 7; - mod = (modrm >> 6); - - /* Init the parts of the address mode. */ - base = 8; - - /* Is it a mem mode? 
*/ - if (mod != 3) { - /* look for scaled indexed addressing */ - if (rm == 4) { - /* SIB addressing */ - sib = *ip++; - base = sib & 7; - switch (mod) { - case 0: - if (base == 5) - ip += 4; - break; - - case 1: - ip++; - break; - - case 2: - ip += 4; - break; - } - - } else { - /* MODRM addressing */ - switch (mod) { - case 0: - /* DISP32 addressing, no base */ - if (rm == 5) - ip += 4; - break; - - case 1: - ip++; - break; - - case 2: - ip += 4; - break; - } - } - } - return ip; -} - -#ifdef si_lower -static inline void *__si_bounds_lower(siginfo_t *si) -{ - return si->si_lower; -} - -static inline void *__si_bounds_upper(siginfo_t *si) -{ - return si->si_upper; -} -#else - -/* - * This deals with old version of _sigfault in some distros: - * - -old _sigfault: - struct { - void *si_addr; - } _sigfault; - -new _sigfault: - struct { - void __user *_addr; - int _trapno; - short _addr_lsb; - union { - struct { - void __user *_lower; - void __user *_upper; - } _addr_bnd; - __u32 _pkey; - }; - } _sigfault; - * - */ - -static inline void **__si_bounds_hack(siginfo_t *si) -{ - void *sigfault = &si->_sifields._sigfault; - void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault); - int *trapno = (int*)end_sigfault; - /* skip _trapno and _addr_lsb */ - void **__si_lower = (void**)(trapno + 2); - - return __si_lower; -} - -static inline void *__si_bounds_lower(siginfo_t *si) -{ - return *__si_bounds_hack(si); -} - -static inline void *__si_bounds_upper(siginfo_t *si) -{ - return *(__si_bounds_hack(si) + 1); -} -#endif - -static int br_count; -static int expected_bnd_index = -1; -uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */ -unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS]; - -/* Failed address bound checks: */ -#ifndef SEGV_BNDERR -# define SEGV_BNDERR 3 -#endif - -/* - * The kernel is supposed to provide some information about the bounds - * exception in the siginfo. It should match what we have in the bounds - * registers that we are checking against. Just check against the shadow copy - * since it is easily available, and we also check that *it* matches the real - * registers. - */ -void check_siginfo_vs_shadow(siginfo_t* si) -{ - int siginfo_ok = 1; - void *shadow_lower = (void *)(unsigned long)shadow_plb[expected_bnd_index][0]; - void *shadow_upper = (void *)(unsigned long)shadow_plb[expected_bnd_index][1]; - - if ((expected_bnd_index < 0) || - (expected_bnd_index >= NR_MPX_BOUNDS_REGISTERS)) { - fprintf(stderr, "ERROR: invalid expected_bnd_index: %d\n", - expected_bnd_index); - exit(6); - } - if (__si_bounds_lower(si) != shadow_lower) - siginfo_ok = 0; - if (__si_bounds_upper(si) != shadow_upper) - siginfo_ok = 0; - - if (!siginfo_ok) { - fprintf(stderr, "ERROR: siginfo bounds do not match " - "shadow bounds for register %d\n", expected_bnd_index); - exit(7); - } -} - -void handler(int signum, siginfo_t *si, void *vucontext) -{ - int i; - ucontext_t *uctxt = vucontext; - int trapno; - unsigned long ip; - - dprintf1("entered signal handler\n"); - - trapno = uctxt->uc_mcontext.gregs[REG_TRAPNO]; - ip = uctxt->uc_mcontext.gregs[REG_IP_IDX]; - - if (trapno == 5) { - typeof(si->si_addr) *si_addr_ptr = &si->si_addr; - uint64_t status = read_mpx_status_sig(uctxt); - uint64_t br_reason = status & 0x3; - - br_count++; - dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count); - - dprintf2("Saw a #BR! 
status 0x%jx at %016lx br_reason: %jx\n", - status, ip, br_reason); - dprintf2("si_signo: %d\n", si->si_signo); - dprintf2(" signum: %d\n", signum); - dprintf2("info->si_code == SEGV_BNDERR: %d\n", - (si->si_code == SEGV_BNDERR)); - dprintf2("info->si_code: %d\n", si->si_code); - dprintf2("info->si_lower: %p\n", __si_bounds_lower(si)); - dprintf2("info->si_upper: %p\n", __si_bounds_upper(si)); - - for (i = 0; i < 8; i++) - dprintf3("[%d]: %p\n", i, si_addr_ptr[i]); - switch (br_reason) { - case 0: /* traditional BR */ - fprintf(stderr, - "Undefined status with bound exception:%jx\n", - status); - exit(5); - case 1: /* #BR MPX bounds exception */ - /* these are normal and we expect to see them */ - - check_siginfo_vs_shadow(si); - - dprintf1("bounds exception (normal): status 0x%jx at %p si_addr: %p\n", - status, (void *)ip, si->si_addr); - num_bnd_chk++; - uctxt->uc_mcontext.gregs[REG_IP_IDX] = - (greg_t)get_next_inst_ip((uint8_t *)ip); - break; - case 2: - fprintf(stderr, "#BR status == 2, missing bounds table," - "kernel should have handled!!\n"); - exit(4); - break; - default: - fprintf(stderr, "bound check error: status 0x%jx at %p\n", - status, (void *)ip); - num_bnd_chk++; - uctxt->uc_mcontext.gregs[REG_IP_IDX] = - (greg_t)get_next_inst_ip((uint8_t *)ip); - fprintf(stderr, "bound check error: si_addr %p\n", si->si_addr); - exit(3); - } - } else if (trapno == 14) { - eprintf("ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n", - trapno, ip); - eprintf("si_addr %p\n", si->si_addr); - eprintf("REG_ERR: %lx\n", (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]); - test_failed(); - } else { - eprintf("unexpected trap %d! at 0x%lx\n", trapno, ip); - eprintf("si_addr %p\n", si->si_addr); - eprintf("REG_ERR: %lx\n", (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]); - test_failed(); - } -} - -static inline void cpuid_count(unsigned int op, int count, - unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - *eax = op; - *ecx = count; - __cpuid(eax, ebx, ecx, edx); -} - -#define XSTATE_CPUID 0x0000000d - -/* - * List of XSAVE features Linux knows about: - */ -enum xfeature_bit { - XSTATE_BIT_FP, - XSTATE_BIT_SSE, - XSTATE_BIT_YMM, - XSTATE_BIT_BNDREGS, - XSTATE_BIT_BNDCSR, - XSTATE_BIT_OPMASK, - XSTATE_BIT_ZMM_Hi256, - XSTATE_BIT_Hi16_ZMM, - - XFEATURES_NR_MAX, -}; - -#define XSTATE_FP (1 << XSTATE_BIT_FP) -#define XSTATE_SSE (1 << XSTATE_BIT_SSE) -#define XSTATE_YMM (1 << XSTATE_BIT_YMM) -#define XSTATE_BNDREGS (1 << XSTATE_BIT_BNDREGS) -#define XSTATE_BNDCSR (1 << XSTATE_BIT_BNDCSR) -#define XSTATE_OPMASK (1 << XSTATE_BIT_OPMASK) -#define XSTATE_ZMM_Hi256 (1 << XSTATE_BIT_ZMM_Hi256) -#define XSTATE_Hi16_ZMM (1 << XSTATE_BIT_Hi16_ZMM) - -#define MPX_XSTATES (XSTATE_BNDREGS | XSTATE_BNDCSR) /* 0x18 */ - -bool one_bit(unsigned int x, int bit) -{ - return !!(x & (1<xsave_hdr.xstate_bv = 0x10; - xsave_buf->bndcsr.cfg_reg_u = (unsigned long)l1base | 1; - xsave_buf->bndcsr.status_reg = 0; - - dprintf2("bf xrstor\n"); - dprintf2("xsave cndcsr: status %jx, configu %jx\n", - xsave_buf->bndcsr.status_reg, xsave_buf->bndcsr.cfg_reg_u); - xrstor_state(xsave_buf, 0x18); - dprintf2("after xrstor\n"); - - xsave_state_1(xsave_buf, 0x18); - - dprintf1("xsave bndcsr: status %jx, configu %jx\n", - xsave_buf->bndcsr.status_reg, xsave_buf->bndcsr.cfg_reg_u); -} - -#include - -struct mpx_bounds_dir *bounds_dir_ptr; - -unsigned long __bd_incore(const char *func, int line) -{ - unsigned long ret = nr_incore(bounds_dir_ptr, MPX_BOUNDS_DIR_SIZE_BYTES); - return ret; -} 
-#define bd_incore() __bd_incore(__func__, __LINE__) - -void check_clear(void *ptr, unsigned long sz) -{ - unsigned long *i; - - for (i = ptr; (void *)i < ptr + sz; i++) { - if (*i) { - dprintf1("%p is NOT clear at %p\n", ptr, i); - assert(0); - } - } - dprintf1("%p is clear for %lx\n", ptr, sz); -} - -void check_clear_bd(void) -{ - check_clear(bounds_dir_ptr, 2UL << 30); -} - -#define USE_MALLOC_FOR_BOUNDS_DIR 1 -bool process_specific_init(void) -{ - unsigned long size; - unsigned long *dir; - /* Guarantee we have the space to align it, add padding: */ - unsigned long pad = getpagesize(); - - size = 2UL << 30; /* 2GB */ - if (sizeof(unsigned long) == 4) - size = 4UL << 20; /* 4MB */ - dprintf1("trying to allocate %ld MB bounds directory\n", (size >> 20)); - - if (USE_MALLOC_FOR_BOUNDS_DIR) { - unsigned long _dir; - - dir = malloc(size + pad); - assert(dir); - _dir = (unsigned long)dir; - _dir += 0xfffUL; - _dir &= ~0xfffUL; - dir = (void *)_dir; - } else { - /* - * This makes debugging easier because the address - * calculations are simpler: - */ - dir = mmap((void *)0x200000000000, size + pad, - PROT_READ|PROT_WRITE, - MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); - if (dir == (void *)-1) { - perror("unable to allocate bounds directory"); - abort(); - } - check_clear(dir, size); - } - bounds_dir_ptr = (void *)dir; - madvise(bounds_dir_ptr, size, MADV_NOHUGEPAGE); - bd_incore(); - dprintf1("bounds directory: 0x%p -> 0x%p\n", bounds_dir_ptr, - (char *)bounds_dir_ptr + size); - check_clear(dir, size); - enable_mpx(dir); - check_clear(dir, size); - if (prctl(43, 0, 0, 0, 0)) { - printf("no MPX support\n"); - abort(); - return false; - } - return true; -} - -bool process_specific_finish(void) -{ - if (prctl(44)) { - printf("no MPX support\n"); - return false; - } - return true; -} - -void setup_handler() -{ - int r, rs; - struct sigaction newact; - struct sigaction oldact; - - /* #BR is mapped to sigsegv */ - int signum = SIGSEGV; - - newact.sa_handler = 0; /* void(*)(int)*/ - newact.sa_sigaction = handler; /* void (*)(int, siginfo_t*, void *) */ - - /*sigset_t - signals to block while in the handler */ - /* get the old signal mask. */ - rs = sigprocmask(SIG_SETMASK, 0, &newact.sa_mask); - assert(rs == 0); - - /* call sa_sigaction, not sa_handler*/ - newact.sa_flags = SA_SIGINFO; - - newact.sa_restorer = 0; /* void(*)(), obsolete */ - r = sigaction(signum, &newact, &oldact); - assert(r == 0); -} - -void mpx_prepare(void) -{ - dprintf2("%s()\n", __func__); - setup_handler(); - process_specific_init(); -} - -void mpx_cleanup(void) -{ - printf("%s(): %jd BRs. 
bye...\n", __func__, num_bnd_chk); - process_specific_finish(); -} - -/*-------------- the following is test case ---------------*/ -#include -#include -#include -#include -#include - -uint64_t num_lower_brs; -uint64_t num_upper_brs; - -#define MPX_CONFIG_OFFSET 1024 -#define MPX_BOUNDS_OFFSET 960 -#define MPX_HEADER_OFFSET 512 -#define MAX_ADDR_TESTED (1<<28) -#define TEST_ROUNDS 100 - -/* - 0F 1A /r BNDLDX-Load - 0F 1B /r BNDSTX-Store Extended Bounds Using Address Translation - 66 0F 1A /r BNDMOV bnd1, bnd2/m128 - 66 0F 1B /r BNDMOV bnd1/m128, bnd2 - F2 0F 1A /r BNDCU bnd, r/m64 - F2 0F 1B /r BNDCN bnd, r/m64 - F3 0F 1A /r BNDCL bnd, r/m64 - F3 0F 1B /r BNDMK bnd, m64 -*/ - -static __always_inline void xsave_state(void *_fx, uint64_t mask) -{ - uint32_t lmask = mask; - uint32_t hmask = mask >> 32; - unsigned char *fx = _fx; - - asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) - : "memory"); -} - -static __always_inline void mpx_clear_bnd0(void) -{ - long size = 0; - void *ptr = NULL; - /* F3 0F 1B /r BNDMK bnd, m64 */ - /* f3 0f 1b 04 11 bndmk (%rcx,%rdx,1),%bnd0 */ - asm volatile(".byte 0xf3,0x0f,0x1b,0x04,0x11\n\t" - : : "c" (ptr), "d" (size-1) - : "memory"); -} - -static __always_inline void mpx_make_bound_helper(unsigned long ptr, - unsigned long size) -{ - /* F3 0F 1B /r BNDMK bnd, m64 */ - /* f3 0f 1b 04 11 bndmk (%rcx,%rdx,1),%bnd0 */ - asm volatile(".byte 0xf3,0x0f,0x1b,0x04,0x11\n\t" - : : "c" (ptr), "d" (size-1) - : "memory"); -} - -static __always_inline void mpx_check_lowerbound_helper(unsigned long ptr) -{ - /* F3 0F 1A /r NDCL bnd, r/m64 */ - /* f3 0f 1a 01 bndcl (%rcx),%bnd0 */ - asm volatile(".byte 0xf3,0x0f,0x1a,0x01\n\t" - : : "c" (ptr) - : "memory"); -} - -static __always_inline void mpx_check_upperbound_helper(unsigned long ptr) -{ - /* F2 0F 1A /r BNDCU bnd, r/m64 */ - /* f2 0f 1a 01 bndcu (%rcx),%bnd0 */ - asm volatile(".byte 0xf2,0x0f,0x1a,0x01\n\t" - : : "c" (ptr) - : "memory"); -} - -static __always_inline void mpx_movbndreg_helper() -{ - /* 66 0F 1B /r BNDMOV bnd1/m128, bnd2 */ - /* 66 0f 1b c2 bndmov %bnd0,%bnd2 */ - - asm volatile(".byte 0x66,0x0f,0x1b,0xc2\n\t"); -} - -static __always_inline void mpx_movbnd2mem_helper(uint8_t *mem) -{ - /* 66 0F 1B /r BNDMOV bnd1/m128, bnd2 */ - /* 66 0f 1b 01 bndmov %bnd0,(%rcx) */ - asm volatile(".byte 0x66,0x0f,0x1b,0x01\n\t" - : : "c" (mem) - : "memory"); -} - -static __always_inline void mpx_movbnd_from_mem_helper(uint8_t *mem) -{ - /* 66 0F 1A /r BNDMOV bnd1, bnd2/m128 */ - /* 66 0f 1a 01 bndmov (%rcx),%bnd0 */ - asm volatile(".byte 0x66,0x0f,0x1a,0x01\n\t" - : : "c" (mem) - : "memory"); -} - -static __always_inline void mpx_store_dsc_helper(unsigned long ptr_addr, - unsigned long ptr_val) -{ - /* 0F 1B /r BNDSTX-Store Extended Bounds Using Address Translation */ - /* 0f 1b 04 11 bndstx %bnd0,(%rcx,%rdx,1) */ - asm volatile(".byte 0x0f,0x1b,0x04,0x11\n\t" - : : "c" (ptr_addr), "d" (ptr_val) - : "memory"); -} - -static __always_inline void mpx_load_dsc_helper(unsigned long ptr_addr, - unsigned long ptr_val) -{ - /* 0F 1A /r BNDLDX-Load */ - /*/ 0f 1a 04 11 bndldx (%rcx,%rdx,1),%bnd0 */ - asm volatile(".byte 0x0f,0x1a,0x04,0x11\n\t" - : : "c" (ptr_addr), "d" (ptr_val) - : "memory"); -} - -void __print_context(void *__print_xsave_buffer, int line) -{ - uint64_t *bounds = (uint64_t *)(__print_xsave_buffer + MPX_BOUNDS_OFFSET); - uint64_t *cfg = (uint64_t *)(__print_xsave_buffer + MPX_CONFIG_OFFSET); - - int i; - eprintf("%s()::%d\n", "print_context", line); - for (i = 
0; i < 4; i++) { - eprintf("bound[%d]: 0x%016lx 0x%016lx(0x%016lx)\n", i, - (unsigned long)bounds[i*2], - ~(unsigned long)bounds[i*2+1], - (unsigned long)bounds[i*2+1]); - } - - eprintf("cpcfg: %jx cpstatus: %jx\n", cfg[0], cfg[1]); -} -#define print_context(x) __print_context(x, __LINE__) -#ifdef DEBUG -#define dprint_context(x) print_context(x) -#else -#define dprint_context(x) do{}while(0) -#endif - -void init() -{ - int i; - - srand((unsigned int)time(NULL)); - - for (i = 0; i < 4; i++) { - shadow_plb[i][0] = 0; - shadow_plb[i][1] = ~(unsigned long)0; - } -} - -long int __mpx_random(int line) -{ -#ifdef NOT_SO_RANDOM - static long fake = 722122311; - fake += 563792075; - return fakse; -#else - return random(); -#endif -} -#define mpx_random() __mpx_random(__LINE__) - -uint8_t *get_random_addr() -{ - uint8_t*addr = (uint8_t *)(unsigned long)(rand() % MAX_ADDR_TESTED); - return (addr - (unsigned long)addr % sizeof(uint8_t *)); -} - -static inline bool compare_context(void *__xsave_buffer) -{ - uint64_t *bounds = (uint64_t *)(__xsave_buffer + MPX_BOUNDS_OFFSET); - - int i; - for (i = 0; i < 4; i++) { - dprintf3("shadow[%d]{%016lx/%016lx}\nbounds[%d]{%016lx/%016lx}\n", - i, (unsigned long)shadow_plb[i][0], (unsigned long)shadow_plb[i][1], - i, (unsigned long)bounds[i*2], ~(unsigned long)bounds[i*2+1]); - if ((shadow_plb[i][0] != bounds[i*2]) || - (shadow_plb[i][1] != ~(unsigned long)bounds[i*2+1])) { - eprintf("ERROR comparing shadow to real bound register %d\n", i); - eprintf("shadow{0x%016lx/0x%016lx}\nbounds{0x%016lx/0x%016lx}\n", - (unsigned long)shadow_plb[i][0], (unsigned long)shadow_plb[i][1], - (unsigned long)bounds[i*2], (unsigned long)bounds[i*2+1]); - return false; - } - } - - return true; -} - -void mkbnd_shadow(uint8_t *ptr, int index, long offset) -{ - uint64_t *lower = (uint64_t *)&(shadow_plb[index][0]); - uint64_t *upper = (uint64_t *)&(shadow_plb[index][1]); - *lower = (unsigned long)ptr; - *upper = (unsigned long)ptr + offset - 1; -} - -void check_lowerbound_shadow(uint8_t *ptr, int index) -{ - uint64_t *lower = (uint64_t *)&(shadow_plb[index][0]); - if (*lower > (uint64_t)(unsigned long)ptr) - num_lower_brs++; - else - dprintf1("LowerBoundChk passed:%p\n", ptr); -} - -void check_upperbound_shadow(uint8_t *ptr, int index) -{ - uint64_t upper = *(uint64_t *)&(shadow_plb[index][1]); - if (upper < (uint64_t)(unsigned long)ptr) - num_upper_brs++; - else - dprintf1("UpperBoundChk passed:%p\n", ptr); -} - -__always_inline void movbndreg_shadow(int src, int dest) -{ - shadow_plb[dest][0] = shadow_plb[src][0]; - shadow_plb[dest][1] = shadow_plb[src][1]; -} - -__always_inline void movbnd2mem_shadow(int src, unsigned long *dest) -{ - unsigned long *lower = (unsigned long *)&(shadow_plb[src][0]); - unsigned long *upper = (unsigned long *)&(shadow_plb[src][1]); - *dest = *lower; - *(dest+1) = *upper; -} - -__always_inline void movbnd_from_mem_shadow(unsigned long *src, int dest) -{ - unsigned long *lower = (unsigned long *)&(shadow_plb[dest][0]); - unsigned long *upper = (unsigned long *)&(shadow_plb[dest][1]); - *lower = *src; - *upper = *(src+1); -} - -__always_inline void stdsc_shadow(int index, uint8_t *ptr, uint8_t *ptr_val) -{ - shadow_map[0] = (unsigned long)shadow_plb[index][0]; - shadow_map[1] = (unsigned long)shadow_plb[index][1]; - shadow_map[2] = (unsigned long)ptr_val; - dprintf3("%s(%d, %p, %p) set shadow map[2]: %p\n", __func__, - index, ptr, ptr_val, ptr_val); - /*ptr ignored */ -} - -void lddsc_shadow(int index, uint8_t *ptr, uint8_t *ptr_val) -{ - uint64_t lower = 
shadow_map[0]; - uint64_t upper = shadow_map[1]; - uint8_t *value = (uint8_t *)shadow_map[2]; - - if (value != ptr_val) { - dprintf2("%s(%d, %p, %p) init shadow bounds[%d] " - "because %p != %p\n", __func__, index, ptr, - ptr_val, index, value, ptr_val); - shadow_plb[index][0] = 0; - shadow_plb[index][1] = ~(unsigned long)0; - } else { - shadow_plb[index][0] = lower; - shadow_plb[index][1] = upper; - } - /* ptr ignored */ -} - -static __always_inline void mpx_test_helper0(uint8_t *buf, uint8_t *ptr) -{ - mpx_make_bound_helper((unsigned long)ptr, 0x1800); -} - -static __always_inline void mpx_test_helper0_shadow(uint8_t *buf, uint8_t *ptr) -{ - mkbnd_shadow(ptr, 0, 0x1800); -} - -static __always_inline void mpx_test_helper1(uint8_t *buf, uint8_t *ptr) -{ - /* these are hard-coded to check bnd0 */ - expected_bnd_index = 0; - mpx_check_lowerbound_helper((unsigned long)(ptr-1)); - mpx_check_upperbound_helper((unsigned long)(ptr+0x1800)); - /* reset this since we do not expect any more bounds exceptions */ - expected_bnd_index = -1; -} - -static __always_inline void mpx_test_helper1_shadow(uint8_t *buf, uint8_t *ptr) -{ - check_lowerbound_shadow(ptr-1, 0); - check_upperbound_shadow(ptr+0x1800, 0); -} - -static __always_inline void mpx_test_helper2(uint8_t *buf, uint8_t *ptr) -{ - mpx_make_bound_helper((unsigned long)ptr, 0x1800); - mpx_movbndreg_helper(); - mpx_movbnd2mem_helper(buf); - mpx_make_bound_helper((unsigned long)(ptr+0x12), 0x1800); -} - -static __always_inline void mpx_test_helper2_shadow(uint8_t *buf, uint8_t *ptr) -{ - mkbnd_shadow(ptr, 0, 0x1800); - movbndreg_shadow(0, 2); - movbnd2mem_shadow(0, (unsigned long *)buf); - mkbnd_shadow(ptr+0x12, 0, 0x1800); -} - -static __always_inline void mpx_test_helper3(uint8_t *buf, uint8_t *ptr) -{ - mpx_movbnd_from_mem_helper(buf); -} - -static __always_inline void mpx_test_helper3_shadow(uint8_t *buf, uint8_t *ptr) -{ - movbnd_from_mem_shadow((unsigned long *)buf, 0); -} - -static __always_inline void mpx_test_helper4(uint8_t *buf, uint8_t *ptr) -{ - mpx_store_dsc_helper((unsigned long)buf, (unsigned long)ptr); - mpx_make_bound_helper((unsigned long)(ptr+0x12), 0x1800); -} - -static __always_inline void mpx_test_helper4_shadow(uint8_t *buf, uint8_t *ptr) -{ - stdsc_shadow(0, buf, ptr); - mkbnd_shadow(ptr+0x12, 0, 0x1800); -} - -static __always_inline void mpx_test_helper5(uint8_t *buf, uint8_t *ptr) -{ - mpx_load_dsc_helper((unsigned long)buf, (unsigned long)ptr); -} - -static __always_inline void mpx_test_helper5_shadow(uint8_t *buf, uint8_t *ptr) -{ - lddsc_shadow(0, buf, ptr); -} - -#define NR_MPX_TEST_FUNCTIONS 6 - -/* - * For compatibility reasons, MPX will clear the bounds registers - * when you make function calls (among other things). We have to - * preserve the registers in between calls to the "helpers" since - * they build on each other. - * - * Be very careful not to make any function calls inside the - * helpers, or anywhere else beween the xrstor and xsave. 
- */ -#define run_helper(helper_nr, buf, buf_shadow, ptr) do { \ - xrstor_state(xsave_test_buf, flags); \ - mpx_test_helper##helper_nr(buf, ptr); \ - xsave_state(xsave_test_buf, flags); \ - mpx_test_helper##helper_nr##_shadow(buf_shadow, ptr); \ -} while (0) - -static void run_helpers(int nr, uint8_t *buf, uint8_t *buf_shadow, uint8_t *ptr) -{ - uint64_t flags = 0x18; - - dprint_context(xsave_test_buf); - switch (nr) { - case 0: - run_helper(0, buf, buf_shadow, ptr); - break; - case 1: - run_helper(1, buf, buf_shadow, ptr); - break; - case 2: - run_helper(2, buf, buf_shadow, ptr); - break; - case 3: - run_helper(3, buf, buf_shadow, ptr); - break; - case 4: - run_helper(4, buf, buf_shadow, ptr); - break; - case 5: - run_helper(5, buf, buf_shadow, ptr); - break; - default: - test_failed(); - break; - } - dprint_context(xsave_test_buf); -} - -unsigned long buf_shadow[1024]; /* used to check load / store descriptors */ -extern long inspect_me(struct mpx_bounds_dir *bounds_dir); - -long cover_buf_with_bt_entries(void *buf, long buf_len) -{ - int i; - long nr_to_fill; - int ratio = 1000; - unsigned long buf_len_in_ptrs; - - /* Fill about 1/100 of the space with bt entries */ - nr_to_fill = buf_len / (sizeof(unsigned long) * ratio); - - if (!nr_to_fill) - dprintf3("%s() nr_to_fill: %ld\n", __func__, nr_to_fill); - - /* Align the buffer to pointer size */ - while (((unsigned long)buf) % sizeof(void *)) { - buf++; - buf_len--; - } - /* We are storing pointers, so make */ - buf_len_in_ptrs = buf_len / sizeof(void *); - - for (i = 0; i < nr_to_fill; i++) { - long index = (mpx_random() % buf_len_in_ptrs); - void *ptr = buf + index * sizeof(unsigned long); - unsigned long ptr_addr = (unsigned long)ptr; - - /* ptr and size can be anything */ - mpx_make_bound_helper((unsigned long)ptr, 8); - - /* - * take bnd0 and put it in to bounds tables "buf + index" is an - * address inside the buffer where we are pretending that we - * are going to put a pointer We do not, though because we will - * never load entries from the table, so it doesn't matter. - */ - mpx_store_dsc_helper(ptr_addr, (unsigned long)ptr); - dprintf4("storing bound table entry for %lx (buf start @ %p)\n", - ptr_addr, buf); - } - return nr_to_fill; -} - -unsigned long align_down(unsigned long alignme, unsigned long align_to) -{ - return alignme & ~(align_to-1); -} - -unsigned long align_up(unsigned long alignme, unsigned long align_to) -{ - return (alignme + align_to - 1) & ~(align_to-1); -} - -/* - * Using 1MB alignment guarantees that each no allocation - * will overlap with another's bounds tables. - * - * We have to cook our own allocator here. malloc() can - * mix other allocation with ours which means that even - * if we free all of our allocations, there might still - * be bounds tables for the *areas* since there is other - * valid memory there. - * - * We also can't use malloc() because a free() of an area - * might not free it back to the kernel. We want it - * completely unmapped an malloc() does not guarantee - * that. 
- */ -#ifdef __i386__ -long alignment = 4096; -long sz_alignment = 4096; -#else -long alignment = 1 * MB; -long sz_alignment = 1 * MB; -#endif -void *mpx_mini_alloc(unsigned long sz) -{ - unsigned long long tries = 0; - static void *last; - void *ptr; - void *try_at; - - sz = align_up(sz, sz_alignment); - - try_at = last + alignment; - while (1) { - ptr = mmap(try_at, sz, PROT_READ|PROT_WRITE, - MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); - if (ptr == (void *)-1) - return NULL; - if (ptr == try_at) - break; - - munmap(ptr, sz); - try_at += alignment; -#ifdef __i386__ - /* - * This isn't quite correct for 32-bit binaries - * on 64-bit kernels since they can use the - * entire 32-bit address space, but it's close - * enough. - */ - if (try_at > (void *)0xC0000000) -#else - if (try_at > (void *)0x0000800000000000) -#endif - try_at = (void *)0x0; - if (!(++tries % 10000)) - dprintf1("stuck in %s(), tries: %lld\n", __func__, tries); - continue; - } - last = ptr; - dprintf3("mpx_mini_alloc(0x%lx) returning: %p\n", sz, ptr); - return ptr; -} -void mpx_mini_free(void *ptr, long sz) -{ - dprintf2("%s() ptr: %p\n", __func__, ptr); - if ((unsigned long)ptr > 0x100000000000) { - dprintf1("uh oh !!!!!!!!!!!!!!! pointer too high: %p\n", ptr); - test_failed(); - } - sz = align_up(sz, sz_alignment); - dprintf3("%s() ptr: %p before munmap\n", __func__, ptr); - munmap(ptr, sz); - dprintf3("%s() ptr: %p DONE\n", __func__, ptr); -} - -#define NR_MALLOCS 100 -struct one_malloc { - char *ptr; - int nr_filled_btes; - unsigned long size; -}; -struct one_malloc mallocs[NR_MALLOCS]; - -void free_one_malloc(int index) -{ - unsigned long free_ptr; - unsigned long mask; - - if (!mallocs[index].ptr) - return; - - mpx_mini_free(mallocs[index].ptr, mallocs[index].size); - dprintf4("freed[%d]: %p\n", index, mallocs[index].ptr); - - free_ptr = (unsigned long)mallocs[index].ptr; - mask = alignment-1; - dprintf4("lowerbits: %lx / %lx mask: %lx\n", free_ptr, - (free_ptr & mask), mask); - assert((free_ptr & mask) == 0); - - mallocs[index].ptr = NULL; -} - -#ifdef __i386__ -#define MPX_BOUNDS_TABLE_COVERS 4096 -#else -#define MPX_BOUNDS_TABLE_COVERS (1 * MB) -#endif -void zap_everything(void) -{ - long after_zap; - long before_zap; - int i; - - before_zap = inspect_me(bounds_dir_ptr); - dprintf1("zapping everything start: %ld\n", before_zap); - for (i = 0; i < NR_MALLOCS; i++) - free_one_malloc(i); - - after_zap = inspect_me(bounds_dir_ptr); - dprintf1("zapping everything done: %ld\n", after_zap); - /* - * We only guarantee to empty the thing out if our allocations are - * exactly aligned on the boundaries of a boudns table. - */ - if ((alignment >= MPX_BOUNDS_TABLE_COVERS) && - (sz_alignment >= MPX_BOUNDS_TABLE_COVERS)) { - if (after_zap != 0) - test_failed(); - - assert(after_zap == 0); - } -} - -void do_one_malloc(void) -{ - static int malloc_counter; - long sz; - int rand_index = (mpx_random() % NR_MALLOCS); - void *ptr = mallocs[rand_index].ptr; - - dprintf3("%s() enter\n", __func__); - - if (ptr) { - dprintf3("freeing one malloc at index: %d\n", rand_index); - free_one_malloc(rand_index); - if (mpx_random() % (NR_MALLOCS*3) == 3) { - int i; - dprintf3("zapping some more\n"); - for (i = rand_index; i < NR_MALLOCS; i++) - free_one_malloc(i); - } - if ((mpx_random() % zap_all_every_this_many_mallocs) == 4) - zap_everything(); - } - - /* 1->~1M */ - sz = (1 + mpx_random() % 1000) * 1000; - ptr = mpx_mini_alloc(sz); - if (!ptr) { - /* - * If we are failing allocations, just assume we - * are out of memory and zap everything. 
- */ - dprintf3("zapping everything because out of memory\n"); - zap_everything(); - goto out; - } - - dprintf3("malloc: %p size: 0x%lx\n", ptr, sz); - mallocs[rand_index].nr_filled_btes = cover_buf_with_bt_entries(ptr, sz); - mallocs[rand_index].ptr = ptr; - mallocs[rand_index].size = sz; -out: - if ((++malloc_counter) % inspect_every_this_many_mallocs == 0) - inspect_me(bounds_dir_ptr); -} - -void run_timed_test(void (*test_func)(void)) -{ - int done = 0; - long iteration = 0; - static time_t last_print; - time_t now; - time_t start; - - time(&start); - while (!done) { - time(&now); - if ((now - start) > TEST_DURATION_SECS) - done = 1; - - test_func(); - iteration++; - - if ((now - last_print > 1) || done) { - printf("iteration %ld complete, OK so far\n", iteration); - last_print = now; - } - } -} - -void check_bounds_table_frees(void) -{ - printf("executing unmaptest\n"); - inspect_me(bounds_dir_ptr); - run_timed_test(&do_one_malloc); - printf("done with malloc() fun\n"); -} - -void insn_test_failed(int test_nr, int test_round, void *buf, - void *buf_shadow, void *ptr) -{ - print_context(xsave_test_buf); - eprintf("ERROR: test %d round %d failed\n", test_nr, test_round); - while (test_nr == 5) { - struct mpx_bt_entry *bte; - struct mpx_bounds_dir *bd = (void *)bounds_dir_ptr; - struct mpx_bd_entry *bde = mpx_vaddr_to_bd_entry(buf, bd); - - printf(" bd: %p\n", bd); - printf("&bde: %p\n", bde); - printf("*bde: %lx\n", *(unsigned long *)bde); - if (!bd_entry_valid(bde)) - break; - - bte = mpx_vaddr_to_bt_entry(buf, bd); - printf(" te: %p\n", bte); - printf("bte[0]: %lx\n", bte->contents[0]); - printf("bte[1]: %lx\n", bte->contents[1]); - printf("bte[2]: %lx\n", bte->contents[2]); - printf("bte[3]: %lx\n", bte->contents[3]); - break; - } - test_failed(); -} - -void check_mpx_insns_and_tables(void) -{ - int successes = 0; - int failures = 0; - int buf_size = (1024*1024); - unsigned long *buf = malloc(buf_size); - const int total_nr_tests = NR_MPX_TEST_FUNCTIONS * TEST_ROUNDS; - int i, j; - - memset(buf, 0, buf_size); - memset(buf_shadow, 0, sizeof(buf_shadow)); - - for (i = 0; i < TEST_ROUNDS; i++) { - uint8_t *ptr = get_random_addr() + 8; - - for (j = 0; j < NR_MPX_TEST_FUNCTIONS; j++) { - if (0 && j != 5) { - successes++; - continue; - } - dprintf2("starting test %d round %d\n", j, i); - dprint_context(xsave_test_buf); - /* - * test5 loads an address from the bounds tables. - * The load will only complete if 'ptr' matches - * the load and the store, so with random addrs, - * the odds of this are very small. Make it - * higher by only moving 'ptr' 1/10 times. 
- */ - if (random() % 10 <= 0) - ptr = get_random_addr() + 8; - dprintf3("random ptr{%p}\n", ptr); - dprint_context(xsave_test_buf); - run_helpers(j, (void *)buf, (void *)buf_shadow, ptr); - dprint_context(xsave_test_buf); - if (!compare_context(xsave_test_buf)) { - insn_test_failed(j, i, buf, buf_shadow, ptr); - failures++; - goto exit; - } - successes++; - dprint_context(xsave_test_buf); - dprintf2("finished test %d round %d\n", j, i); - dprintf3("\n"); - dprint_context(xsave_test_buf); - } - } - -exit: - dprintf2("\nabout to free:\n"); - free(buf); - dprintf1("successes: %d\n", successes); - dprintf1(" failures: %d\n", failures); - dprintf1(" tests: %d\n", total_nr_tests); - dprintf1(" expected: %jd #BRs\n", num_upper_brs + num_lower_brs); - dprintf1(" saw: %d #BRs\n", br_count); - if (failures) { - eprintf("ERROR: non-zero number of failures\n"); - exit(20); - } - if (successes != total_nr_tests) { - eprintf("ERROR: succeeded fewer than number of tries (%d != %d)\n", - successes, total_nr_tests); - exit(21); - } - if (num_upper_brs + num_lower_brs != br_count) { - eprintf("ERROR: unexpected number of #BRs: %jd %jd %d\n", - num_upper_brs, num_lower_brs, br_count); - eprintf("successes: %d\n", successes); - eprintf(" failures: %d\n", failures); - eprintf(" tests: %d\n", total_nr_tests); - eprintf(" expected: %jd #BRs\n", num_upper_brs + num_lower_brs); - eprintf(" saw: %d #BRs\n", br_count); - exit(22); - } -} - -/* - * This is supposed to SIGSEGV nicely once the kernel - * can no longer allocate vaddr space. - */ -void exhaust_vaddr_space(void) -{ - unsigned long ptr; - /* Try to make sure there is no room for a bounds table anywhere */ - unsigned long skip = MPX_BOUNDS_TABLE_SIZE_BYTES - PAGE_SIZE; -#ifdef __i386__ - unsigned long max_vaddr = 0xf7788000UL; -#else - unsigned long max_vaddr = 0x800000000000UL; -#endif - - dprintf1("%s() start\n", __func__); - /* do not start at 0, we aren't allowed to map there */ - for (ptr = PAGE_SIZE; ptr < max_vaddr; ptr += skip) { - void *ptr_ret; - int ret = madvise((void *)ptr, PAGE_SIZE, MADV_NORMAL); - - if (!ret) { - dprintf1("madvise() %lx ret: %d\n", ptr, ret); - continue; - } - ptr_ret = mmap((void *)ptr, PAGE_SIZE, PROT_READ|PROT_WRITE, - MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); - if (ptr_ret != (void *)ptr) { - perror("mmap"); - dprintf1("mmap(%lx) ret: %p\n", ptr, ptr_ret); - break; - } - if (!(ptr & 0xffffff)) - dprintf1("mmap(%lx) ret: %p\n", ptr, ptr_ret); - } - for (ptr = PAGE_SIZE; ptr < max_vaddr; ptr += skip) { - dprintf2("covering 0x%lx with bounds table entries\n", ptr); - cover_buf_with_bt_entries((void *)ptr, PAGE_SIZE); - } - dprintf1("%s() end\n", __func__); - printf("done with vaddr space fun\n"); -} - -void mpx_table_test(void) -{ - printf("starting mpx bounds table test\n"); - run_timed_test(check_mpx_insns_and_tables); - printf("done with mpx bounds table test\n"); -} - -int main(int argc, char **argv) -{ - int unmaptest = 0; - int vaddrexhaust = 0; - int tabletest = 0; - int i; - - check_mpx_support(); - mpx_prepare(); - srandom(11179); - - bd_incore(); - init(); - bd_incore(); - - trace_me(); - - xsave_state((void *)xsave_test_buf, 0x1f); - if (!compare_context(xsave_test_buf)) - printf("Init failed\n"); - - for (i = 1; i < argc; i++) { - if (!strcmp(argv[i], "unmaptest")) - unmaptest = 1; - if (!strcmp(argv[i], "vaddrexhaust")) - vaddrexhaust = 1; - if (!strcmp(argv[i], "tabletest")) - tabletest = 1; - } - if (!(unmaptest || vaddrexhaust || tabletest)) { - unmaptest = 1; - /* vaddrexhaust = 1; */ - tabletest = 1; - } - if 
(unmaptest) - check_bounds_table_frees(); - if (tabletest) - mpx_table_test(); - if (vaddrexhaust) - exhaust_vaddr_space(); - printf("%s completed successfully\n", argv[0]); - exit(0); -} - -#include "mpx-dig.c" diff --git a/tools/testing/selftests/x86/mpx-mm.h b/tools/testing/selftests/x86/mpx-mm.h deleted file mode 100644 index 6dbdd66b8242..000000000000 --- a/tools/testing/selftests/x86/mpx-mm.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _MPX_MM_H -#define _MPX_MM_H - -#define PAGE_SIZE 4096 -#define MB (1UL<<20) - -extern long nr_incore(void *ptr, unsigned long size_bytes); - -#endif /* _MPX_MM_H */ From f240652b6032b48ad7fa35c5e701cc4c8d697c0b Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 5 Jul 2019 10:53:21 -0700 Subject: [PATCH 06/25] x86/mpx: Remove MPX APIs MPX is being removed from the kernel due to a lack of support in the toolchain going forward (gcc). The first step is to remove the userspace-visible ABIs so that applications will stop using it. The most visible one are the enable/disable prctl()s. Remove them first. This is the most minimal and least invasive change needed to ensure that apps stop using MPX with new kernels. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190705175321.DB42F0AD@viggo.jf.intel.com --- include/uapi/linux/prctl.h | 2 +- kernel/sys.c | 16 ++-------------- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 094bb03b9cc2..961e0a4a0f73 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -181,7 +181,7 @@ struct prctl_mm_map { #define PR_GET_THP_DISABLE 42 /* - * Tell the kernel to start/stop helping userspace manage bounds tables. + * No longer implemented, but left here to ensure the numbers stay reserved: */ #define PR_MPX_ENABLE_MANAGEMENT 43 #define PR_MPX_DISABLE_MANAGEMENT 44 diff --git a/kernel/sys.c b/kernel/sys.c index 2969304c29fe..384b000b7865 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -103,12 +103,6 @@ #ifndef SET_TSC_CTL # define SET_TSC_CTL(a) (-EINVAL) #endif -#ifndef MPX_ENABLE_MANAGEMENT -# define MPX_ENABLE_MANAGEMENT() (-EINVAL) -#endif -#ifndef MPX_DISABLE_MANAGEMENT -# define MPX_DISABLE_MANAGEMENT() (-EINVAL) -#endif #ifndef GET_FP_MODE # define GET_FP_MODE(a) (-EINVAL) #endif @@ -2456,15 +2450,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, up_write(&me->mm->mmap_sem); break; case PR_MPX_ENABLE_MANAGEMENT: - if (arg2 || arg3 || arg4 || arg5) - return -EINVAL; - error = MPX_ENABLE_MANAGEMENT(); - break; case PR_MPX_DISABLE_MANAGEMENT: - if (arg2 || arg3 || arg4 || arg5) - return -EINVAL; - error = MPX_DISABLE_MANAGEMENT(); - break; + /* No longer implemented: */ + return -EINVAL; case PR_SET_FP_MODE: error = SET_FP_MODE(me, arg2); break; From be261ffce6f13229dad50f59c5e491f933d3167f Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 4 Jul 2019 10:46:37 -0500 Subject: [PATCH 07/25] x86: Remove X86_FEATURE_MFENCE_RDTSC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AMD and Intel both have serializing lfence (X86_FEATURE_LFENCE_RDTSC). They've both had it for a long time, and AMD has had it enabled in Linux since Spectre v1 was announced. Back then, there was a proposal to remove the serializing mfence feature bit (X86_FEATURE_MFENCE_RDTSC), since both AMD and Intel have serializing lfence. 
At the time, it was (ahem) speculated that some hypervisors might not yet support its removal, so it remained for the time being. Now a year-and-a-half later, it should be safe to remove. I asked Andrew Cooper about whether it's still needed: So if you're virtualised, you've got no choice in the matter.  lfence is either dispatch-serialising or not on AMD, and you won't be able to change it. Furthermore, you can't accurately tell what state the bit is in, because the MSR might not be virtualised at all, or may not reflect the true state in hardware.  Worse still, attempting to set the bit may not be successful even if there isn't a fault for doing so. Xen sets the DE_CFG bit unconditionally, as does Linux by the looks of things (see MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT).  ISTR other hypervisor vendors saying the same, but I don't have any information to hand. If you are running under a hypervisor which has been updated, then lfence will almost certainly be dispatch-serialising in practice, and you'll almost certainly see the bit already set in DE_CFG.  If you're running under a hypervisor which hasn't been patched since Spectre, you've already lost in many more ways. I'd argue that X86_FEATURE_MFENCE_RDTSC is not worth keeping. So remove it. This will reduce some code rot, and also make it easier to hook barrier_nospec() up to a cmdline disable for performance raisins, without having to need an alternative_3() macro. Signed-off-by: Josh Poimboeuf Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/d990aa51e40063acb9888e8c1b688e41355a9588.1562255067.git.jpoimboe@redhat.com --- arch/x86/include/asm/barrier.h | 3 +-- arch/x86/include/asm/cpufeatures.h | 1 - arch/x86/include/asm/msr.h | 3 +-- arch/x86/kernel/cpu/amd.c | 21 +++------------------ arch/x86/kernel/cpu/hygon.c | 21 +++------------------ tools/arch/x86/include/asm/cpufeatures.h | 1 - 6 files changed, 8 insertions(+), 42 deletions(-) diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 84f848c2541a..7f828fe49797 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -49,8 +49,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, #define array_index_mask_nospec array_index_mask_nospec /* Prevent speculative execution past this barrier. 
*/ -#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \ - "lfence", X86_FEATURE_LFENCE_RDTSC) +#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC) #define dma_rmb() barrier() #define dma_wmb() barrier() diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 56f53bf3bbbf..fcc70ffd88c2 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -96,7 +96,6 @@ #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */ #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 5cc3930cb465..86f20d520a07 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -233,8 +233,7 @@ static __always_inline unsigned long long rdtsc_ordered(void) * Thus, use the preferred barrier on the respective CPU, aiming for * RDTSCP as the default. */ - asm volatile(ALTERNATIVE_3("rdtsc", - "mfence; rdtsc", X86_FEATURE_MFENCE_RDTSC, + asm volatile(ALTERNATIVE_2("rdtsc", "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC, "rdtscp", X86_FEATURE_RDTSCP) : EAX_EDX_RET(val, low, high) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 8d4e50428b68..3afe07d602dd 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -879,12 +879,8 @@ static void init_amd(struct cpuinfo_x86 *c) init_amd_cacheinfo(c); if (cpu_has(c, X86_FEATURE_XMM2)) { - unsigned long long val; - int ret; - /* - * A serializing LFENCE has less overhead than MFENCE, so - * use it for execution serialization. On families which + * Use LFENCE for execution serialization. On families which * don't have that MSR, LFENCE is already serializing. * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. @@ -892,19 +888,8 @@ static void init_amd(struct cpuinfo_x86 *c) msr_set_bit(MSR_F10H_DECFG, MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); - /* - * Verify that the MSR write was successful (could be running - * under a hypervisor) and only then assume that LFENCE is - * serializing. - */ - ret = rdmsrl_safe(MSR_F10H_DECFG, &val); - if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { - /* A serializing LFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); - } else { - /* MFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); - } + /* A serializing LFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); } /* diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 415621ddb8a2..4e28c1fc8749 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -330,12 +330,8 @@ static void init_hygon(struct cpuinfo_x86 *c) init_hygon_cacheinfo(c); if (cpu_has(c, X86_FEATURE_XMM2)) { - unsigned long long val; - int ret; - /* - * A serializing LFENCE has less overhead than MFENCE, so - * use it for execution serialization. On families which + * Use LFENCE for execution serialization. On families which * don't have that MSR, LFENCE is already serializing. 
* msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. @@ -343,19 +339,8 @@ static void init_hygon(struct cpuinfo_x86 *c) msr_set_bit(MSR_F10H_DECFG, MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); - /* - * Verify that the MSR write was successful (could be running - * under a hypervisor) and only then assume that LFENCE is - * serializing. - */ - ret = rdmsrl_safe(MSR_F10H_DECFG, &val); - if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { - /* A serializing LFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); - } else { - /* MFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); - } + /* A serializing LFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); } /* diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 998c2cc08363..9b98edb6b2d3 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -96,7 +96,6 @@ #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */ #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ From bdd50d7421b2f8fd99f953e1f747e0cb3f3bed64 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 23 Jul 2019 16:44:15 +0900 Subject: [PATCH 08/25] x86/bitops: Use __builtin_constant_p() directly instead of IS_IMMEDIATE() __builtin_constant_p(nr) is used everywhere now. It does not make much sense to define IS_IMMEDIATE() as its alias. Signed-off-by: Masahiro Yamada Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190723074415.26811-1-yamada.masahiro@socionext.com --- arch/x86/include/asm/bitops.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index ba15d53c1ca7..7d1f6a49bfae 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -45,14 +45,13 @@ * We do the locked ops that don't return the old value as * a mask operation on a byte. 
*/ -#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) #define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3)) #define CONST_MASK(nr) (1 << ((nr) & 7)) static __always_inline void arch_set_bit(long nr, volatile unsigned long *addr) { - if (IS_IMMEDIATE(nr)) { + if (__builtin_constant_p(nr)) { asm volatile(LOCK_PREFIX "orb %1,%0" : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)CONST_MASK(nr)) @@ -72,7 +71,7 @@ arch___set_bit(long nr, volatile unsigned long *addr) static __always_inline void arch_clear_bit(long nr, volatile unsigned long *addr) { - if (IS_IMMEDIATE(nr)) { + if (__builtin_constant_p(nr)) { asm volatile(LOCK_PREFIX "andb %1,%0" : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)~CONST_MASK(nr))); @@ -123,7 +122,7 @@ arch___change_bit(long nr, volatile unsigned long *addr) static __always_inline void arch_change_bit(long nr, volatile unsigned long *addr) { - if (IS_IMMEDIATE(nr)) { + if (__builtin_constant_p(nr)) { asm volatile(LOCK_PREFIX "xorb %1,%0" : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)CONST_MASK(nr))); From 4599c6671b8119ed5455e4ed6b967b461c27a9f7 Mon Sep 17 00:00:00 2001 From: Nikolas Nyby Date: Wed, 24 Jul 2019 00:13:37 -0400 Subject: [PATCH 09/25] x86/crash: Remove unnecessary comparison The ret comparison and return are unnecessary as of commit f296f2634920 ("x86/kexec: Remove walk_iomem_res() call with GART type") elf_header_exclude_ranges() returns ret in any case, with or without this comparison. [ tglx: Use a proper commit reference instead of full SHA ] Signed-off-by: Nikolas Nyby Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190724041337.8346-1-nikolas@gnu.org --- arch/x86/kernel/crash.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 2bf70a2fed90..eb651fbde92a 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -225,8 +225,6 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem) if (crashk_low_res.end) { ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); - if (ret) - return ret; } return ret; From 41b57d1bb8a4084e651c1f9a754fca64952666a0 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 6 Aug 2019 17:25:39 +0100 Subject: [PATCH 10/25] lib: Remove redundant ftrace flag removal Since architectures can implement ftrace using a variety of mechanisms, generic code should always use CC_FLAGS_FTRACE rather than assuming that ftrace is built using -pg. Since commit: 2464a609ded09420 ("ftrace: do not trace library functions") ... lib/Makefile has removed CC_FLAGS_FTRACE from KBUILD_CFLAGS, so ftrace is disabled for all files under lib/. Given that, we shouldn't explicitly remove -pg when building lib/string.o, as this is redundant and bad form. Clean things up accordingly. There should be no functional change as a result of this patch. 
Signed-off-by: Mark Rutland Signed-off-by: Borislav Petkov Cc: Andrew Morton Cc: Andy Shevchenko Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Coly Li Cc: Gary R Hook Cc: Ingo Molnar Cc: Kees Cook Cc: Kent Overstreet Cc: Masahiro Yamada Cc: Matthew Wilcox Link: https://lkml.kernel.org/r/20190806162539.51918-1-mark.rutland@arm.com --- lib/Makefile | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/Makefile b/lib/Makefile index 095601ce371d..34f8a83b2cbd 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -21,10 +21,6 @@ KCOV_INSTRUMENT_dynamic_debug.o := n ifdef CONFIG_AMD_MEM_ENCRYPT KASAN_SANITIZE_string.o := n -ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_string.o = -pg -endif - CFLAGS_string.o := $(call cc-option, -fno-stack-protector) endif From bba10c5cab4ddd8725a7998e064fc72c9770c667 Mon Sep 17 00:00:00 2001 From: Rahul Tanwar Date: Fri, 16 Aug 2019 16:18:57 +0800 Subject: [PATCH 11/25] x86/cpu: Use constant definitions for CPU models Replace model numbers with their respective macro definitions when comparing CPU models. Suggested-by: Andy Shevchenko Signed-off-by: Rahul Tanwar Signed-off-by: Borislav Petkov Cc: alan@linux.intel.com Cc: cheol.yong.kim@intel.com Cc: Hans de Goede Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: qi-ming.wu@intel.com Cc: "Rafael J. Wysocki" Cc: Ricardo Neri Cc: Thomas Gleixner Cc: Tony Luck Cc: x86-ml Link: https://lkml.kernel.org/r/f7a0e142faa953a53d5f81f78055e1b3c793b134.1565940653.git.rahul.tanwar@linux.intel.com --- arch/x86/kernel/cpu/intel.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 8d6d92ebeb54..66de4b84c369 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -265,9 +265,9 @@ static void early_init_intel(struct cpuinfo_x86 *c) /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ if (c->x86 == 6) { switch (c->x86_model) { - case 0x27: /* Penwell */ - case 0x35: /* Cloverview */ - case 0x4a: /* Merrifield */ + case INTEL_FAM6_ATOM_SALTWELL_MID: + case INTEL_FAM6_ATOM_SALTWELL_TABLET: + case INTEL_FAM6_ATOM_SILVERMONT_MID: set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); break; default: From 342061c53a049569fc7f56d237753c26b4b2166d Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 19 Aug 2019 09:01:40 +0200 Subject: [PATCH 12/25] x86/msr-index: Move AMD MSRs where they belong ... sort them in and fixup comment, while at it. No functional changes. Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20190819070140.23708-1-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/msr-index.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 6b4fc2788078..f9a01a04c708 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -375,13 +375,17 @@ /* Alternative perfctr range with full access. */ #define MSR_IA32_PMC0 0x000004c1 -/* AMD64 MSRs. Not complete. See the architecture manual for a more - complete list. */ - +/* + * AMD64 MSRs. Not complete. See the architecture manual for a more + * complete list. 
+ */ #define MSR_AMD64_PATCH_LEVEL 0x0000008b #define MSR_AMD64_TSC_RATIO 0xc0000104 #define MSR_AMD64_NB_CFG 0xc001001f #define MSR_AMD64_PATCH_LOADER 0xc0010020 +#define MSR_AMD_PERF_CTL 0xc0010062 +#define MSR_AMD_PERF_STATUS 0xc0010063 +#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 #define MSR_AMD64_OSVW_STATUS 0xc0010141 #define MSR_AMD64_LS_CFG 0xc0011020 @@ -560,9 +564,6 @@ #define MSR_IA32_PERF_STATUS 0x00000198 #define MSR_IA32_PERF_CTL 0x00000199 #define INTEL_PERF_CTL_MASK 0xffff -#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 -#define MSR_AMD_PERF_STATUS 0xc0010063 -#define MSR_AMD_PERF_CTL 0xc0010062 #define MSR_IA32_MPERF 0x000000e7 #define MSR_IA32_APERF 0x000000e8 From f25896ebfe0cf818ebd1adb5e6a05dc40b820e45 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Tue, 20 Aug 2019 08:51:21 +0200 Subject: [PATCH 13/25] x86/PCI: Remove superfluous returns from void functions Remove unnecessary empty return statements at the end of void functions in arch/x86/kernel/quirks.c. Signed-off-by: Krzysztof Wilczynski Signed-off-by: Borislav Petkov Cc: Bjorn Helgaas Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: linux-pci@vger.kernel.org Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20190820065121.16594-1-kw@linux.com --- arch/x86/kernel/quirks.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 8451f38ad399..1daf8f2aa21f 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -90,8 +90,6 @@ static void ich_force_hpet_resume(void) BUG(); else printk(KERN_DEBUG "Force enabled HPET at resume\n"); - - return; } static void ich_force_enable_hpet(struct pci_dev *dev) @@ -448,7 +446,6 @@ static void nvidia_force_enable_hpet(struct pci_dev *dev) dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", force_hpet_address); cached_dev = dev; - return; } /* ISA Bridges */ @@ -513,7 +510,6 @@ static void e6xx_force_enable_hpet(struct pci_dev *dev) force_hpet_resume_type = NONE_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); - return; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU, e6xx_force_enable_hpet); From 248d327ed7b6d3ebda2ac7b38482a306108bcd3e Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 26 Aug 2019 09:13:12 +0000 Subject: [PATCH 14/25] x86/ftrace: Remove mcount() declaration Commit 562e14f72292 ("ftrace/x86: Remove mcount support") removed the support for mcount, but forgot to remove the mcount() declaration. Clean it up. Signed-off-by: Jisheng Zhang Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r20190826170150.10f101ba@xhacker.debian --- arch/x86/include/asm/ftrace.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 287f1f7b2e52..c38a66661576 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -16,7 +16,6 @@ #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR #ifndef __ASSEMBLY__ -extern void mcount(void); extern atomic_t modifying_ftrace_code; extern void __fentry__(void); From cbb1133b563a63901cf778220eb17e3ff1425aed Mon Sep 17 00:00:00 2001 From: Cao Jin Date: Wed, 28 Aug 2019 14:11:00 +0800 Subject: [PATCH 15/25] x86/cpufeature: Explain the macro duplication Explain the intent behind the duplication of the BUILD_BUG_ON_ZERO(NCAPINTS != n) check in *_MASK_CHECK and its immediate use in the *MASK_BIT_SET macros too. [ bp: Massage. 
] Suggested-by: Borislav Petkov Signed-off-by: Cao Jin Signed-off-by: Borislav Petkov Cc: Dave Hansen Cc: Fenghua Yu Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jann Horn Cc: Masahiro Yamada Cc: Michael Ellerman Cc: Nadav Amit Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20190828061100.27032-1-caoj.fnst@cn.fujitsu.com --- arch/x86/include/asm/cpufeature.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 58acda503817..59bf91c57aa8 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -61,6 +61,13 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; #define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \ (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word )) +/* + * {REQUIRED,DISABLED}_MASK_CHECK below may seem duplicated with the + * following BUILD_BUG_ON_ZERO() check but when NCAPINTS gets changed, all + * header macros which use NCAPINTS need to be changed. The duplicated macro + * use causes the compiler to issue errors for all headers so that all usage + * sites can be corrected. + */ #define REQUIRED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \ From c66f78a6de4de6cb520b15cf6a1b586617b9add5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 27 Aug 2019 21:48:21 +0200 Subject: [PATCH 16/25] x86/intel: Aggregate big core client naming Currently the big core client models either have: - no OPTDIFF - _CORE - _DESKTOP Make it uniformly: 'no OPTDIFF'. for i in `git grep -l "\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*_\(CORE\|DESKTOP\)"` do sed -i -e 's/\(\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*\)_\(CORE\|DESKTOP\)/\1/g' ${i} done Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Tony Luck Cc: x86@kernel.org Cc: Dave Hansen Cc: Borislav Petkov Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190827195122.513945586@infradead.org --- arch/x86/events/intel/core.c | 26 +++++++++--------- arch/x86/events/intel/cstate.c | 22 +++++++-------- arch/x86/events/intel/pt.c | 2 +- arch/x86/events/intel/rapl.c | 10 +++---- arch/x86/events/intel/uncore.c | 12 ++++----- arch/x86/events/msr.c | 8 +++--- arch/x86/include/asm/intel-family.h | 10 +++---- arch/x86/kernel/apic/apic.c | 8 +++--- arch/x86/kernel/cpu/bugs.c | 8 +++--- arch/x86/kernel/cpu/intel.c | 10 +++---- drivers/cpufreq/intel_pstate.c | 12 ++++----- drivers/idle/intel_idle.c | 10 +++---- drivers/platform/x86/intel_pmc_core.c | 4 +-- drivers/platform/x86/intel_pmc_core_pltdrv.c | 4 +-- drivers/powercap/intel_rapl_common.c | 10 +++---- tools/power/x86/turbostat/turbostat.c | 28 ++++++++++---------- 16 files changed, 92 insertions(+), 92 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 648260b5f367..76bff3a33725 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3964,12 +3964,12 @@ static __init void intel_clovertown_quirk(void) } static const struct x86_cpu_desc isolation_ucodes[] = { - INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f), INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e), INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015), INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037), INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a), - INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 
0x00000023), INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014), INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010), INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009), @@ -3979,16 +3979,16 @@ static const struct x86_cpu_desc isolation_ucodes[] = { INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c), - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c), - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e), INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e), INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e), INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e), - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e), - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e), - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e), - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e), {} }; @@ -4857,7 +4857,7 @@ __init int intel_pmu_init(void) break; - case INTEL_FAM6_HASWELL_CORE: + case INTEL_FAM6_HASWELL: case INTEL_FAM6_HASWELL_X: case INTEL_FAM6_HASWELL_ULT: case INTEL_FAM6_HASWELL_GT3E: @@ -4890,7 +4890,7 @@ __init int intel_pmu_init(void) name = "haswell"; break; - case INTEL_FAM6_BROADWELL_CORE: + case INTEL_FAM6_BROADWELL: case INTEL_FAM6_BROADWELL_XEON_D: case INTEL_FAM6_BROADWELL_GT3E: case INTEL_FAM6_BROADWELL_X: @@ -4956,9 +4956,9 @@ __init int intel_pmu_init(void) pmem = true; /* fall through */ case INTEL_FAM6_SKYLAKE_MOBILE: - case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_KABYLAKE_MOBILE: - case INTEL_FAM6_KABYLAKE_DESKTOP: + case INTEL_FAM6_KABYLAKE: x86_add_quirk(intel_pebs_isolation_quirk); x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -5006,7 +5006,7 @@ __init int intel_pmu_init(void) pmem = true; /* fall through */ case INTEL_FAM6_ICELAKE_MOBILE: - case INTEL_FAM6_ICELAKE_DESKTOP: + case INTEL_FAM6_ICELAKE: x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 688592b34564..3854400ad8ff 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -593,40 +593,40 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_HASWELL, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates), + 
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE, hswult_cstates), X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_DESKTOP, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, snb_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index d3dc2274ddd4..8cb9626e6277 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -204,7 +204,7 @@ static int __init pt_pmu_hw_init(void) /* model-specific quirks */ switch (boot_cpu_data.x86_model) { - case INTEL_FAM6_BROADWELL_CORE: + case INTEL_FAM6_BROADWELL: case INTEL_FAM6_BROADWELL_XEON_D: case INTEL_FAM6_BROADWELL_GT3E: case INTEL_FAM6_BROADWELL_X: diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 64ab51ffdf06..c1278e2764f4 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -720,27 +720,27 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, model_snbep), X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, model_snb), X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, model_snbep), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, model_hsw), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, model_hsw), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, model_hsw), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, model_knl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, model_knl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, model_skl), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, model_skl), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, model_skl), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, 
model_skl), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, model_skl), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP, model_skl), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE, model_skl), {}, }; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 3694a5d0703d..d5e091586242 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1451,10 +1451,10 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL, hsw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL, bdw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init), @@ -1465,14 +1465,14 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE, skl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE, skl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP, icl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE, icl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_X, snr_uncore_init), {}, }; diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 9431447541e9..08d320a39dff 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -59,12 +59,12 @@ static bool test_intel(int idx, void *data) case INTEL_FAM6_IVYBRIDGE: case INTEL_FAM6_IVYBRIDGE_X: - case INTEL_FAM6_HASWELL_CORE: + case INTEL_FAM6_HASWELL: case INTEL_FAM6_HASWELL_X: case INTEL_FAM6_HASWELL_ULT: case INTEL_FAM6_HASWELL_GT3E: - case INTEL_FAM6_BROADWELL_CORE: + case INTEL_FAM6_BROADWELL: case INTEL_FAM6_BROADWELL_XEON_D: case INTEL_FAM6_BROADWELL_GT3E: case INTEL_FAM6_BROADWELL_X: @@ -85,10 +85,10 @@ static bool test_intel(int idx, void *data) break; case INTEL_FAM6_SKYLAKE_MOBILE: - case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_KABYLAKE_MOBILE: 
- case INTEL_FAM6_KABYLAKE_DESKTOP: + case INTEL_FAM6_KABYLAKE: case INTEL_FAM6_ICELAKE_MOBILE: if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) return true; diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index fe7c205233f1..800dd17e61c4 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -49,27 +49,27 @@ #define INTEL_FAM6_IVYBRIDGE 0x3A #define INTEL_FAM6_IVYBRIDGE_X 0x3E -#define INTEL_FAM6_HASWELL_CORE 0x3C +#define INTEL_FAM6_HASWELL 0x3C #define INTEL_FAM6_HASWELL_X 0x3F #define INTEL_FAM6_HASWELL_ULT 0x45 #define INTEL_FAM6_HASWELL_GT3E 0x46 -#define INTEL_FAM6_BROADWELL_CORE 0x3D +#define INTEL_FAM6_BROADWELL 0x3D #define INTEL_FAM6_BROADWELL_GT3E 0x47 #define INTEL_FAM6_BROADWELL_X 0x4F #define INTEL_FAM6_BROADWELL_XEON_D 0x56 #define INTEL_FAM6_SKYLAKE_MOBILE 0x4E -#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E +#define INTEL_FAM6_SKYLAKE 0x5E #define INTEL_FAM6_SKYLAKE_X 0x55 #define INTEL_FAM6_KABYLAKE_MOBILE 0x8E -#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E +#define INTEL_FAM6_KABYLAKE 0x9E #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 #define INTEL_FAM6_ICELAKE_X 0x6A #define INTEL_FAM6_ICELAKE_XEON_D 0x6C -#define INTEL_FAM6_ICELAKE_DESKTOP 0x7D +#define INTEL_FAM6_ICELAKE 0x7D #define INTEL_FAM6_ICELAKE_MOBILE 0x7E #define INTEL_FAM6_ICELAKE_NNPI 0x9D diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index aa5495d0f478..0527465e1c98 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -593,18 +593,18 @@ static const struct x86_cpu_id deadline_match[] = { DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL, 0x22), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_GT3E, 0x17), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_CORE, 0x25), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL, 0x25), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_GT3E, 0x17), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_MOBILE, 0xb2), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_DESKTOP, 0xb2), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE, 0xb2), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_MOBILE, 0x52), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_DESKTOP, 0x52), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE, 0x52), {}, }; diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c6fa3ef10b4e..6c611abd6cdd 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1184,15 +1184,15 @@ static void override_cache_bits(struct cpuinfo_x86 *c) case INTEL_FAM6_WESTMERE: case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_IVYBRIDGE: - case INTEL_FAM6_HASWELL_CORE: + case INTEL_FAM6_HASWELL: case INTEL_FAM6_HASWELL_ULT: case INTEL_FAM6_HASWELL_GT3E: - case INTEL_FAM6_BROADWELL_CORE: + case INTEL_FAM6_BROADWELL: case INTEL_FAM6_BROADWELL_GT3E: case INTEL_FAM6_SKYLAKE_MOBILE: - case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_KABYLAKE_MOBILE: - case INTEL_FAM6_KABYLAKE_DESKTOP: + case INTEL_FAM6_KABYLAKE: if (c->x86_cache_bits < 44) c->x86_cache_bits = 44; break; diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 8d6d92ebeb54..ca62563c1bb3 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -142,21 +142,21 @@ struct 
sku_microcode { u32 microcode; }; static const struct sku_microcode spectre_bad_microcodes[] = { - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, + { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 }, + { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE, 0x09, 0x80 }, { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, - { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, + { INTEL_FAM6_BROADWELL, 0x04, 0x28 }, { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, - { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 }, + { INTEL_FAM6_HASWELL, 0x03, 0x23 }, { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index cc27d4c59dca..17cb9af25328 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1867,12 +1867,12 @@ static const struct pstate_funcs knl_funcs = { (unsigned long)&policy } static const struct x86_cpu_id intel_pstate_cpu_ids[] = { - ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), + ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs), ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), - ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs), + ICPU(INTEL_FAM6_HASWELL, core_funcs), + ICPU(INTEL_FAM6_BROADWELL, core_funcs), ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), ICPU(INTEL_FAM6_HASWELL_X, core_funcs), ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs), @@ -1881,7 +1881,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs), ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), - ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE, core_funcs), ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), @@ -1900,13 +1900,13 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { }; static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { - ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs), + ICPU(INTEL_FAM6_KABYLAKE, core_funcs), {} }; static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), - ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE, core_funcs), {} }; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index fa5ff77b8fe4..f45898965fe1 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1072,19 +1072,19 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(ATOM_AIRMONT, idle_cpu_cht), INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb), INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt), - INTEL_CPU_FAM6(HASWELL_CORE, idle_cpu_hsw), + INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_ULT, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_GT3E, idle_cpu_hsw), INTEL_CPU_FAM6(ATOM_SILVERMONT_X, idle_cpu_avn), - INTEL_CPU_FAM6(BROADWELL_CORE, idle_cpu_bdw), + 
INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_GT3E, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_XEON_D, idle_cpu_bdw), INTEL_CPU_FAM6(SKYLAKE_MOBILE, idle_cpu_skl), - INTEL_CPU_FAM6(SKYLAKE_DESKTOP, idle_cpu_skl), + INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(KABYLAKE_MOBILE, idle_cpu_skl), - INTEL_CPU_FAM6(KABYLAKE_DESKTOP, idle_cpu_skl), + INTEL_CPU_FAM6(KABYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE_X, idle_cpu_skx), INTEL_CPU_FAM6(XEON_PHI_KNL, idle_cpu_knl), INTEL_CPU_FAM6(XEON_PHI_KNM, idle_cpu_knl), @@ -1311,7 +1311,7 @@ static void intel_idle_state_table_update(void) case INTEL_FAM6_ATOM_GOLDMONT_PLUS: bxt_idle_state_table_update(); break; - case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_SKYLAKE: sklh_idle_state_table_update(); break; } diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index c510d0d72475..3a82df45bced 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -807,9 +807,9 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(SKYLAKE_MOBILE, spt_reg_map), - INTEL_CPU_FAM6(SKYLAKE_DESKTOP, spt_reg_map), + INTEL_CPU_FAM6(SKYLAKE, spt_reg_map), INTEL_CPU_FAM6(KABYLAKE_MOBILE, spt_reg_map), - INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), + INTEL_CPU_FAM6(KABYLAKE, spt_reg_map), INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index a8754a6db1b8..52f5bcb88011 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -31,9 +31,9 @@ static struct platform_device pmc_core_device = { */ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = { INTEL_CPU_FAM6(SKYLAKE_MOBILE, pmc_core_device), - INTEL_CPU_FAM6(SKYLAKE_DESKTOP, pmc_core_device), + INTEL_CPU_FAM6(SKYLAKE, pmc_core_device), INTEL_CPU_FAM6(KABYLAKE_MOBILE, pmc_core_device), - INTEL_CPU_FAM6(KABYLAKE_DESKTOP, pmc_core_device), + INTEL_CPU_FAM6(KABYLAKE, pmc_core_device), INTEL_CPU_FAM6(CANNONLAKE_MOBILE, pmc_core_device), INTEL_CPU_FAM6(ICELAKE_MOBILE, pmc_core_device), {} diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 6df481896b5f..7c9eb78d3471 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -957,24 +957,24 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(IVYBRIDGE, rapl_defaults_core), INTEL_CPU_FAM6(IVYBRIDGE_X, rapl_defaults_core), - INTEL_CPU_FAM6(HASWELL_CORE, rapl_defaults_core), + INTEL_CPU_FAM6(HASWELL, rapl_defaults_core), INTEL_CPU_FAM6(HASWELL_ULT, rapl_defaults_core), INTEL_CPU_FAM6(HASWELL_GT3E, rapl_defaults_core), INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server), - INTEL_CPU_FAM6(BROADWELL_CORE, rapl_defaults_core), + INTEL_CPU_FAM6(BROADWELL, rapl_defaults_core), INTEL_CPU_FAM6(BROADWELL_GT3E, rapl_defaults_core), INTEL_CPU_FAM6(BROADWELL_XEON_D, rapl_defaults_core), INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server), - INTEL_CPU_FAM6(SKYLAKE_DESKTOP, rapl_defaults_core), + INTEL_CPU_FAM6(SKYLAKE, rapl_defaults_core), INTEL_CPU_FAM6(SKYLAKE_MOBILE, rapl_defaults_core), INTEL_CPU_FAM6(SKYLAKE_X, rapl_defaults_hsw_server), INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core), - 
INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core), + INTEL_CPU_FAM6(KABYLAKE, rapl_defaults_core), INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core), INTEL_CPU_FAM6(ICELAKE_MOBILE, rapl_defaults_core), - INTEL_CPU_FAM6(ICELAKE_DESKTOP, rapl_defaults_core), + INTEL_CPU_FAM6(ICELAKE, rapl_defaults_core), INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core), INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server), INTEL_CPU_FAM6(ICELAKE_XEON_D, rapl_defaults_hsw_server), diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 75fc4fb9901c..8083a354ae7f 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3207,10 +3207,10 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) pkg_cstate_limits = snb_pkg_cstate_limits; has_misc_feature_control = 1; break; - case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ - case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ @@ -3403,10 +3403,10 @@ int has_config_tdp(unsigned int family, unsigned int model) switch (model) { case INTEL_FAM6_IVYBRIDGE: /* IVB */ - case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ - case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ @@ -3840,9 +3840,9 @@ void rapl_probe_intel(unsigned int family, unsigned int model) switch (model) { case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_IVYBRIDGE: - case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ - case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; if (rapl_joules) { @@ -4031,7 +4031,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model) return; switch (model) { - case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ do_gfx_perf_limit_reasons = 1; case INTEL_FAM6_HASWELL_X: /* HSX */ @@ -4249,10 +4249,10 @@ int has_snb_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ - case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSW */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ - case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ @@ -4284,8 +4284,8 @@ int has_hsw_msrs(unsigned int family, unsigned int model) return 0; switch (model) { - case INTEL_FAM6_HASWELL_CORE: - case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_HASWELL: + case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ @@ -4569,16 +4569,16 @@ unsigned int intel_model_duplicates(unsigned int model) return 
INTEL_FAM6_XEON_PHI_KNL;
 case INTEL_FAM6_HASWELL_ULT:
- return INTEL_FAM6_HASWELL_CORE;
+ return INTEL_FAM6_HASWELL;
 case INTEL_FAM6_BROADWELL_X:
 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
 return INTEL_FAM6_BROADWELL_X;
 case INTEL_FAM6_SKYLAKE_MOBILE:
- case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_SKYLAKE:
 case INTEL_FAM6_KABYLAKE_MOBILE:
- case INTEL_FAM6_KABYLAKE_DESKTOP:
+ case INTEL_FAM6_KABYLAKE:
 return INTEL_FAM6_SKYLAKE_MOBILE;
 case INTEL_FAM6_ICELAKE_MOBILE:

From af239c44e3f976762e9bc052f0d5796b90ea530b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 27 Aug 2019 21:48:22 +0200
Subject: [PATCH 17/25] x86/intel: Aggregate big core mobile naming

Currently big core mobile chips have either:
 - _L
 - _ULT
 - _MOBILE

Make it uniformly: _L.

for i in `git grep -l "\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*_\(MOBILE\|ULT\)"`
do
	sed -i -e 's/\(\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*\)_\(MOBILE\|ULT\)/\1_L/g' ${i}
done

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Tony Luck
Cc: x86@kernel.org
Cc: Dave Hansen
Cc: Borislav Petkov
Cc: Thomas Gleixner
Link: https://lkml.kernel.org/r/20190827195122.568978530@infradead.org
---
 arch/x86/events/intel/core.c | 20 +++++-----
 arch/x86/events/intel/cstate.c | 18 ++++-----
 arch/x86/events/intel/rapl.c | 10 ++---
 arch/x86/events/intel/uncore.c | 8 ++--
 arch/x86/events/msr.c | 8 ++--
 arch/x86/include/asm/intel-family.h | 10 ++---
 arch/x86/kernel/apic/apic.c | 6 +--
 arch/x86/kernel/cpu/bugs.c | 6 +--
 arch/x86/kernel/cpu/intel.c | 6 +--
 drivers/acpi/x86/utils.c | 4 +-
 drivers/cpufreq/intel_pstate.c | 4 +-
 drivers/idle/intel_idle.c | 6 +--
 drivers/platform/x86/intel_pmc_core.c | 8 ++--
 drivers/platform/x86/intel_pmc_core_pltdrv.c | 8 ++--
 drivers/powercap/intel_rapl_common.c | 10 ++---
 tools/power/x86/turbostat/turbostat.c | 40 ++++++++++----------
 16 files changed, 86 insertions(+), 86 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 76bff3a33725..22ef9ccaf45c 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3965,7 +3965,7 @@ static __init void intel_clovertown_quirk(void)
 static const struct x86_cpu_desc isolation_ucodes[] = {
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
- INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e),
+ INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015),
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
@@ -3978,13 +3978,13 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
- INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e),
- INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11,
0x0000004e), INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e), @@ -4859,7 +4859,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_HASWELL: case INTEL_FAM6_HASWELL_X: - case INTEL_FAM6_HASWELL_ULT: + case INTEL_FAM6_HASWELL_L: case INTEL_FAM6_HASWELL_GT3E: x86_add_quirk(intel_ht_bug); x86_add_quirk(intel_pebs_isolation_quirk); @@ -4955,9 +4955,9 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SKYLAKE_X: pmem = true; /* fall through */ - case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: - case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_L: case INTEL_FAM6_KABYLAKE: x86_add_quirk(intel_pebs_isolation_quirk); x86_pmu.late_ack = true; @@ -5005,7 +5005,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ICELAKE_XEON_D: pmem = true; /* fall through */ - case INTEL_FAM6_ICELAKE_MOBILE: + case INTEL_FAM6_ICELAKE_L: case INTEL_FAM6_ICELAKE: x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 3854400ad8ff..9b014e813626 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -597,7 +597,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_L, hswult_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates), @@ -608,14 +608,14 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_L, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE, hswult_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_L, hswult_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE, hswult_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_L, cnl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), @@ -625,8 +625,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, snb_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index c1278e2764f4..70dcfe9312b0 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -722,7 +722,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, model_snbep), X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, model_hsx), - 
X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, model_hsw), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_L, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, model_hsw), @@ -730,16 +730,16 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, model_knl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, model_knl), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, model_skl), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, model_hsx), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, model_skl), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE, model_skl), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, model_skl), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_L, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, model_hsw), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, model_skl), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_L, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE, model_skl), {}, }; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index d5e091586242..8428e28c9625 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1452,7 +1452,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL, hsw_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_L, hsw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL, bdw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init), @@ -1466,11 +1466,11 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE, skl_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L, skl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L, skl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE, skl_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_L, icl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE, icl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_X, snr_uncore_init), diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 08d320a39dff..12265c14e60d 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -61,7 +61,7 @@ static bool test_intel(int idx, void *data) case INTEL_FAM6_HASWELL: case INTEL_FAM6_HASWELL_X: - case INTEL_FAM6_HASWELL_ULT: 
+ case INTEL_FAM6_HASWELL_L: case INTEL_FAM6_HASWELL_GT3E: case INTEL_FAM6_BROADWELL: @@ -84,12 +84,12 @@ static bool test_intel(int idx, void *data) return true; break; - case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_SKYLAKE_X: - case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_L: case INTEL_FAM6_KABYLAKE: - case INTEL_FAM6_ICELAKE_MOBILE: + case INTEL_FAM6_ICELAKE_L: if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) return true; break; diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 800dd17e61c4..25b71d4e9224 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -51,7 +51,7 @@ #define INTEL_FAM6_HASWELL 0x3C #define INTEL_FAM6_HASWELL_X 0x3F -#define INTEL_FAM6_HASWELL_ULT 0x45 +#define INTEL_FAM6_HASWELL_L 0x45 #define INTEL_FAM6_HASWELL_GT3E 0x46 #define INTEL_FAM6_BROADWELL 0x3D @@ -59,18 +59,18 @@ #define INTEL_FAM6_BROADWELL_X 0x4F #define INTEL_FAM6_BROADWELL_XEON_D 0x56 -#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E +#define INTEL_FAM6_SKYLAKE_L 0x4E #define INTEL_FAM6_SKYLAKE 0x5E #define INTEL_FAM6_SKYLAKE_X 0x55 -#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E +#define INTEL_FAM6_KABYLAKE_L 0x8E #define INTEL_FAM6_KABYLAKE 0x9E -#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 +#define INTEL_FAM6_CANNONLAKE_L 0x66 #define INTEL_FAM6_ICELAKE_X 0x6A #define INTEL_FAM6_ICELAKE_XEON_D 0x6C #define INTEL_FAM6_ICELAKE 0x7D -#define INTEL_FAM6_ICELAKE_MOBILE 0x7E +#define INTEL_FAM6_ICELAKE_L 0x7E #define INTEL_FAM6_ICELAKE_NNPI 0x9D /* "Small Core" Processors (Atom) */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 0527465e1c98..adf001d30b47 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -594,16 +594,16 @@ static const struct x86_cpu_id deadline_match[] = { DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL, 0x22), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_L, 0x20), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_GT3E, 0x17), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL, 0x25), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_GT3E, 0x17), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_MOBILE, 0xb2), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_L, 0xb2), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE, 0xb2), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_MOBILE, 0x52), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_L, 0x52), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE, 0x52), {}, diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 6c611abd6cdd..f435780fe45e 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1185,13 +1185,13 @@ static void override_cache_bits(struct cpuinfo_x86 *c) case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_IVYBRIDGE: case INTEL_FAM6_HASWELL: - case INTEL_FAM6_HASWELL_ULT: + case INTEL_FAM6_HASWELL_L: case INTEL_FAM6_HASWELL_GT3E: case INTEL_FAM6_BROADWELL: case INTEL_FAM6_BROADWELL_GT3E: - case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: - case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_L: case INTEL_FAM6_KABYLAKE: if (c->x86_cache_bits < 44) c->x86_cache_bits = 44; diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index ca62563c1bb3..bafa2735f541 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ 
-145,8 +145,8 @@ static const struct sku_microcode spectre_bad_microcodes[] = { { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 }, { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 }, { INTEL_FAM6_KABYLAKE, 0x09, 0x80 }, - { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, - { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, + { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 }, { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, { INTEL_FAM6_BROADWELL, 0x04, 0x28 }, @@ -154,7 +154,7 @@ static const struct sku_microcode spectre_bad_microcodes[] = { { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, - { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, + { INTEL_FAM6_HASWELL_L, 0x01, 0x21 }, { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, { INTEL_FAM6_HASWELL, 0x03, 0x23 }, { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index ba277cd5c7fa..697a6b12d6b9 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -69,11 +69,11 @@ static const struct always_present_id always_present_ids[] = { * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed * *and* _STA has been called at least 3 times since. */ - ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), { + ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), }), - ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), { + ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"), }), diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 17cb9af25328..6e393aabdc13 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1875,11 +1875,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(INTEL_FAM6_BROADWELL, core_funcs), ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), ICPU(INTEL_FAM6_HASWELL_X, core_funcs), - ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs), + ICPU(INTEL_FAM6_HASWELL_L, core_funcs), ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs), ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs), ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), - ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE_L, core_funcs), ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), ICPU(INTEL_FAM6_SKYLAKE, core_funcs), ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index f45898965fe1..d0e4f16b8f06 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1074,16 +1074,16 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt), INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw), - INTEL_CPU_FAM6(HASWELL_ULT, idle_cpu_hsw), + INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_GT3E, idle_cpu_hsw), INTEL_CPU_FAM6(ATOM_SILVERMONT_X, idle_cpu_avn), INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_GT3E, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_XEON_D, idle_cpu_bdw), - INTEL_CPU_FAM6(SKYLAKE_MOBILE, idle_cpu_skl), + INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl), - INTEL_CPU_FAM6(KABYLAKE_MOBILE, idle_cpu_skl), + INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl), INTEL_CPU_FAM6(KABYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE_X, 
idle_cpu_skx), INTEL_CPU_FAM6(XEON_PHI_KNL, idle_cpu_knl), diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 3a82df45bced..fd1bfd5d2689 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -806,12 +806,12 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) #endif /* CONFIG_DEBUG_FS */ static const struct x86_cpu_id intel_pmc_core_ids[] = { - INTEL_CPU_FAM6(SKYLAKE_MOBILE, spt_reg_map), + INTEL_CPU_FAM6(SKYLAKE_L, spt_reg_map), INTEL_CPU_FAM6(SKYLAKE, spt_reg_map), - INTEL_CPU_FAM6(KABYLAKE_MOBILE, spt_reg_map), + INTEL_CPU_FAM6(KABYLAKE_L, spt_reg_map), INTEL_CPU_FAM6(KABYLAKE, spt_reg_map), - INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), - INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), + INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map), + INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map), INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), {} }; diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index 52f5bcb88011..5977e0d66624 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -30,12 +30,12 @@ static struct platform_device pmc_core_device = { * other list may grow, but this list should not. */ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = { - INTEL_CPU_FAM6(SKYLAKE_MOBILE, pmc_core_device), + INTEL_CPU_FAM6(SKYLAKE_L, pmc_core_device), INTEL_CPU_FAM6(SKYLAKE, pmc_core_device), - INTEL_CPU_FAM6(KABYLAKE_MOBILE, pmc_core_device), + INTEL_CPU_FAM6(KABYLAKE_L, pmc_core_device), INTEL_CPU_FAM6(KABYLAKE, pmc_core_device), - INTEL_CPU_FAM6(CANNONLAKE_MOBILE, pmc_core_device), - INTEL_CPU_FAM6(ICELAKE_MOBILE, pmc_core_device), + INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device), + INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 7c9eb78d3471..ac52a6ec4931 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -958,7 +958,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(IVYBRIDGE_X, rapl_defaults_core), INTEL_CPU_FAM6(HASWELL, rapl_defaults_core), - INTEL_CPU_FAM6(HASWELL_ULT, rapl_defaults_core), + INTEL_CPU_FAM6(HASWELL_L, rapl_defaults_core), INTEL_CPU_FAM6(HASWELL_GT3E, rapl_defaults_core), INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server), @@ -968,12 +968,12 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server), INTEL_CPU_FAM6(SKYLAKE, rapl_defaults_core), - INTEL_CPU_FAM6(SKYLAKE_MOBILE, rapl_defaults_core), + INTEL_CPU_FAM6(SKYLAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(SKYLAKE_X, rapl_defaults_hsw_server), - INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core), + INTEL_CPU_FAM6(KABYLAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(KABYLAKE, rapl_defaults_core), - INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core), - INTEL_CPU_FAM6(ICELAKE_MOBILE, rapl_defaults_core), + INTEL_CPU_FAM6(CANNONLAKE_L, rapl_defaults_core), + INTEL_CPU_FAM6(ICELAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(ICELAKE, rapl_defaults_core), INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core), INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server), diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 8083a354ae7f..bb1bef6bf0b9 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ 
b/tools/power/x86/turbostat/turbostat.c @@ -3213,8 +3213,8 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ - case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ + case INTEL_FAM6_SKYLAKE_L: /* SKL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ pkg_cstate_limits = hsw_pkg_cstate_limits; has_misc_feature_control = 1; break; @@ -3409,8 +3409,8 @@ int has_config_tdp(unsigned int family, unsigned int model) case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ - case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ + case INTEL_FAM6_SKYLAKE_L: /* SKL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */ @@ -3863,8 +3863,8 @@ void rapl_probe_intel(unsigned int family, unsigned int model) else BIC_PRESENT(BIC_PkgWatt); break; - case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ - case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ + case INTEL_FAM6_SKYLAKE_L: /* SKL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); @@ -4255,8 +4255,8 @@ int has_snb_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ - case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ + case INTEL_FAM6_SKYLAKE_L: /* SKL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: @@ -4286,8 +4286,8 @@ int has_hsw_msrs(unsigned int family, unsigned int model) switch (model) { case INTEL_FAM6_HASWELL: case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ - case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ + case INTEL_FAM6_SKYLAKE_L: /* SKL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: return 1; @@ -4309,8 +4309,8 @@ int has_skl_msrs(unsigned int family, unsigned int model) return 0; switch (model) { - case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ - case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ + case INTEL_FAM6_SKYLAKE_L: /* SKL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ return 1; } return 0; @@ -4345,7 +4345,7 @@ int is_cnl(unsigned int family, unsigned int model) return 0; switch (model) { - case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ return 1; } @@ -4568,21 +4568,21 @@ unsigned int intel_model_duplicates(unsigned int model) case INTEL_FAM6_XEON_PHI_KNM: return INTEL_FAM6_XEON_PHI_KNL; - case INTEL_FAM6_HASWELL_ULT: + case INTEL_FAM6_HASWELL_L: return INTEL_FAM6_HASWELL; case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ return INTEL_FAM6_BROADWELL_X; - case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: - case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_L: case INTEL_FAM6_KABYLAKE: - return INTEL_FAM6_SKYLAKE_MOBILE; + return INTEL_FAM6_SKYLAKE_L; - case INTEL_FAM6_ICELAKE_MOBILE: - return INTEL_FAM6_CANNONLAKE_MOBILE; + case INTEL_FAM6_ICELAKE_L: + return 
INTEL_FAM6_CANNONLAKE_L;
 }
 return model;
 }
@@ -4731,7 +4731,7 @@ void process_cpuid()
 if (crystal_hz == 0)
 switch(model) {
- case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
+ case INTEL_FAM6_SKYLAKE_L: /* SKL */
 crystal_hz = 24000000; /* 24.0 MHz */
 break;
 case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */

From 5e741407eab7c602ee5a2b06afb0070a02f4412f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 27 Aug 2019 21:48:23 +0200
Subject: [PATCH 18/25] x86/intel: Aggregate big core graphics naming

Currently big core clients with extra graphics on have:
 - _G
 - _GT3E

Make it uniformly: _G

for i in `git grep -l "\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*_GT3E"`
do
	sed -i -e 's/\(\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*\)_GT3E/\1_G/g' ${i}
done

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Tony Luck
Cc: x86@kernel.org
Cc: Dave Hansen
Cc: Thomas Gleixner
Cc: Borislav Petkov
Link: https://lkml.kernel.org/r/20190827195122.622802314@infradead.org
---
 arch/x86/events/intel/core.c | 8 ++++----
 arch/x86/events/intel/cstate.c | 8 ++++----
 arch/x86/events/intel/pt.c | 2 +-
 arch/x86/events/intel/rapl.c | 4 ++--
 arch/x86/events/intel/uncore.c | 4 ++--
 arch/x86/events/msr.c | 4 ++--
 arch/x86/include/asm/intel-family.h | 4 ++--
 arch/x86/kernel/apic/apic.c | 4 ++--
 arch/x86/kernel/cpu/bugs.c | 4 ++--
 arch/x86/kernel/cpu/intel.c | 4 ++--
 drivers/cpufreq/intel_pstate.c | 4 ++--
 drivers/idle/intel_idle.c | 4 ++--
 drivers/powercap/intel_rapl_common.c | 4 ++--
 tools/power/x86/turbostat/turbostat.c | 18 +++++++++---------
 14 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 22ef9ccaf45c..472b45c3eeb1 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3966,11 +3966,11 @@ static __init void intel_clovertown_quirk(void)
 static const struct x86_cpu_desc isolation_ucodes[] = {
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
- INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015),
+ INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009),
@@ -4860,7 +4860,7 @@ __init int intel_pmu_init(void)
 case INTEL_FAM6_HASWELL:
 case INTEL_FAM6_HASWELL_X:
 case INTEL_FAM6_HASWELL_L:
- case INTEL_FAM6_HASWELL_GT3E:
+ case INTEL_FAM6_HASWELL_G:
 x86_add_quirk(intel_ht_bug);
 x86_add_quirk(intel_pebs_isolation_quirk);
 x86_pmu.late_ack = true;
@@ -4892,7 +4892,7 @@ __init int intel_pmu_init(void)
 case INTEL_FAM6_BROADWELL:
 case INTEL_FAM6_BROADWELL_XEON_D:
- case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_G:
 case INTEL_FAM6_BROADWELL_X:
 x86_add_quirk(intel_pebs_isolation_quirk);
 x86_pmu.late_ack = true;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 9b014e813626..03d7a4042bc5 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -593,9 +593,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE, snb_cstates),
 X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),
-
X86_CSTATES_MODEL(INTEL_FAM6_HASWELL, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_HASWELL, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_G, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_L, hswult_cstates), @@ -605,7 +605,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_G, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_L, snb_cstates), diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 8cb9626e6277..d0195d151938 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -206,7 +206,7 @@ static int __init pt_pmu_hw_init(void) switch (boot_cpu_data.x86_model) { case INTEL_FAM6_BROADWELL: case INTEL_FAM6_BROADWELL_XEON_D: - case INTEL_FAM6_BROADWELL_GT3E: + case INTEL_FAM6_BROADWELL_G: case INTEL_FAM6_BROADWELL_X: /* not setting BRANCH_EN will #GP, erratum BDM106 */ pt_pmu.branch_en_always_on = true; diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 70dcfe9312b0..82e2c0ea99e4 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -723,9 +723,9 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_L, model_hsw), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, model_hsw), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_G, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL, model_hsw), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, model_hsw), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_G, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, model_knl), diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 8428e28c9625..5a2f237707b8 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1453,9 +1453,9 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL, hsw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_L, hsw_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_G, hsw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL, bdw_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_G, bdw_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init), diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 12265c14e60d..e11fbdb3a035 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -62,11 +62,11 @@ static bool test_intel(int idx, void *data) case INTEL_FAM6_HASWELL: case INTEL_FAM6_HASWELL_X: case INTEL_FAM6_HASWELL_L: - case 
INTEL_FAM6_HASWELL_GT3E: + case INTEL_FAM6_HASWELL_G: case INTEL_FAM6_BROADWELL: case INTEL_FAM6_BROADWELL_XEON_D: - case INTEL_FAM6_BROADWELL_GT3E: + case INTEL_FAM6_BROADWELL_G: case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_ATOM_SILVERMONT: diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 25b71d4e9224..0bc7f397b4a6 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -52,10 +52,10 @@ #define INTEL_FAM6_HASWELL 0x3C #define INTEL_FAM6_HASWELL_X 0x3F #define INTEL_FAM6_HASWELL_L 0x45 -#define INTEL_FAM6_HASWELL_GT3E 0x46 +#define INTEL_FAM6_HASWELL_G 0x46 #define INTEL_FAM6_BROADWELL 0x3D -#define INTEL_FAM6_BROADWELL_GT3E 0x47 +#define INTEL_FAM6_BROADWELL_G 0x47 #define INTEL_FAM6_BROADWELL_X 0x4F #define INTEL_FAM6_BROADWELL_XEON_D 0x56 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index adf001d30b47..c297e6d6b915 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -595,10 +595,10 @@ static const struct x86_cpu_id deadline_match[] = { DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL, 0x22), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_L, 0x20), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_GT3E, 0x17), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_G, 0x17), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL, 0x25), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_GT3E, 0x17), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_G, 0x17), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_L, 0xb2), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE, 0xb2), diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index f435780fe45e..0b569f10d4a0 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1186,9 +1186,9 @@ static void override_cache_bits(struct cpuinfo_x86 *c) case INTEL_FAM6_IVYBRIDGE: case INTEL_FAM6_HASWELL: case INTEL_FAM6_HASWELL_L: - case INTEL_FAM6_HASWELL_GT3E: + case INTEL_FAM6_HASWELL_G: case INTEL_FAM6_BROADWELL: - case INTEL_FAM6_BROADWELL_GT3E: + case INTEL_FAM6_BROADWELL_G: case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_KABYLAKE_L: diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index bafa2735f541..1d2c64bcf6ab 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -150,12 +150,12 @@ static const struct sku_microcode spectre_bad_microcodes[] = { { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, { INTEL_FAM6_BROADWELL, 0x04, 0x28 }, - { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, + { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b }, { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, { INTEL_FAM6_HASWELL_L, 0x01, 0x21 }, - { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, + { INTEL_FAM6_HASWELL_G, 0x01, 0x18 }, { INTEL_FAM6_HASWELL, 0x03, 0x23 }, { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6e393aabdc13..99b9c01f8c1a 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1876,8 +1876,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), ICPU(INTEL_FAM6_HASWELL_X, core_funcs), ICPU(INTEL_FAM6_HASWELL_L, core_funcs), - ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs), + ICPU(INTEL_FAM6_HASWELL_G, core_funcs), + 
ICPU(INTEL_FAM6_BROADWELL_G, core_funcs), ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), ICPU(INTEL_FAM6_SKYLAKE_L, core_funcs), ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index d0e4f16b8f06..c4081324a02b 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1075,10 +1075,10 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw), - INTEL_CPU_FAM6(HASWELL_GT3E, idle_cpu_hsw), + INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw), INTEL_CPU_FAM6(ATOM_SILVERMONT_X, idle_cpu_avn), INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_GT3E, idle_cpu_bdw), + INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_XEON_D, idle_cpu_bdw), INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl), diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index ac52a6ec4931..07af068f6ab6 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -959,11 +959,11 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(HASWELL, rapl_defaults_core), INTEL_CPU_FAM6(HASWELL_L, rapl_defaults_core), - INTEL_CPU_FAM6(HASWELL_GT3E, rapl_defaults_core), + INTEL_CPU_FAM6(HASWELL_G, rapl_defaults_core), INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server), INTEL_CPU_FAM6(BROADWELL, rapl_defaults_core), - INTEL_CPU_FAM6(BROADWELL_GT3E, rapl_defaults_core), + INTEL_CPU_FAM6(BROADWELL_G, rapl_defaults_core), INTEL_CPU_FAM6(BROADWELL_XEON_D, rapl_defaults_core), INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server), diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bb1bef6bf0b9..271cf18efbab 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3209,9 +3209,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) break; case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ - case INTEL_FAM6_HASWELL_GT3E: /* HSW */ + case INTEL_FAM6_HASWELL_G: /* HSW */ case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ + case INTEL_FAM6_BROADWELL_G: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ @@ -3405,9 +3405,9 @@ int has_config_tdp(unsigned int family, unsigned int model) case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ - case INTEL_FAM6_HASWELL_GT3E: /* HSW */ + case INTEL_FAM6_HASWELL_G: /* HSW */ case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ + case INTEL_FAM6_BROADWELL_G: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ @@ -3841,9 +3841,9 @@ void rapl_probe_intel(unsigned int family, unsigned int model) case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_IVYBRIDGE: case INTEL_FAM6_HASWELL: /* HSW */ - case INTEL_FAM6_HASWELL_GT3E: /* HSW */ + case INTEL_FAM6_HASWELL_G: /* HSW */ case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ + case INTEL_FAM6_BROADWELL_G: /* BDW */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); @@ -4032,7 +4032,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned 
int model)
 switch (model) {
 case INTEL_FAM6_HASWELL: /* HSW */
- case INTEL_FAM6_HASWELL_GT3E: /* HSW */
+ case INTEL_FAM6_HASWELL_G: /* HSW */
 do_gfx_perf_limit_reasons = 1;
 case INTEL_FAM6_HASWELL_X: /* HSX */
 do_core_perf_limit_reasons = 1;
@@ -4251,9 +4251,9 @@ int has_snb_msrs(unsigned int family, unsigned int model)
 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
 case INTEL_FAM6_HASWELL: /* HSW */
 case INTEL_FAM6_HASWELL_X: /* HSW */
- case INTEL_FAM6_HASWELL_GT3E: /* HSW */
+ case INTEL_FAM6_HASWELL_G: /* HSW */
 case INTEL_FAM6_BROADWELL: /* BDW */
- case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
+ case INTEL_FAM6_BROADWELL_G: /* BDW */
 case INTEL_FAM6_BROADWELL_X: /* BDX */
 case INTEL_FAM6_SKYLAKE_L: /* SKL */
 case INTEL_FAM6_CANNONLAKE_L: /* CNL */

From 5ebb34edbefa8ea6a7e109179d5fc7b3529dbeba Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 27 Aug 2019 21:48:24 +0200
Subject: [PATCH 19/25] x86/intel: Aggregate microserver naming

Currently big microservers have _XEON_D while small microservers have
_X. Make it uniformly: _D.

for i in `git grep -l "\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*_\(X\|XEON_D\)"`
do
	sed -i -e 's/\(\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*ATOM.*\)_X/\1_D/g' \
	       -e 's/\(\(INTEL_FAM6_\|VULNWL_INTEL\|INTEL_CPU_FAM6\).*\)_XEON_D/\1_D/g' ${i}
done

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Tony Luck
Cc: x86@kernel.org
Cc: Dave Hansen
Cc: Thomas Gleixner
Cc: Borislav Petkov
Link: https://lkml.kernel.org/r/20190827195122.677152989@infradead.org
---
 arch/x86/events/intel/core.c | 20 ++++++++++----------
 arch/x86/events/intel/cstate.c | 12 ++++++------
 arch/x86/events/intel/pt.c | 2 +-
 arch/x86/events/intel/rapl.c | 4 ++--
 arch/x86/events/intel/uncore.c | 4 ++--
 arch/x86/events/msr.c | 6 +++---
 arch/x86/include/asm/intel-family.h | 10 +++++-----
 arch/x86/kernel/apic/apic.c | 2 +-
 arch/x86/kernel/cpu/common.c | 4 ++--
 arch/x86/kernel/cpu/intel.c | 4 ++--
 arch/x86/kernel/cpu/mce/intel.c | 2 +-
 arch/x86/kernel/tsc.c | 2 +-
 drivers/cpufreq/intel_pstate.c | 6 +++---
 drivers/edac/i10nm_base.c | 4 ++--
 drivers/edac/pnd2_edac.c | 2 +-
 drivers/edac/sb_edac.c | 2 +-
 drivers/idle/intel_idle.c | 8 ++++----
 drivers/powercap/intel_rapl_common.c | 8 ++++----
 tools/power/x86/turbostat/turbostat.c | 22 +++++++++++-----------
 19 files changed, 62 insertions(+), 62 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 472b45c3eeb1..dce329c5725d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3971,10 +3971,10 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
@@ -4146,7 +4146,7 @@ static const struct x86_cpu_desc counter_freezing_ucodes[] = {
 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e), INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008), - INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_D, 1, 0x00000028), INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028), INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006), {} @@ -4643,7 +4643,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_ATOM_SILVERMONT: - case INTEL_FAM6_ATOM_SILVERMONT_X: + case INTEL_FAM6_ATOM_SILVERMONT_D: case INTEL_FAM6_ATOM_SILVERMONT_MID: case INTEL_FAM6_ATOM_AIRMONT: case INTEL_FAM6_ATOM_AIRMONT_MID: @@ -4665,7 +4665,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_ATOM_GOLDMONT_X: + case INTEL_FAM6_ATOM_GOLDMONT_D: x86_add_quirk(intel_counter_freezing_quirk); memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -4721,7 +4721,7 @@ __init int intel_pmu_init(void) name = "goldmont_plus"; break; - case INTEL_FAM6_ATOM_TREMONT_X: + case INTEL_FAM6_ATOM_TREMONT_D: x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -4891,7 +4891,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_BROADWELL: - case INTEL_FAM6_BROADWELL_XEON_D: + case INTEL_FAM6_BROADWELL_D: case INTEL_FAM6_BROADWELL_G: case INTEL_FAM6_BROADWELL_X: x86_add_quirk(intel_pebs_isolation_quirk); @@ -5002,7 +5002,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_ICELAKE_X: - case INTEL_FAM6_ICELAKE_XEON_D: + case INTEL_FAM6_ICELAKE_D: pmem = true; /* fall through */ case INTEL_FAM6_ICELAKE_L: diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 03d7a4042bc5..104c093b282b 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -600,13 +600,13 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_L, hswult_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_D, slm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_G, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_D, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_G, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_L, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE, snb_cstates), @@ -621,7 +621,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates), diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index d0195d151938..26af1f19f153 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -205,7 +205,7 @@ static int __init pt_pmu_hw_init(void) /* model-specific quirks */ switch (boot_cpu_data.x86_model) { case INTEL_FAM6_BROADWELL: - case INTEL_FAM6_BROADWELL_XEON_D: + 
case INTEL_FAM6_BROADWELL_D: case INTEL_FAM6_BROADWELL_G: case INTEL_FAM6_BROADWELL_X: /* not setting BRANCH_EN will #GP, erratum BDM106 */ diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 82e2c0ea99e4..22f5843ef406 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -727,7 +727,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_G, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, model_hsx), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, model_hsx), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_D, model_hsx), X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, model_knl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, model_knl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L, model_skl), @@ -737,7 +737,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_L, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, model_hsw), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, model_hsw), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_D, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, model_hsw), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_L, model_skl), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE, model_skl), diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 5a2f237707b8..6fc2e06ab4c6 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1462,7 +1462,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_D, bdx_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE, skl_uncore_init), @@ -1473,7 +1473,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_L, icl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE, icl_uncore_init), - X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_X, snr_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_D, snr_uncore_init), {}, }; diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index e11fbdb3a035..ab79d3100241 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -65,16 +65,16 @@ static bool test_intel(int idx, void *data) case INTEL_FAM6_HASWELL_G: case INTEL_FAM6_BROADWELL: - case INTEL_FAM6_BROADWELL_XEON_D: + case INTEL_FAM6_BROADWELL_D: case INTEL_FAM6_BROADWELL_G: case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_ATOM_SILVERMONT: - case INTEL_FAM6_ATOM_SILVERMONT_X: + case INTEL_FAM6_ATOM_SILVERMONT_D: case INTEL_FAM6_ATOM_AIRMONT: case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_ATOM_GOLDMONT_X: + case INTEL_FAM6_ATOM_GOLDMONT_D: case INTEL_FAM6_ATOM_GOLDMONT_PLUS: diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 0bc7f397b4a6..76dc9abb53e9 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ 
-57,7 +57,7 @@ #define INTEL_FAM6_BROADWELL 0x3D #define INTEL_FAM6_BROADWELL_G 0x47 #define INTEL_FAM6_BROADWELL_X 0x4F -#define INTEL_FAM6_BROADWELL_XEON_D 0x56 +#define INTEL_FAM6_BROADWELL_D 0x56 #define INTEL_FAM6_SKYLAKE_L 0x4E #define INTEL_FAM6_SKYLAKE 0x5E @@ -68,7 +68,7 @@ #define INTEL_FAM6_CANNONLAKE_L 0x66 #define INTEL_FAM6_ICELAKE_X 0x6A -#define INTEL_FAM6_ICELAKE_XEON_D 0x6C +#define INTEL_FAM6_ICELAKE_D 0x6C #define INTEL_FAM6_ICELAKE 0x7D #define INTEL_FAM6_ICELAKE_L 0x7E #define INTEL_FAM6_ICELAKE_NNPI 0x9D @@ -83,17 +83,17 @@ #define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */ #define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */ -#define INTEL_FAM6_ATOM_SILVERMONT_X 0x4D /* Avaton, Rangely */ +#define INTEL_FAM6_ATOM_SILVERMONT_D 0x4D /* Avaton, Rangely */ #define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */ #define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */ #define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ -#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ +#define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ -#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */ +#define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */ /* Xeon Phi */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index c297e6d6b915..1026138eb830 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -590,7 +590,7 @@ static u32 skx_deadline_rev(void) static const struct x86_cpu_id deadline_match[] = { DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), - DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), + DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_D, bdx_deadline_rev), DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL, 0x22), diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f125bf7ecb6f..b6a9e27755d7 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1050,7 +1050,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION), VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), - VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), + VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), @@ -1061,7 +1061,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS), VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS), - VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS), + VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS), VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS), /* diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 1d2c64bcf6ab..f4b795ab1a9b 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -151,8 +151,8 @@ static const struct sku_microcode spectre_bad_microcodes[] = { { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, { 
INTEL_FAM6_BROADWELL, 0x04, 0x28 }, { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b }, - { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, - { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, + { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 }, + { INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 }, { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, { INTEL_FAM6_HASWELL_L, 0x01, 0x21 }, { INTEL_FAM6_HASWELL_G, 0x01, 0x18 }, diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index e43eb6732630..88cd9598fa57 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -479,7 +479,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) switch (c->x86_model) { case INTEL_FAM6_IVYBRIDGE_X: case INTEL_FAM6_HASWELL_X: - case INTEL_FAM6_BROADWELL_XEON_D: + case INTEL_FAM6_BROADWELL_D: case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_XEON_PHI_KNL: diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 57d87f79558f..c59454c382fd 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -638,7 +638,7 @@ unsigned long native_calibrate_tsc(void) * clock. */ if (crystal_khz == 0 && - boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_X) + boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D) crystal_khz = 25000; /* diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 99b9c01f8c1a..886324041add 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1882,7 +1882,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(INTEL_FAM6_SKYLAKE_L, core_funcs), ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), ICPU(INTEL_FAM6_SKYLAKE, core_funcs), - ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), + ICPU(INTEL_FAM6_BROADWELL_D, core_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs), @@ -1893,7 +1893,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { - ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), + ICPU(INTEL_FAM6_BROADWELL_D, core_funcs), ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), {} @@ -2624,7 +2624,7 @@ static inline void intel_pstate_request_control_from_smm(void) {} static const struct x86_cpu_id hwp_support_ids[] __initconst = { ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), - ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL), + ICPU_HWP(INTEL_FAM6_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), ICPU_HWP(X86_MODEL_ANY, 0), {} }; diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 83392f2841de..c370d5457e6b 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -123,9 +123,9 @@ static int i10nm_get_all_munits(void) } static const struct x86_cpu_id i10nm_cpuids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_X, 0, 0 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_D, 0, 0 }, { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, 0 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_XEON_D, 0, 0 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_D, 0, 0 }, { } }; MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids); diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index ca25f8fe57ef..a6846be0d2eb 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1533,7 +1533,7 @@ static struct dunit_ops dnv_ops = { static const struct x86_cpu_id pnd2_cpuids[] = { { 
X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_D, 0, (kernel_ulong_t)&dnv_ops }, { } }; MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 37746b045e18..f743502ca9b7 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -3429,7 +3429,7 @@ static const struct x86_cpu_id sbridge_cpuids[] = { INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table), INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table), INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table), - INTEL_CPU_FAM6(BROADWELL_XEON_D, pci_dev_descr_broadwell_table), + INTEL_CPU_FAM6(BROADWELL_D, pci_dev_descr_broadwell_table), INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table), INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table), { } diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index c4081324a02b..347b08b56042 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1076,11 +1076,11 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw), - INTEL_CPU_FAM6(ATOM_SILVERMONT_X, idle_cpu_avn), + INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn), INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_XEON_D, idle_cpu_bdw), + INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw), INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl), @@ -1090,8 +1090,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(XEON_PHI_KNM, idle_cpu_knl), INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt), INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt), - INTEL_CPU_FAM6(ATOM_GOLDMONT_X, idle_cpu_dnv), - INTEL_CPU_FAM6(ATOM_TREMONT_X, idle_cpu_dnv), + INTEL_CPU_FAM6(ATOM_GOLDMONT_D, idle_cpu_dnv), + INTEL_CPU_FAM6(ATOM_TREMONT_D, idle_cpu_dnv), {} }; diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 07af068f6ab6..94ddd7d659c8 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -964,7 +964,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(BROADWELL, rapl_defaults_core), INTEL_CPU_FAM6(BROADWELL_G, rapl_defaults_core), - INTEL_CPU_FAM6(BROADWELL_XEON_D, rapl_defaults_core), + INTEL_CPU_FAM6(BROADWELL_D, rapl_defaults_core), INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server), INTEL_CPU_FAM6(SKYLAKE, rapl_defaults_core), @@ -977,7 +977,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ICELAKE, rapl_defaults_core), INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core), INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server), - INTEL_CPU_FAM6(ICELAKE_XEON_D, rapl_defaults_hsw_server), + INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server), INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), @@ -985,8 +985,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ATOM_AIRMONT_MID, rapl_defaults_ann), INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), - INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core), - 
INTEL_CPU_FAM6(ATOM_TREMONT_X, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core), INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server), INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server), diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 271cf18efbab..6eef0cee6d75 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -2150,7 +2150,7 @@ int has_turbo_ratio_group_limits(int family, int model) switch (model) { case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_SKYLAKE_X: - case INTEL_FAM6_ATOM_GOLDMONT_X: + case INTEL_FAM6_ATOM_GOLDMONT_D: return 1; } return 0; @@ -3224,7 +3224,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ no_MSR_MISC_PWR_MGMT = 1; - case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */ + case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ pkg_cstate_limits = slv_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ @@ -3236,7 +3236,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */ + case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ pkg_cstate_limits = glm_pkg_cstate_limits; break; default: @@ -3279,7 +3279,7 @@ int is_dnv(unsigned int family, unsigned int model) return 0; switch (model) { - case INTEL_FAM6_ATOM_GOLDMONT_X: + case INTEL_FAM6_ATOM_GOLDMONT_D: return 1; } return 0; @@ -3792,7 +3792,7 @@ double get_tdp_intel(unsigned int model) switch (model) { case INTEL_FAM6_ATOM_SILVERMONT: - case INTEL_FAM6_ATOM_SILVERMONT_X: + case INTEL_FAM6_ATOM_SILVERMONT_D: return 30.0; default: return 135.0; @@ -3911,7 +3911,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) } break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ - case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */ + case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ do_rapl = RAPL_PKG | RAPL_CORES; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); @@ -3921,7 +3921,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) BIC_PRESENT(BIC_CorWatt); } break; - case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */ + case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); @@ -4260,7 +4260,7 @@ int has_snb_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */ + case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ return 1; } return 0; @@ -4322,7 +4322,7 @@ int is_slm(unsigned int family, unsigned int model) return 0; switch (model) { case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ - case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */ + case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ return 1; } return 0; @@ -4572,7 +4572,7 @@ unsigned int intel_model_duplicates(unsigned int model) return INTEL_FAM6_HASWELL; case INTEL_FAM6_BROADWELL_X: - case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ + case INTEL_FAM6_BROADWELL_D: /* BDX-DE */ return INTEL_FAM6_BROADWELL_X; case INTEL_FAM6_SKYLAKE_L: @@ -4734,7 +4734,7 @@ void process_cpuid() case INTEL_FAM6_SKYLAKE_L: /* SKL */ crystal_hz = 24000000; /* 24.0 MHz */ break; - case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */ + case 
INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ crystal_hz = 25000000; /* 25.0 MHz */ break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ From a3d8c0d13bdedf84fe74259f6949b2cdffd80e55 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 27 Aug 2019 21:48:25 +0200 Subject: [PATCH 20/25] x86/intel: Add common OPTDIFFs Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Tony Luck Cc: x86@kernel.org Cc: Dave Hansen Cc: Thomas Gleixner Cc: Borislav Petkov Link: https://lkml.kernel.org/r/20190827195122.731530141@infradead.org --- arch/x86/include/asm/intel-family.h | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 76dc9abb53e9..5c05b2d389c3 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -5,9 +5,6 @@ /* * "Big Core" Processors (Branded as Core, Xeon, etc...) * - * The "_X" parts are generally the EP and EX Xeons, or the - * "Extreme" ones, like Broadwell-E, or Atom microserver. - * * While adding a new CPUID for a new microarchitecture, add a new * group to keep logically sorted out in chronological order. Within * that group keep the CPUID for the variants sorted by model number. @@ -21,9 +18,19 @@ * MICROARCH Is the code name for the micro-architecture for this core. * N.B. Not the platform name. * OPTDIFF If needed, a short string to differentiate by market segment. - * Exact strings here will vary over time. _DESKTOP, _MOBILE, and - * _X (short for Xeon server) should be used when they are - * appropriate. + * + * Common OPTDIFFs: + * + * - regular client parts + * _L - regular mobile parts + * _G - parts with extra graphics on + * _X - regular server parts + * _D - micro server parts + * + * Historical OPTDIFFs: + * + * _EP - 2 socket server parts + * _EX - 4+ socket server parts * * The #define line may optionally include a comment including platform names. */ @@ -91,6 +98,8 @@ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ #define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */ + +/* Note: the micro-architecture is "Goldmont Plus" */ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ #define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */ From 32b1cbe380417f2ed80f758791179de6b05795ab Mon Sep 17 00:00:00 2001 From: Marco Ammon Date: Mon, 2 Sep 2019 14:02:59 +0200 Subject: [PATCH 21/25] x86: Correct misc typos Correct spelling typos in comments in different files under arch/x86/. [ bp: Merge into a single patch, massage. ] Signed-off-by: Marco Ammon Signed-off-by: Borislav Petkov Cc: Daniel Bristot de Oliveira Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Juergen Gross Cc: Masami Hiramatsu Cc: Nadav Amit Cc: Peter Zijlstra Cc: Pu Wen Cc: Rick Edgecombe Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: trivial@kernel.org Cc: x86-ml Link: https://lkml.kernel.org/r/20190902102436.27396-1-marco.ammon@fau.de --- arch/x86/include/asm/text-patching.h | 4 ++-- arch/x86/kernel/alternative.c | 6 +++--- arch/x86/kernel/kprobes/opt.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index 70c09967a999..5e8319bb207a 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -45,8 +45,8 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len); * no thread can be preempted in the instructions being modified (no iret to an * invalid instruction possible) or if the instructions are changed from a * consistent state to another consistent state atomically. - * On the local CPU you need to be protected again NMI or MCE handlers seeing an - * inconsistent instruction while you patch. + * On the local CPU you need to be protected against NMI or MCE handlers seeing + * an inconsistent instruction while you patch. */ extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index ccd32013c47a..9d3a971ea364 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -713,7 +713,7 @@ void __init alternative_instructions(void) * Don't stop machine check exceptions while patching. * MCEs only happen when something got corrupted and in this * case we must do something about the corruption. - * Ignoring it is worse than a unlikely patching race. + * Ignoring it is worse than an unlikely patching race. * Also machine checks tend to be broadcast and if one CPU * goes into machine check the others follow quickly, so we don't * expect a machine check to cause undue problems during to code @@ -753,8 +753,8 @@ void __init alternative_instructions(void) * When you use this code to patch more than one byte of an instruction * you need to make sure that other CPUs cannot execute this code in parallel. * Also no thread must be currently preempted in the middle of these - * instructions. And on the local CPU you need to be protected again NMI or MCE - * handlers seeing an inconsistent instruction while you patch. + * instructions. And on the local CPU you need to be protected against NMI or + * MCE handlers seeing an inconsistent instruction while you patch. 
*/ void __init_or_module text_poke_early(void *addr, const void *opcode, size_t len) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 9d4aedece363..b348dd506d58 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -403,7 +403,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, (u8 *)op->kp.addr + op->optinsn.size); len += RELATIVEJUMP_SIZE; - /* We have to use text_poke for instuction buffer because it is RO */ + /* We have to use text_poke() for instruction buffer because it is RO */ text_poke(slot, buf, len); ret = 0; out: From 6e1c32c5dbb4b90eea8f964c2869d0bde050dbe0 Mon Sep 17 00:00:00 2001 From: Gayatri Kammela Date: Thu, 5 Sep 2019 12:30:17 -0700 Subject: [PATCH 22/25] x86/cpu: Add Tiger Lake to Intel family Add the model numbers/CPUIDs of Tiger Lake mobile and desktop to the Intel family. Suggested-by: Tony Luck Signed-off-by: Gayatri Kammela Signed-off-by: Tony Luck Reviewed-by: Tony Luck Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rahul Tanwar Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190905193020.14707-2-tony.luck@intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/intel-family.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 5c05b2d389c3..7c2ef2e883d5 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -80,6 +80,9 @@ #define INTEL_FAM6_ICELAKE_L 0x7E #define INTEL_FAM6_ICELAKE_NNPI 0x9D +#define INTEL_FAM6_TIGERLAKE_L 0x8C +#define INTEL_FAM6_TIGERLAKE 0x8D + /* "Small Core" Processors (Atom) */ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ From 0f65605a8d744b3a205d0a2cd8f20707e31fc023 Mon Sep 17 00:00:00 2001 From: Gayatri Kammela Date: Thu, 5 Sep 2019 12:30:18 -0700 Subject: [PATCH 23/25] x86/cpu: Add Elkhart Lake to Intel family Add the model number/CPUID of atom based Elkhart Lake to the Intel family. Signed-off-by: Gayatri Kammela Signed-off-by: Tony Luck Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rahul Tanwar Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190905193020.14707-3-tony.luck@intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/intel-family.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 7c2ef2e883d5..55568afe798a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -106,6 +106,7 @@ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ #define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */ +#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */ /* Xeon Phi */ From 855fa1f362cab2dc7574acb853b0963dd01d6b8d Mon Sep 17 00:00:00 2001 From: Rahul Tanwar Date: Thu, 5 Sep 2019 12:30:19 -0700 Subject: [PATCH 24/25] x86/cpu: Add new Airmont variant to Intel family Add new Airmont variant CPU model to Intel family. 
Signed-off-by: Rahul Tanwar Signed-off-by: Tony Luck Cc: Gayatri Kammela Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190905193020.14707-4-tony.luck@intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/intel-family.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 55568afe798a..f04622500da3 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -98,6 +98,7 @@ #define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */ #define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */ +#define INTEL_FAM6_ATOM_AIRMONT_NP 0x75 /* Lightning Mountain */ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ #define INTEL_FAM6_ATOM_GOLDMONT_D 0x5F /* Denverton */ From 0cc5359d8fd45bc410906e009117e78e2b5b2322 Mon Sep 17 00:00:00 2001 From: Rahul Tanwar Date: Thu, 5 Sep 2019 12:30:20 -0700 Subject: [PATCH 25/25] x86/cpu: Update init data for new Airmont CPU model Update properties for newly added Airmont CPU variant. Signed-off-by: Rahul Tanwar Signed-off-by: Tony Luck Cc: Gayatri Kammela Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190905193020.14707-5-tony.luck@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 1 + arch/x86/kernel/cpu/intel.c | 1 + arch/x86/kernel/tsc_msr.c | 5 +++++ 3 files changed, 7 insertions(+) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b6a9e27755d7..030e52749a74 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1059,6 +1059,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(CORE_YONAH, NO_SSB), VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS), + VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS), VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS), VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS), diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index e2082ccccf13..c2fdc00df163 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -268,6 +268,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) case INTEL_FAM6_ATOM_SALTWELL_MID: case INTEL_FAM6_ATOM_SALTWELL_TABLET: case INTEL_FAM6_ATOM_SILVERMONT_MID: + case INTEL_FAM6_ATOM_AIRMONT_NP: set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); break; default: diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 067858fe4db8..e0cbe4f2af49 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -58,6 +58,10 @@ static const struct freq_desc freq_desc_ann = { 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } }; +static const struct freq_desc freq_desc_lgm = { + 1, { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 } +}; + static const struct x86_cpu_id tsc_msr_cpu_ids[] = { INTEL_CPU_FAM6(ATOM_SALTWELL_MID, freq_desc_pnw), INTEL_CPU_FAM6(ATOM_SALTWELL_TABLET, freq_desc_clv), @@ -65,6 +69,7 @@ static const struct x86_cpu_id tsc_msr_cpu_ids[] = { INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, freq_desc_tng), INTEL_CPU_FAM6(ATOM_AIRMONT, freq_desc_cht), INTEL_CPU_FAM6(ATOM_AIRMONT_MID, freq_desc_ann), + INTEL_CPU_FAM6(ATOM_AIRMONT_NP, freq_desc_lgm), {} };
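
For context: the new model #defines added above are consumed through x86_cpu_id match tables, the same pattern used by the intel_idle, intel_rapl and tsc_msr hunks quoted earlier in this series. Below is a minimal, hypothetical sketch of such a consumer keyed on the new INTEL_FAM6_ATOM_AIRMONT_NP entry; the lgm_example_* and lgm_data names are invented for illustration and are not part of any patch here, and the 78000 kHz value simply echoes freq_desc_lgm from the last hunk.

    /*
     * Illustrative sketch only -- not from this series. It reuses the
     * INTEL_CPU_FAM6() and x86_match_cpu() helpers seen in the hunks above
     * to attach per-model data to the new Lightning Mountain model ID.
     */
    #include <linux/module.h>
    #include <linux/errno.h>
    #include <asm/cpu_device_id.h>
    #include <asm/intel-family.h>

    struct lgm_example_data {
    	unsigned int ref_clk_khz;
    };

    static const struct lgm_example_data lgm_data = {
    	.ref_clk_khz = 78000,	/* same value carried by freq_desc_lgm above */
    };

    static const struct x86_cpu_id lgm_example_ids[] = {
    	INTEL_CPU_FAM6(ATOM_AIRMONT_NP, lgm_data),	/* Lightning Mountain */
    	{}
    };

    static int __init lgm_example_init(void)
    {
    	const struct x86_cpu_id *id = x86_match_cpu(lgm_example_ids);
    	const struct lgm_example_data *data;

    	if (!id)
    		return -ENODEV;		/* not a Lightning Mountain part */

    	/* INTEL_CPU_FAM6() stores a pointer to the per-model data */
    	data = (const struct lgm_example_data *)id->driver_data;
    	pr_info("lgm-example: reference clock %u kHz\n", data->ref_clk_khz);
    	return 0;
    }
    module_init(lgm_example_init);

    MODULE_LICENSE("GPL");

This mirrors how the in-tree users shown above (intel_idle, intel_pstate, intel_rapl, tsc_msr) hang per-model structures off the intel-family defines, which is why a new model usually lands as one #define patch followed by small table updates in each interested driver.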