Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "The biggest part is a series of reverts for the macro based GCC
  inlining workarounds. It caused regressions in distro build and other
  kernel tooling environments, and the GCC project was very receptive to
  fixing the underlying inliner weaknesses - so as time ran out we
  decided to do a reasonably straightforward revert of the patches. The
  plan is to rely on the 'asm inline' GCC 9 feature, which might be
  backported to GCC 8 and could thus become reasonably widely available
  on modern distros.

  Other than those reverts, there's misc fixes from all around the
  place.

  I wish our final x86 pull request for v4.20 was smaller..."

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  Revert "kbuild/Makefile: Prepare for using macros in inline assembly code to work around asm() related GCC inlining bugs"
  Revert "x86/objtool: Use asm macros to work around GCC inlining bugs"
  Revert "x86/refcount: Work around GCC inlining bug"
  Revert "x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs"
  Revert "x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs"
  Revert "x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops"
  Revert "x86/extable: Macrofy inline assembly code to work around GCC inlining bugs"
  Revert "x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs"
  Revert "x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs"
  x86/mtrr: Don't copy uninitialized gentry fields back to userspace
  x86/fsgsbase/64: Fix the base write helper functions
  x86/mm/cpa: Fix cpa_flush_array() TLB invalidation
  x86/vdso: Pass --eh-frame-hdr to the linker
  x86/mm: Fix decoy address handling vs 32-bit builds
  x86/intel_rdt: Ensure a CPU remains online for the region's pseudo-locking sequence
  x86/dump_pagetables: Fix LDT remap address marker
  x86/mm: Fix guard hole handling
Linus Torvalds 2018-12-21 09:22:24 -08:00
commit 70ad6368e8
27 changed files with 385 additions and 392 deletions
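
For context, a minimal sketch of the "asm inline" qualifier mentioned in the pull message above (new in GCC 9, possibly backported to GCC 8). The example_lock_inc() helper and the __GNUC__ version guard are illustrative assumptions, not code from this commit:

/*
 * Sketch only: "asm inline" tells GCC's inliner to treat the asm
 * statement as having negligible size, so a wrapper around a long
 * asm string is no longer penalized in inlining cost estimates.
 */
static inline void example_lock_inc(int *v)
{
#if defined(__GNUC__) && __GNUC__ >= 9
	asm inline ("lock incl %0" : "+m" (*v));
#else
	asm ("lock incl %0" : "+m" (*v));
#endif
}

With that qualifier available, constructs such as the LOCK_PREFIX and _BUG_FLAGS strings restored below can stay plain inline-asm strings without distorting GCC's inlining decisions, which is what the reverted macro-based workarounds tried to achieve by moving the asm bodies into assembler macros.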

@@ -1076,7 +1076,7 @@ scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
# version.h and scripts_basic is processed / created.
# Listed in dependency order
PHONY += prepare archprepare macroprepare prepare0 prepare1 prepare2 prepare3
PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
@@ -1099,9 +1099,7 @@ prepare2: prepare3 outputmakefile asm-generic
prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
$(cmd_crmodverdir)
macroprepare: prepare1 archmacros
archprepare: archheaders archscripts macroprepare scripts_basic
archprepare: archheaders archscripts prepare1 scripts_basic
prepare0: archprepare gcc-plugins
$(Q)$(MAKE) $(build)=.
@@ -1177,9 +1175,6 @@ archheaders:
PHONY += archscripts
archscripts:
PHONY += archmacros
archmacros:
PHONY += __headers
__headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts
$(Q)$(MAKE) $(build)=scripts build_unifdef

@@ -232,13 +232,6 @@ archscripts: scripts_basic
archheaders:
$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
archmacros:
$(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
export ASM_MACRO_FLAGS
KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
###
# Kernel objects

@@ -352,7 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1
STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
call enter_from_user_mode
.Lafter_call_\@:

@@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
$(call ld-option, --build-id) -Bsymbolic
$(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
-Bsymbolic
GCOV_PROFILE := n
#

@@ -7,24 +7,16 @@
#include <asm/asm.h>
#ifdef CONFIG_SMP
.macro LOCK_PREFIX_HERE
.macro LOCK_PREFIX
672: lock
.pushsection .smp_locks,"a"
.balign 4
.long 671f - . # offset
.long 672b - .
.popsection
671:
.endm
.macro LOCK_PREFIX insn:vararg
LOCK_PREFIX_HERE
lock \insn
.endm
.endm
#else
.macro LOCK_PREFIX_HERE
.endm
.macro LOCK_PREFIX insn:vararg
.endm
.macro LOCK_PREFIX
.endm
#endif
/*

@@ -31,8 +31,15 @@
*/
#ifdef CONFIG_SMP
#define LOCK_PREFIX_HERE "LOCK_PREFIX_HERE\n\t"
#define LOCK_PREFIX "LOCK_PREFIX "
#define LOCK_PREFIX_HERE \
".pushsection .smp_locks,\"a\"\n" \
".balign 4\n" \
".long 671f - .\n" /* offset */ \
".popsection\n" \
"671:"
#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""

@@ -120,25 +120,12 @@
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
ASM_EXTABLE_HANDLE from to handler
.macro ASM_EXTABLE_HANDLE from:req to:req handler:req
.pushsection "__ex_table","a"
.balign 4
.long (\from) - .
.long (\to) - .
.long (\handler) - .
.pushsection "__ex_table","a" ; \
.balign 4 ; \
.long (from) - . ; \
.long (to) - . ; \
.long (handler) - . ; \
.popsection
.endm
#else /* __ASSEMBLY__ */
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
"ASM_EXTABLE_HANDLE from=" #from " to=" #to \
" handler=\"" #handler "\"\n\t"
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif /* __ASSEMBLY__ */
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
@@ -161,7 +148,6 @@
_ASM_PTR (entry); \
.popsection
#ifdef __ASSEMBLY__
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
movl %edi,%ecx
@@ -185,7 +171,34 @@
_ASM_EXTABLE_UA(100b, 103b)
_ASM_EXTABLE_UA(101b, 103b)
.endm
#endif /* __ASSEMBLY__ */
#else
# define _EXPAND_EXTABLE_HANDLE(x) #x
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 4\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - .\n" \
" .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
" .popsection\n"
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
# define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
# define _ASM_EXTABLE_REFCOUNT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
#ifndef __ASSEMBLY__
/*

@@ -4,8 +4,6 @@
#include <linux/stringify.h>
#ifndef __ASSEMBLY__
/*
* Despite that some emulators terminate on UD2, we use it for WARN().
*
@@ -22,15 +20,53 @@
#define LEN_UD2 2
#ifdef CONFIG_GENERIC_BUG
#ifdef CONFIG_X86_32
# define __BUG_REL(val) ".long " __stringify(val)
#else
# define __BUG_REL(val) ".long " __stringify(val) " - 2b"
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _BUG_FLAGS(ins, flags) \
do { \
asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 " \
"flags=%c2 size=%c3" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (flags), \
asm volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
"\t.word %c1" "\t# bug_entry::line\n" \
"\t.word %c2" "\t# bug_entry::flags\n" \
"\t.org 2b+%c3\n" \
".popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#else /* !CONFIG_DEBUG_BUGVERBOSE */
#define _BUG_FLAGS(ins, flags) \
do { \
asm volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t.word %c0" "\t# bug_entry::flags\n" \
"\t.org 2b+%c1\n" \
".popsection" \
: : "i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#else
#define _BUG_FLAGS(ins, flags) asm volatile(ins)
#endif /* CONFIG_GENERIC_BUG */
#define HAVE_ARCH_BUG
#define BUG() \
do { \
@@ -46,54 +82,4 @@ do { \
#include <asm-generic/bug.h>
#else /* __ASSEMBLY__ */
#ifdef CONFIG_GENERIC_BUG
#ifdef CONFIG_X86_32
.macro __BUG_REL val:req
.long \val
.endm
#else
.macro __BUG_REL val:req
.long \val - 2b
.endm
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
.macro ASM_BUG ins:req file:req line:req flags:req size:req
1: \ins
.pushsection __bug_table,"aw"
2: __BUG_REL val=1b # bug_entry::bug_addr
__BUG_REL val=\file # bug_entry::file
.word \line # bug_entry::line
.word \flags # bug_entry::flags
.org 2b+\size
.popsection
.endm
#else /* !CONFIG_DEBUG_BUGVERBOSE */
.macro ASM_BUG ins:req file:req line:req flags:req size:req
1: \ins
.pushsection __bug_table,"aw"
2: __BUG_REL val=1b # bug_entry::bug_addr
.word \flags # bug_entry::flags
.org 2b+\size
.popsection
.endm
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#else /* CONFIG_GENERIC_BUG */
.macro ASM_BUG ins:req file:req line:req flags:req size:req
\ins
.endm
#endif /* CONFIG_GENERIC_BUG */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_BUG_H */

@@ -2,10 +2,10 @@
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#include <asm/asm.h>
#include <linux/bitops.h>
@@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
*/
static __always_inline __pure bool _static_cpu_has(u16 bit)
{
asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] "
"cap_byte=\"%[cap_byte]\" "
"feature=%P[feature] t_yes=%l[t_yes] "
"t_no=%l[t_no] always=%P[always]"
asm_volatile_goto("1: jmp 6f\n"
"2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"
"3:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 4f - .\n" /* repl offset */
" .word %P[always]\n" /* always replace */
" .byte 3b - 1b\n" /* src len */
" .byte 5f - 4f\n" /* repl len */
" .byte 3b - 2b\n" /* pad len */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"4: jmp %l[t_no]\n"
"5:\n"
".previous\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 0\n" /* no replacement */
" .word %P[feature]\n" /* feature bit */
" .byte 3b - 1b\n" /* src len */
" .byte 0\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
".section .altinstr_aux,\"ax\"\n"
"6:\n"
" testb %[bitnum],%[cap_byte]\n"
" jnz %l[t_yes]\n"
" jmp %l[t_no]\n"
".previous\n"
: : [feature] "i" (bit),
[always] "i" (X86_FEATURE_ALWAYS),
[bitnum] "i" (1 << (bit & 7)),
@@ -199,44 +226,5 @@ t_no:
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
boot_cpu_data.x86_model
#else /* __ASSEMBLY__ */
.macro STATIC_CPU_HAS bitnum:req cap_byte:req feature:req t_yes:req t_no:req always:req
1:
jmp 6f
2:
.skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90
3:
.section .altinstructions,"a"
.long 1b - . /* src offset */
.long 4f - . /* repl offset */
.word \always /* always replace */
.byte 3b - 1b /* src len */
.byte 5f - 4f /* repl len */
.byte 3b - 2b /* pad len */
.previous
.section .altinstr_replacement,"ax"
4:
jmp \t_no
5:
.previous
.section .altinstructions,"a"
.long 1b - . /* src offset */
.long 0 /* no replacement */
.word \feature /* feature bit */
.byte 3b - 1b /* src len */
.byte 0 /* repl len */
.byte 0 /* pad len */
.previous
.section .altinstr_aux,"ax"
6:
testb \bitnum,\cap_byte
jnz \t_yes
jmp \t_no
.previous
.endm
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */

@@ -16,8 +16,8 @@
*/
extern unsigned long x86_fsbase_read_task(struct task_struct *task);
extern unsigned long x86_gsbase_read_task(struct task_struct *task);
extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
/* Helper functions for reading/writing FS/GS base */
@@ -39,8 +39,15 @@ static inline unsigned long x86_gsbase_read_cpu_inactive(void)
return gsbase;
}
extern void x86_fsbase_write_cpu(unsigned long fsbase);
extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
static inline void x86_fsbase_write_cpu(unsigned long fsbase)
{
wrmsrl(MSR_FS_BASE, fsbase);
}
static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
}
#endif /* CONFIG_X86_64 */

@@ -2,6 +2,19 @@
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H
#ifndef HAVE_JUMP_LABEL
/*
* For better or for worse, if jump labels (the gcc extension) are missing,
* then the entire static branch patching infrastructure is compiled out.
* If that happens, the code in here will malfunction. Raise a compiler
* error instead.
*
* In theory, jump labels and the static branch patching infrastructure
* could be decoupled to fix this.
*/
#error asm/jump_label.h included on a non-jump-label kernel
#endif
#define JUMP_LABEL_NOP_SIZE 5
#ifdef CONFIG_X86_64
@@ -20,9 +33,15 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
"branch=\"%c1\""
: : "i" (key), "i" (branch) : : l_yes);
asm_volatile_goto("1:"
".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
_ASM_PTR "%c0 + %c1 - .\n\t"
".popsection \n\t"
: : "i" (key), "i" (branch) : : l_yes);
return false;
l_yes:
return true;
@@ -30,8 +49,14 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
"branch=\"%c1\""
asm_volatile_goto("1:"
".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
"2:\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
_ASM_PTR "%c0 + %c1 - .\n\t"
".popsection \n\t"
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -41,26 +66,37 @@ l_yes:
#else /* __ASSEMBLY__ */
.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
.Lstatic_branch_nop_\@:
.byte STATIC_KEY_INIT_NOP
.Lstatic_branch_no_after_\@:
.macro STATIC_JUMP_IF_TRUE target, key, def
.Lstatic_jump_\@:
.if \def
/* Equivalent to "jmp.d32 \target" */
.byte 0xe9
.long \target - .Lstatic_jump_after_\@
.Lstatic_jump_after_\@:
.else
.byte STATIC_KEY_INIT_NOP
.endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
.long .Lstatic_branch_nop_\@ - ., \l_yes - .
_ASM_PTR \key + \branch - .
.long .Lstatic_jump_\@ - ., \target - .
_ASM_PTR \key - .
.popsection
.endm
.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
.Lstatic_branch_jmp_\@:
.byte 0xe9
.long \l_yes - .Lstatic_branch_jmp_after_\@
.Lstatic_branch_jmp_after_\@:
.macro STATIC_JUMP_IF_FALSE target, key, def
.Lstatic_jump_\@:
.if \def
.byte STATIC_KEY_INIT_NOP
.else
/* Equivalent to "jmp.d32 \target" */
.byte 0xe9
.long \target - .Lstatic_jump_after_\@
.Lstatic_jump_after_\@:
.endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
.long .Lstatic_branch_jmp_\@ - ., \l_yes - .
_ASM_PTR \key + \branch - .
.long .Lstatic_jump_\@ - ., \target - .
_ASM_PTR \key + 1 - .
.popsection
.endm

@@ -348,11 +348,23 @@ extern struct paravirt_patch_template pv_ops;
#define paravirt_clobber(clobber) \
[paravirt_clobber] "i" (clobber)
/*
* Generate some code, and mark it as patchable by the
* apply_paravirt() alternate instruction patcher.
*/
#define _paravirt_alt(insn_string, type, clobber) \
"771:\n\t" insn_string "\n" "772:\n" \
".pushsection .parainstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR " 771b\n" \
" .byte " type "\n" \
" .byte 772b-771b\n" \
" .short " clobber "\n" \
".popsection\n"
/* Generate patchable code, with the default asm parameters. */
#define paravirt_call \
"PARAVIRT_CALL type=\"%c[paravirt_typenum]\"" \
" clobber=\"%c[paravirt_clobber]\"" \
" pv_opptr=\"%c[paravirt_opptr]\";"
#define paravirt_alt(insn_string) \
_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
@@ -372,6 +384,16 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len);
int paravirt_disable_iospace(void);
/*
* This generates an indirect call based on the operation type number.
* The type number, computed in PARAVIRT_PATCH, is derived from the
* offset into the paravirt_patch_template structure, and can therefore be
* freely converted back into a structure offset.
*/
#define PARAVIRT_CALL \
ANNOTATE_RETPOLINE_SAFE \
"call *%c[paravirt_opptr];"
/*
* These macros are intended to wrap calls through one of the paravirt
* ops structs, so that they can be later identified and patched at
@@ -509,7 +531,7 @@ int paravirt_disable_iospace(void);
/* since this condition will never hold */ \
if (sizeof(rettype) > sizeof(unsigned long)) { \
asm volatile(pre \
paravirt_call \
paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -519,7 +541,7 @@ int paravirt_disable_iospace(void);
__ret = (rettype)((((u64)__edx) << 32) | __eax); \
} else { \
asm volatile(pre \
paravirt_call \
paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -546,7 +568,7 @@ int paravirt_disable_iospace(void);
PVOP_VCALL_ARGS; \
PVOP_TEST_NULL(op); \
asm volatile(pre \
paravirt_call \
paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -664,26 +686,6 @@ struct paravirt_patch_site {
extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
#else /* __ASSEMBLY__ */
/*
* This generates an indirect call based on the operation type number.
* The type number, computed in PARAVIRT_PATCH, is derived from the
* offset into the paravirt_patch_template structure, and can therefore be
* freely converted back into a structure offset.
*/
.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
771: ANNOTATE_RETPOLINE_SAFE
call *\pv_opptr
772: .pushsection .parainstructions,"a"
_ASM_ALIGN
_ASM_PTR 771b
.byte \type
.byte 772b-771b
.short \clobber
.popsection
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_TYPES_H */

@@ -111,6 +111,11 @@ extern unsigned int ptrs_per_p4d;
*/
#define MAXMEM (1UL << MAX_PHYSMEM_BITS)
#define GUARD_HOLE_PGD_ENTRY -256UL
#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
#define LDT_PGD_ENTRY -240UL
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)

@@ -4,41 +4,6 @@
* x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
* PaX/grsecurity.
*/
#ifdef __ASSEMBLY__
#include <asm/asm.h>
#include <asm/bug.h>
.macro REFCOUNT_EXCEPTION counter:req
.pushsection .text..refcount
111: lea \counter, %_ASM_CX
112: ud2
ASM_UNREACHABLE
.popsection
113: _ASM_EXTABLE_REFCOUNT(112b, 113b)
.endm
/* Trigger refcount exception if refcount result is negative. */
.macro REFCOUNT_CHECK_LT_ZERO counter:req
js 111f
REFCOUNT_EXCEPTION counter="\counter"
.endm
/* Trigger refcount exception if refcount result is zero or negative. */
.macro REFCOUNT_CHECK_LE_ZERO counter:req
jz 111f
REFCOUNT_CHECK_LT_ZERO counter="\counter"
.endm
/* Trigger refcount exception unconditionally. */
.macro REFCOUNT_ERROR counter:req
jmp 111f
REFCOUNT_EXCEPTION counter="\counter"
.endm
#else /* __ASSEMBLY__ */
#include <linux/refcount.h>
#include <asm/bug.h>
@@ -50,12 +15,35 @@
* central refcount exception. The fixup address for the exception points
* back to the regular execution flow in .text.
*/
#define _REFCOUNT_EXCEPTION \
".pushsection .text..refcount\n" \
"111:\tlea %[var], %%" _ASM_CX "\n" \
"112:\t" ASM_UD2 "\n" \
ASM_UNREACHABLE \
".popsection\n" \
"113:\n" \
_ASM_EXTABLE_REFCOUNT(112b, 113b)
/* Trigger refcount exception if refcount result is negative. */
#define REFCOUNT_CHECK_LT_ZERO \
"js 111f\n\t" \
_REFCOUNT_EXCEPTION
/* Trigger refcount exception if refcount result is zero or negative. */
#define REFCOUNT_CHECK_LE_ZERO \
"jz 111f\n\t" \
REFCOUNT_CHECK_LT_ZERO
/* Trigger refcount exception unconditionally. */
#define REFCOUNT_ERROR \
"jmp 111f\n\t" \
_REFCOUNT_EXCEPTION
static __always_inline void refcount_add(unsigned int i, refcount_t *r)
{
asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
"REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
: [counter] "+m" (r->refs.counter)
REFCOUNT_CHECK_LT_ZERO
: [var] "+m" (r->refs.counter)
: "ir" (i)
: "cc", "cx");
}
@@ -63,32 +51,31 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
static __always_inline void refcount_inc(refcount_t *r)
{
asm volatile(LOCK_PREFIX "incl %0\n\t"
"REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
: [counter] "+m" (r->refs.counter)
REFCOUNT_CHECK_LT_ZERO
: [var] "+m" (r->refs.counter)
: : "cc", "cx");
}
static __always_inline void refcount_dec(refcount_t *r)
{
asm volatile(LOCK_PREFIX "decl %0\n\t"
"REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\""
: [counter] "+m" (r->refs.counter)
REFCOUNT_CHECK_LE_ZERO
: [var] "+m" (r->refs.counter)
: : "cc", "cx");
}
static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
"REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
REFCOUNT_CHECK_LT_ZERO,
r->refs.counter, e, "er", i, "cx");
}
static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
"REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
REFCOUNT_CHECK_LT_ZERO,
r->refs.counter, e, "cx");
}
@@ -106,8 +93,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
/* Did we try to increment from/to an undesirable state? */
if (unlikely(c < 0 || c == INT_MAX || result < c)) {
asm volatile("REFCOUNT_ERROR counter=\"%[counter]\""
: : [counter] "m" (r->refs.counter)
asm volatile(REFCOUNT_ERROR
: : [var] "m" (r->refs.counter)
: "cc", "cx");
break;
}
@@ -122,6 +109,4 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
return refcount_add_not_zero(1, r);
}
#endif /* __ASSEMBLY__ */
#endif

@@ -23,6 +23,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -310,9 +311,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
return -EINVAL;
buf[nbytes - 1] = '\0';
cpus_read_lock();
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (!rdtgrp) {
rdtgroup_kn_unlock(of->kn);
cpus_read_unlock();
return -ENOENT;
}
rdt_last_cmd_clear();
@@ -367,6 +370,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
out:
rdtgroup_kn_unlock(of->kn);
cpus_read_unlock();
return ret ?: nbytes;
}

@@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
struct mtrr_gentry gentry;
void __user *arg = (void __user *) __arg;
memset(&gentry, 0, sizeof(gentry));
switch (cmd) {
case MTRRIOC_ADD_ENTRY:
case MTRRIOC_SET_ENTRY:

@@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file includes headers whose assembly part includes macros which are
* commonly used. The macros are precompiled into assmebly file which is later
* assembled together with each compiled file.
*/
#include <linux/compiler.h>
#include <asm/refcount.h>
#include <asm/alternative-asm.h>
#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/jump_label.h>

@@ -339,24 +339,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
return base;
}
void x86_fsbase_write_cpu(unsigned long fsbase)
{
/*
* Set the selector to 0 as a notion, that the segment base is
* overwritten, which will be checked for skipping the segment load
* during context switch.
*/
loadseg(FS, 0);
wrmsrl(MSR_FS_BASE, fsbase);
}
void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
/* Set the selector to 0 for the same reason as %fs above. */
loadseg(GS, 0);
wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
}
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
unsigned long fsbase;
@@ -385,38 +367,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
return gsbase;
}
int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
/*
* Not strictly needed for %fs, but do it for symmetry
* with %gs
*/
if (unlikely(fsbase >= TASK_SIZE_MAX))
return -EPERM;
WARN_ON_ONCE(task == current);
preempt_disable();
task->thread.fsbase = fsbase;
if (task == current)
x86_fsbase_write_cpu(fsbase);
task->thread.fsindex = 0;
preempt_enable();
return 0;
}
int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
if (unlikely(gsbase >= TASK_SIZE_MAX))
return -EPERM;
WARN_ON_ONCE(task == current);
preempt_disable();
task->thread.gsbase = gsbase;
if (task == current)
x86_gsbase_write_cpu_inactive(gsbase);
task->thread.gsindex = 0;
preempt_enable();
return 0;
}
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
@@ -754,11 +716,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
switch (option) {
case ARCH_SET_GS: {
ret = x86_gsbase_write_task(task, arg2);
if (unlikely(arg2 >= TASK_SIZE_MAX))
return -EPERM;
preempt_disable();
/*
* ARCH_SET_GS has always overwritten the index
* and the base. Zero is the most sensible value
* to put in the index, and is the only value that
* makes any sense if FSGSBASE is unavailable.
*/
if (task == current) {
loadseg(GS, 0);
x86_gsbase_write_cpu_inactive(arg2);
/*
* On non-FSGSBASE systems, save_base_legacy() expects
* that we also fill in thread.gsbase.
*/
task->thread.gsbase = arg2;
} else {
task->thread.gsindex = 0;
x86_gsbase_write_task(task, arg2);
}
preempt_enable();
break;
}
case ARCH_SET_FS: {
ret = x86_fsbase_write_task(task, arg2);
/*
* Not strictly needed for %fs, but do it for symmetry
* with %gs
*/
if (unlikely(arg2 >= TASK_SIZE_MAX))
return -EPERM;
preempt_disable();
/*
* Set the selector to 0 for the same reason
* as %gs above.
*/
if (task == current) {
loadseg(FS, 0);
x86_fsbase_write_cpu(arg2);
/*
* On non-FSGSBASE systems, save_base_legacy() expects
* that we also fill in thread.fsbase.
*/
task->thread.fsbase = arg2;
} else {
task->thread.fsindex = 0;
x86_fsbase_write_task(task, arg2);
}
preempt_enable();
break;
}
case ARCH_GET_FS: {

@@ -397,11 +397,12 @@ static int putreg(struct task_struct *child,
if (value >= TASK_SIZE_MAX)
return -EIO;
/*
* When changing the FS base, use the same
* mechanism as for do_arch_prctl_64().
* When changing the FS base, use do_arch_prctl_64()
* to set the index to zero and to set the base
* as requested.
*/
if (child->thread.fsbase != value)
return x86_fsbase_write_task(child, value);
return do_arch_prctl_64(child, ARCH_SET_FS, value);
return 0;
case offsetof(struct user_regs_struct,gs_base):
/*
@@ -410,7 +411,7 @@ static int putreg(struct task_struct *child,
if (value >= TASK_SIZE_MAX)
return -EIO;
if (child->thread.gsbase != value)
return x86_gsbase_write_task(child, value);
return do_arch_prctl_64(child, ARCH_SET_GS, value);
return 0;
#endif
}

@@ -55,10 +55,10 @@ struct addr_marker {
enum address_markers_idx {
USER_SPACE_NR = 0,
KERNEL_SPACE_NR,
LOW_KERNEL_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
#ifdef CONFIG_MODIFY_LDT_SYSCALL
LDT_NR,
#endif
LOW_KERNEL_NR,
VMALLOC_START_NR,
VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
@@ -66,9 +66,6 @@ enum address_markers_idx {
KASAN_SHADOW_END_NR,
#endif
CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
LDT_NR,
#endif
#ifdef CONFIG_X86_ESPFIX64
ESPFIX_START_NR,
#endif
@@ -512,11 +509,11 @@ static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
/*
* ffff800000000000 - ffff87ffffffffff is reserved for
* the hypervisor.
* A hole in the beginning of kernel address space reserved
* for a hypervisor.
*/
return (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
(idx < pgd_index(__PAGE_OFFSET));
return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
(idx < pgd_index(GUARD_HOLE_END_ADDR));
#else
return false;
#endif

@@ -285,20 +285,16 @@ static void cpa_flush_all(unsigned long cache)
on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}
static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
static bool __inv_flush_all(int cache)
{
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
WARN_ON(PAGE_ALIGN(start) != start);
if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
cpa_flush_all(cache);
return true;
}
flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
return !cache;
return false;
}
static void cpa_flush_range(unsigned long start, int numpages, int cache)
@@ -306,7 +302,14 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
unsigned int i, level;
unsigned long addr;
if (__cpa_flush_range(start, numpages, cache))
WARN_ON(PAGE_ALIGN(start) != start);
if (__inv_flush_all(cache))
return;
flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
if (!cache)
return;
/*
@@ -332,7 +335,12 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
{
unsigned int i, level;
if (__cpa_flush_range(baddr, numpages, cache))
if (__inv_flush_all(cache))
return;
flush_tlb_all();
if (!cache)
return;
/*

@@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address)
* for a "decoy" virtual address (bit 63 clear) passed to
* set_memory_X(). __pa() on a "decoy" address results in a
* physical address with bit 63 set.
*
* Decoy addresses are not present for 32-bit builds, see
* set_mce_nospec().
*/
return address & __PHYSICAL_MASK;
if (IS_ENABLED(CONFIG_X86_64))
return address & __PHYSICAL_MASK;
return address;
}
/*
@@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
start = sanitize_phys(start);
end = sanitize_phys(end);
BUG_ON(start >= end); /* end is exclusive */
if (start >= end) {
WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
start, end - 1, cattr_name(req_type));
return -EINVAL;
}
if (!pat_enabled()) {
/* This is identical to page table setting without PAT */

@@ -648,19 +648,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
unsigned long limit)
{
int i, nr, flush = 0;
unsigned hole_low, hole_high;
unsigned hole_low = 0, hole_high = 0;
/* The limit is the last byte to be touched */
limit--;
BUG_ON(limit >= FIXADDR_TOP);
#ifdef CONFIG_X86_64
/*
* 64-bit has a great big hole in the middle of the address
* space, which contains the Xen mappings. On 32-bit these
* will end up making a zero-sized hole and so is a no-op.
* space, which contains the Xen mappings.
*/
hole_low = pgd_index(USER_LIMIT);
hole_high = pgd_index(PAGE_OFFSET);
hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
hole_high = pgd_index(GUARD_HOLE_END_ADDR);
#endif
nr = pgd_index(limit) + 1;
for (i = 0; i < nr; i++) {

@@ -17,8 +17,10 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
struct bug_entry {
#ifdef CONFIG_BUG
#ifdef CONFIG_GENERIC_BUG
struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
#else
@@ -33,10 +35,8 @@ struct bug_entry {
unsigned short line;
#endif
unsigned short flags;
#endif /* CONFIG_GENERIC_BUG */
};
#ifdef CONFIG_BUG
#endif /* CONFIG_GENERIC_BUG */
/*
* Don't use BUG() or BUG_ON() unless there's really no way out; one

@@ -99,13 +99,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
#define annotate_reachable() ({ \
asm volatile("ANNOTATE_REACHABLE counter=%c0" \
: : "i" (__COUNTER__)); \
asm volatile("%c0:\n\t" \
".pushsection .discard.reachable\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \
: : "i" (__COUNTER__)); \
asm volatile("%c0:\n\t" \
".pushsection .discard.unreachable\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define ASM_UNREACHABLE \
"999:\n\t" \
".pushsection .discard.unreachable\n\t" \
".long 999b - .\n\t" \
".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
@@ -293,45 +302,6 @@ static inline void *offset_to_ptr(const int *off)
return (void *)((unsigned long)off + *off);
}
#else /* __ASSEMBLY__ */
#ifdef __KERNEL__
#ifndef LINKER_SCRIPT
#ifdef CONFIG_STACK_VALIDATION
.macro ANNOTATE_UNREACHABLE counter:req
\counter:
.pushsection .discard.unreachable
.long \counter\()b -.
.popsection
.endm
.macro ANNOTATE_REACHABLE counter:req
\counter:
.pushsection .discard.reachable
.long \counter\()b -.
.popsection
.endm
.macro ASM_UNREACHABLE
999:
.pushsection .discard.unreachable
.long 999b - .
.popsection
.endm
#else /* CONFIG_STACK_VALIDATION */
.macro ANNOTATE_UNREACHABLE counter:req
.endm
.macro ANNOTATE_REACHABLE counter:req
.endm
.macro ASM_UNREACHABLE
.endm
#endif /* CONFIG_STACK_VALIDATION */
#endif /* LINKER_SCRIPT */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* Compile time object size, -1 for unknown */

@@ -115,9 +115,7 @@ __cc-option = $(call try-run,\
# Do not attempt to build with gcc plugins during cc-option tests.
# (And this uses delayed resolution so the flags will be up to date.)
# In addition, do not include the asm macros which are built later.
CC_OPTION_FILTERED = $(GCC_PLUGINS_CFLAGS) $(ASM_MACRO_FLAGS)
CC_OPTION_CFLAGS = $(filter-out $(CC_OPTION_FILTERED),$(KBUILD_CFLAGS))
CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)

@@ -4,8 +4,6 @@ OBJECT_FILES_NON_STANDARD := y
hostprogs-y := modpost mk_elfconfig
always := $(hostprogs-y) empty.o
CFLAGS_REMOVE_empty.o := $(ASM_MACRO_FLAGS)
modpost-objs := modpost.o file2alias.o sumversion.o
devicetable-offsets-file := devicetable-offsets.h