Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:

 - Prevent an out-of-bounds access in mtrr_write()

 - Break a circular dependency in the new hyperv IPI acceleration code

 - Address the build breakage related to inline functions by enforcing
   gnu_inline and explicitly bringing native_save_fl() out of line,
   which also adds a set of _ASM_ARG macros which provide 32/64bit
   safety.

 - Initialize the shadow CR4 per cpu variable before using it.

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mtrr: Don't copy out-of-bounds data in mtrr_write
  x86/hyper-v: Fix the circular dependency in IPI enlightenment
  x86/paravirt: Make native_save_fl() extern inline
  x86/asm: Add _ASM_ARG* constants for argument registers to <asm/asm.h>
  compiler-gcc.h: Add __attribute__((gnu_inline)) to all inline declarations
  x86/mm/32: Initialize the CR4 shadow before __flush_tlb_all()
This commit is contained in:
Linus Torvalds 2018-07-08 13:26:55 -07:00
commit 6f27a64092
10 changed files with 129 additions and 11 deletions

View file

@@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
}
if (nr_bank < 0)
goto ipi_mask_ex_done;
if (!nr_bank)
ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
@@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
for_each_cpu(cur_cpu, mask) {
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
if (vcpu == VP_INVAL)
goto ipi_mask_done;
/*
* This particular version of the IPI hypercall can
* only target upto 64 CPUs.

View file

@@ -265,7 +265,7 @@ void __init hyperv_init(void)
{
u64 guest_id, required_msrs;
union hv_x64_msr_hypercall_contents hypercall_msr;
int cpuhp;
int cpuhp, i;
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
return;
@@ -293,6 +293,9 @@ void __init hyperv_init(void)
if (!hv_vp_index)
return;
for (i = 0; i < num_possible_cpus(); i++)
hv_vp_index[i] = VP_INVAL;
hv_vp_assist_page = kcalloc(num_possible_cpus(),
sizeof(*hv_vp_assist_page), GFP_KERNEL);
if (!hv_vp_assist_page) {

View file

@@ -46,6 +46,65 @@
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
#ifndef __x86_64__
/* 32 bit */
#define _ASM_ARG1 _ASM_AX
#define _ASM_ARG2 _ASM_DX
#define _ASM_ARG3 _ASM_CX
#define _ASM_ARG1L eax
#define _ASM_ARG2L edx
#define _ASM_ARG3L ecx
#define _ASM_ARG1W ax
#define _ASM_ARG2W dx
#define _ASM_ARG3W cx
#define _ASM_ARG1B al
#define _ASM_ARG2B dl
#define _ASM_ARG3B cl
#else
/* 64 bit */
#define _ASM_ARG1 _ASM_DI
#define _ASM_ARG2 _ASM_SI
#define _ASM_ARG3 _ASM_DX
#define _ASM_ARG4 _ASM_CX
#define _ASM_ARG5 r8
#define _ASM_ARG6 r9
#define _ASM_ARG1Q rdi
#define _ASM_ARG2Q rsi
#define _ASM_ARG3Q rdx
#define _ASM_ARG4Q rcx
#define _ASM_ARG5Q r8
#define _ASM_ARG6Q r9
#define _ASM_ARG1L edi
#define _ASM_ARG2L esi
#define _ASM_ARG3L edx
#define _ASM_ARG4L ecx
#define _ASM_ARG5L r8d
#define _ASM_ARG6L r9d
#define _ASM_ARG1W di
#define _ASM_ARG2W si
#define _ASM_ARG3W dx
#define _ASM_ARG4W cx
#define _ASM_ARG5W r8w
#define _ASM_ARG6W r9w
#define _ASM_ARG1B dil
#define _ASM_ARG2B sil
#define _ASM_ARG3B dl
#define _ASM_ARG4B cl
#define _ASM_ARG5B r8b
#define _ASM_ARG6B r9b
#endif
/*
* Macros to generate condition code outputs from inline assembly,
* The output operand must be type "bool".

View file

@@ -13,7 +13,7 @@
* Interrupt control:
*/
static inline unsigned long native_save_fl(void)
extern inline unsigned long native_save_fl(void)
{
unsigned long flags;

View file

@@ -9,6 +9,8 @@
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#define VP_INVAL U32_MAX
struct ms_hyperv_info {
u32 features;
u32 misc_features;
@@ -20,7 +22,6 @@ struct ms_hyperv_info {
extern struct ms_hyperv_info ms_hyperv;
/*
* Generate the guest ID.
*/
@@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
*/
for_each_cpu(cpu, cpus) {
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL)
return -1;
vcpu_bank = vcpu / 64;
vcpu_offset = vcpu % 64;
__set_bit(vcpu_offset, (unsigned long *)

View file

@@ -61,6 +61,7 @@ obj-y += alternative.o i8253.o hw_breakpoint.o
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
obj-y += irqflags.o
obj-y += process.o
obj-y += fpu/

View file

@@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
memset(line, 0, LINE_SIZE);
length = strncpy_from_user(line, buf, LINE_SIZE - 1);
len = min_t(size_t, len, LINE_SIZE - 1);
length = strncpy_from_user(line, buf, len);
if (length < 0)
return length;

View file

@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/asm.h>
#include <asm/export.h>
#include <linux/linkage.h>
/*
* unsigned long native_save_fl(void)
*/
ENTRY(native_save_fl)
pushf
pop %_ASM_AX
ret
ENDPROC(native_save_fl)
EXPORT_SYMBOL(native_save_fl)
/*
* void native_restore_fl(unsigned long flags)
* %eax/%rdi: flags
*/
ENTRY(native_restore_fl)
push %_ASM_ARG1
popf
ret
ENDPROC(native_restore_fl)
EXPORT_SYMBOL(native_restore_fl)

View file

@@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused)
#ifdef CONFIG_X86_32
/* switch away from the initial page table */
load_cr3(swapper_pg_dir);
/*
* Initialize the CR4 shadow before doing anything that could
* try to read it.
*/
cr4_init_shadow();
__flush_tlb_all();
#endif
load_current_idt();

View file

@@ -65,6 +65,18 @@
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif
/*
* Feature detection for gnu_inline (gnu89 extern inline semantics). Either
* __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
* and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
* defined so the gnu89 semantics are the default.
*/
#ifdef __GNUC_STDC_INLINE__
# define __gnu_inline __attribute__((gnu_inline))
#else
# define __gnu_inline
#endif
/*
* Force always-inline if the user requests it so via the .config,
* or if gcc is too old.
@@ -72,19 +84,22 @@
* -Wunused-function. This turns out to avoid the need for complex #ifdef
* directives. Suppress the warning in clang as well by using "unused"
* function attribute, which is redundant but not harmful for gcc.
* Prefer gnu_inline, so that extern inline functions do not emit an
* externally visible function. This makes extern inline behave as per gnu89
* semantics rather than c99. This prevents multiple symbol definition errors
* of extern inline functions at link time.
* A lot of inline functions can cause havoc with function tracing.
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
#define inline inline __attribute__((always_inline,unused)) notrace
#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
#define __inline __inline __attribute__((always_inline,unused)) notrace
#define inline \
inline __attribute__((always_inline, unused)) notrace __gnu_inline
#else
/* A lot of inline functions can cause havoc with function tracing */
#define inline inline __attribute__((unused)) notrace
#define __inline__ __inline__ __attribute__((unused)) notrace
#define __inline __inline __attribute__((unused)) notrace
#define inline inline __attribute__((unused)) notrace __gnu_inline
#endif
#define __inline__ inline
#define __inline inline
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))