From e05196401657cff3178dc392b739e520b26d4aef Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 24 Apr 2019 15:41:16 +0200 Subject: [PATCH 1/8] x86/paravirt: Remove bogus extern declarations These functions are already declared in asm/paravirt.h Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Link: http://lkml.kernel.org/r/20190424134223.501598258@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/paravirt_patch_32.c | 3 --- arch/x86/kernel/paravirt_patch_64.c | 3 --- 2 files changed, 6 deletions(-) diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index de138d3912e4..05d771f81e74 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -23,9 +23,6 @@ DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); #endif -extern bool pv_is_native_spin_unlock(void); -extern bool pv_is_native_vcpu_is_preempted(void); - unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) { #define PATCH_SITE(ops, x) \ diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 9d9e04b31077..bd1558f90cfb 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -29,9 +29,6 @@ DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); #endif -extern bool pv_is_native_spin_unlock(void); -extern bool pv_is_native_vcpu_is_preempted(void); - unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) { #define PATCH_SITE(ops, x) \ From 2777cae2b19d4a08ad233b3504c19c6f7a6a2ef3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 25 Apr 2019 11:17:17 +0200 Subject: [PATCH 2/8] x86/paravirt: Detect over-sized patching bugs in paravirt_patch_insns() So paravirt_patch_insns() contains this gem of logic: unsigned paravirt_patch_insns(void *insnbuf, unsigned len, const char *start, const char *end) { unsigned insn_len = end - start; if (insn_len > len || start == NULL) insn_len = len; else memcpy(insnbuf, start, insn_len); return insn_len; } Note how the new instruction is checked against 'len' (the size of the original instruction) and, if it does not fit, silently discarded with no warning printed whatsoever. This crashes the kernel in funny ways if the patching template is buggy, and usually in much later places. Instead do a direct BUG_ON(); there's no way to continue successfully at that point. I've tested this patch: with a vanilla kernel the check never triggers, and if I intentionally increase the size of one of the patch templates to too high a value, the assert triggers: [ 0.164385] kernel BUG at arch/x86/kernel/paravirt.c:167! Without this patch a broken kernel randomly crashes in later places, after the silent patching failure. Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20190425091717.GA72229@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/paravirt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index c0e0101133f3..7f9121f2fdac 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -163,10 +163,10 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len, { unsigned insn_len = end - start; - if (insn_len > len || start == NULL) - insn_len = len; - else - memcpy(insnbuf, start, insn_len); + /* Alternative instruction is too large for the patch site and we cannot continue: */ + BUG_ON(insn_len > len || start == NULL); + + memcpy(insnbuf, start, insn_len); return insn_len; }
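The effect of the check added in this patch can be modelled outside the kernel with a minimal, self-contained C sketch; assert() stands in for BUG_ON(), and the buffer and function names below are illustrative stand-ins rather than the kernel's own symbols:

	#include <assert.h>
	#include <string.h>

	/*
	 * Copy a replacement instruction sequence [start, end) into a patch
	 * site of size 'len'. An over-sized or missing replacement is treated
	 * as a hard bug instead of being skipped silently.
	 */
	static unsigned int patch_insns(void *insn_buff, unsigned int len,
					const unsigned char *start,
					const unsigned char *end)
	{
		unsigned int insn_len = end - start;

		assert(start != NULL && insn_len <= len);

		memcpy(insn_buff, start, insn_len);
		return insn_len;
	}

	int main(void)
	{
		static const unsigned char cli[] = { 0xfa };	/* native "cli" */
		unsigned char site[8];

		/* Fits into the 8-byte patch site: returns 1 and copies the byte. */
		return patch_insns(site, sizeof(site), cli, cli + sizeof(cli)) == 1 ? 0 : 1;
	}

With a template larger than the patch site, the assert fires immediately instead of leaving a silently unpatched site, which is exactly the failure mode the new BUG_ON() is meant to surface.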
From 11e86dc7f2746210f9c7dc10deaa7658f8dc8350 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 25 Apr 2019 11:50:39 +0200 Subject: [PATCH 3/8] x86/paravirt: Detect over-sized patching bugs in paravirt_patch_call() paravirt_patch_call() currently handles patching failures inconsistently: we generate a warning in the retpoline case, but don't in other cases where we might end up with a non-working kernel as well. So just convert it all to a BUG_ON(); these patching calls are *not* supposed to fail, and if they do we want to know it immediately. This also makes the kernel smaller and removes an ugly #ifdef. I tried it with a richly paravirt-enabled kernel and no patching bugs were detected. Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20190425095039.GC115378@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/paravirt.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 7f9121f2fdac..544d386ded45 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -73,21 +73,21 @@ struct branch { static unsigned paravirt_patch_call(void *insnbuf, const void *target, unsigned long addr, unsigned len) { + const int call_len = 5; struct branch *b = insnbuf; - unsigned long delta = (unsigned long)target - (addr+5); + unsigned long delta = (unsigned long)target - (addr+call_len); - if (len < 5) { -#ifdef CONFIG_RETPOLINE - WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr); -#endif - return len; /* call too long for patch site */ + if (len < call_len) { + pr_warn("paravirt: Failed to patch indirect CALL at %ps\n", (void *)addr); + /* Kernel might not be viable if patching fails, bail out: */ + BUG_ON(1); } b->opcode = 0xe8; /* call */ b->delta = delta; - BUILD_BUG_ON(sizeof(*b) != 5); + BUILD_BUG_ON(sizeof(*b) != call_len); - return 5; + return call_len; } #ifdef CONFIG_PARAVIRT_XXL From fb2af0712fe8831dc152b0b5dd8bc516970da336 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 24 Apr 2019 15:41:17 +0200 Subject: [PATCH 4/8] x86/paravirt: Unify the 32/64 bit paravirt patching code Large parts of these two files are identical. Merge them together. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H.
Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Link: http://lkml.kernel.org/r/20190424134223.603491680@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 4 +- .../{paravirt_patch_64.c => paravirt_patch.c} | 64 ++++++++++++++----- arch/x86/kernel/paravirt_patch_32.c | 64 ------------------- 3 files changed, 51 insertions(+), 81 deletions(-) rename arch/x86/kernel/{paravirt_patch_64.c => paravirt_patch.c} (59%) delete mode 100644 arch/x86/kernel/paravirt_patch_32.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 00b7e27bc2b7..62e78a3fd31e 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -30,7 +30,7 @@ KASAN_SANITIZE_paravirt.o := n OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y OBJECT_FILES_NON_STANDARD_test_nx.o := y -OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y +OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y ifdef CONFIG_FRAME_POINTER OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y @@ -112,7 +112,7 @@ obj-$(CONFIG_AMD_NB) += amd_nb.o obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o -obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o +obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch.c similarity index 59% rename from arch/x86/kernel/paravirt_patch_64.c rename to arch/x86/kernel/paravirt_patch.c index bd1558f90cfb..a47899db9932 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch.c @@ -1,9 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include #include -#ifdef CONFIG_PARAVIRT_XXL +#include +#include + +#ifdef CONFIG_X86_64 +# ifdef CONFIG_PARAVIRT_XXL DEF_NATIVE(irq, irq_disable, "cli"); DEF_NATIVE(irq, irq_enable, "sti"); DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq"); @@ -12,24 +14,49 @@ DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3"); DEF_NATIVE(cpu, wbinvd, "wbinvd"); - DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); DEF_NATIVE(cpu, swapgs, "swapgs"); DEF_NATIVE(, mov64, "mov %rdi, %rax"); -unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) +unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len) { - return paravirt_patch_insns(insnbuf, len, - start__mov64, end__mov64); + return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64); } -#endif +# endif /* CONFIG_PARAVIRT_XXL */ -#if defined(CONFIG_PARAVIRT_SPINLOCKS) +# ifdef CONFIG_PARAVIRT_SPINLOCKS DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); -#endif +# endif -unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) +#else /* CONFIG_X86_64 */ + +# ifdef CONFIG_PARAVIRT_XXL +DEF_NATIVE(irq, irq_disable, "cli"); +DEF_NATIVE(irq, irq_enable, "sti"); +DEF_NATIVE(irq, restore_fl, "push %eax; popf"); +DEF_NATIVE(irq, save_fl, "pushf; pop %eax"); +DEF_NATIVE(cpu, iret, "iret"); +DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); +DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); +DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); + +unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len) +{ + /* arg in %edx:%eax, return in %edx:%eax */ + return 0; +} +# endif /* CONFIG_PARAVIRT_XXL 
*/ + +# ifdef CONFIG_PARAVIRT_SPINLOCKS +DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); +DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); +# endif + +#endif /* !CONFIG_X86_64 */ + +unsigned int native_patch(u8 type, void *ibuf, unsigned long addr, + unsigned int len) { #define PATCH_SITE(ops, x) \ case PARAVIRT_PATCH(ops.x): \ @@ -41,14 +68,21 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) PATCH_SITE(irq, save_fl); PATCH_SITE(irq, irq_enable); PATCH_SITE(irq, irq_disable); - PATCH_SITE(cpu, usergs_sysret64); - PATCH_SITE(cpu, swapgs); - PATCH_SITE(cpu, wbinvd); + PATCH_SITE(mmu, read_cr2); PATCH_SITE(mmu, read_cr3); PATCH_SITE(mmu, write_cr3); + +# ifdef CONFIG_X86_64 + PATCH_SITE(cpu, usergs_sysret64); + PATCH_SITE(cpu, swapgs); + PATCH_SITE(cpu, wbinvd); +# else + PATCH_SITE(cpu, iret); +# endif #endif -#if defined(CONFIG_PARAVIRT_SPINLOCKS) + +#ifdef CONFIG_PARAVIRT_SPINLOCKS case PARAVIRT_PATCH(lock.queued_spin_unlock): if (pv_is_native_spin_unlock()) return paravirt_patch_insns(ibuf, len, diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c deleted file mode 100644 index 05d771f81e74..000000000000 --- a/arch/x86/kernel/paravirt_patch_32.c +++ /dev/null @@ -1,64 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#ifdef CONFIG_PARAVIRT_XXL -DEF_NATIVE(irq, irq_disable, "cli"); -DEF_NATIVE(irq, irq_enable, "sti"); -DEF_NATIVE(irq, restore_fl, "push %eax; popf"); -DEF_NATIVE(irq, save_fl, "pushf; pop %eax"); -DEF_NATIVE(cpu, iret, "iret"); -DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); -DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); -DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); - -unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) -{ - /* arg in %edx:%eax, return in %edx:%eax */ - return 0; -} -#endif - -#if defined(CONFIG_PARAVIRT_SPINLOCKS) -DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); -DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); -#endif - -unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len) -{ -#define PATCH_SITE(ops, x) \ - case PARAVIRT_PATCH(ops.x): \ - return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x) - - switch (type) { -#ifdef CONFIG_PARAVIRT_XXL - PATCH_SITE(irq, irq_disable); - PATCH_SITE(irq, irq_enable); - PATCH_SITE(irq, restore_fl); - PATCH_SITE(irq, save_fl); - PATCH_SITE(cpu, iret); - PATCH_SITE(mmu, read_cr2); - PATCH_SITE(mmu, read_cr3); - PATCH_SITE(mmu, write_cr3); -#endif -#if defined(CONFIG_PARAVIRT_SPINLOCKS) - case PARAVIRT_PATCH(lock.queued_spin_unlock): - if (pv_is_native_spin_unlock()) - return paravirt_patch_insns(ibuf, len, - start_lock_queued_spin_unlock, - end_lock_queued_spin_unlock); - break; - - case PARAVIRT_PATCH(lock.vcpu_is_preempted): - if (pv_is_native_vcpu_is_preempted()) - return paravirt_patch_insns(ibuf, len, - start_lock_vcpu_is_preempted, - end_lock_vcpu_is_preempted); - break; -#endif - - default: - break; - } -#undef PATCH_SITE - return paravirt_patch_default(type, ibuf, addr, len); -} From 0b9d2fc1d0d628c94c6866a2ed3005c6730db512 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 24 Apr 2019 15:41:18 +0200 Subject: [PATCH 5/8] x86/paravirt: Replace the paravirt patch asm magic The magic macro DEF_NATIVE() in the paravirt patching code uses inline assembly to generate a data table for patching in the native instructions. While clever this is falling apart with LTO and even aside of LTO the construct is just working by chance according to GCC folks. 
Aside of that the tables are constant data and not some form of magic text. As these constructs are not subject to frequent changes it is not a maintenance issue to convert them to regular data tables which are initialized with hex bytes. Create a new set of macros and data structures to store the instruction sequences and convert the code over. Reported-by: Andi Kleen Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Link: http://lkml.kernel.org/r/20190424134223.690835713@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/paravirt_types.h | 4 - arch/x86/kernel/paravirt_patch.c | 144 +++++++++++++++----------- 2 files changed, 82 insertions(+), 66 deletions(-) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 2474e434a6f7..ae8d6ddfe39a 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -370,10 +370,6 @@ extern struct paravirt_patch_template pv_ops; /* Simple instruction patching code. */ #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t" -#define DEF_NATIVE(ops, name, code) \ - __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \ - asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name)) - unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); unsigned paravirt_patch_default(u8 type, void *insnbuf, unsigned long addr, unsigned len); diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c index a47899db9932..60e7a5e236c0 100644 --- a/arch/x86/kernel/paravirt_patch.c +++ b/arch/x86/kernel/paravirt_patch.c @@ -4,103 +4,123 @@ #include #include -#ifdef CONFIG_X86_64 -# ifdef CONFIG_PARAVIRT_XXL -DEF_NATIVE(irq, irq_disable, "cli"); -DEF_NATIVE(irq, irq_enable, "sti"); -DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq"); -DEF_NATIVE(irq, save_fl, "pushfq; popq %rax"); -DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax"); -DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax"); -DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3"); -DEF_NATIVE(cpu, wbinvd, "wbinvd"); -DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); -DEF_NATIVE(cpu, swapgs, "swapgs"); -DEF_NATIVE(, mov64, "mov %rdi, %rax"); +#define PSTART(d, m) \ + patch_data_##d.m -unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len) -{ - return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64); -} -# endif /* CONFIG_PARAVIRT_XXL */ +#define PEND(d, m) \ + (PSTART(d, m) + sizeof(patch_data_##d.m)) -# ifdef CONFIG_PARAVIRT_SPINLOCKS -DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); -DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); +#define PATCH(d, m, ibuf, len) \ + paravirt_patch_insns(ibuf, len, PSTART(d, m), PEND(d, m)) + +#define PATCH_CASE(ops, m, data, ibuf, len) \ + case PARAVIRT_PATCH(ops.m): \ + return PATCH(data, ops##_##m, ibuf, len) + +#ifdef CONFIG_PARAVIRT_XXL +struct patch_xxl { + const unsigned char irq_irq_disable[1]; + const unsigned char irq_irq_enable[1]; + const unsigned char irq_restore_fl[2]; + const unsigned char irq_save_fl[2]; + const unsigned char mmu_read_cr2[3]; + const unsigned char mmu_read_cr3[3]; + const unsigned char mmu_write_cr3[3]; +# ifdef CONFIG_X86_64 + const unsigned char cpu_wbinvd[2]; + const unsigned char cpu_usergs_sysret64[6]; + const unsigned char cpu_swapgs[3]; + const unsigned char mov64[3]; +# else + const 
unsigned char cpu_iret[1]; # endif +}; -#else /* CONFIG_X86_64 */ - -# ifdef CONFIG_PARAVIRT_XXL -DEF_NATIVE(irq, irq_disable, "cli"); -DEF_NATIVE(irq, irq_enable, "sti"); -DEF_NATIVE(irq, restore_fl, "push %eax; popf"); -DEF_NATIVE(irq, save_fl, "pushf; pop %eax"); -DEF_NATIVE(cpu, iret, "iret"); -DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); -DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); -DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); +static const struct patch_xxl patch_data_xxl = { + .irq_irq_disable = { 0xfa }, // cli + .irq_irq_enable = { 0xfb }, // sti + .irq_save_fl = { 0x9c, 0x58 }, // pushf; pop %[re]ax + .mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax + .mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax +# ifdef CONFIG_X86_64 + .irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq + .mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3 + .cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd + .cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8, + 0x48, 0x0f, 0x07 }, // swapgs; sysretq + .cpu_swapgs = { 0x0f, 0x01, 0xf8 }, // swapgs + .mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax +# else + .irq_restore_fl = { 0x50, 0x9d }, // push %eax; popf + .mmu_write_cr3 = { 0x0f, 0x22, 0xd8 }, // mov %eax, %cr3 + .cpu_iret = { 0xcf }, // iret +# endif +}; unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len) { - /* arg in %edx:%eax, return in %edx:%eax */ +#ifdef CONFIG_X86_64 + return PATCH(xxl, mov64, insnbuf, len); +#endif return 0; } # endif /* CONFIG_PARAVIRT_XXL */ -# ifdef CONFIG_PARAVIRT_SPINLOCKS -DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); -DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); -# endif +#ifdef CONFIG_PARAVIRT_SPINLOCKS +struct patch_lock { + unsigned char queued_spin_unlock[3]; + unsigned char vcpu_is_preempted[2]; +}; -#endif /* !CONFIG_X86_64 */ +static const struct patch_lock patch_data_lock = { + .vcpu_is_preempted = { 0x31, 0xc0 }, // xor %eax, %eax + +# ifdef CONFIG_X86_64 + .queued_spin_unlock = { 0xc6, 0x07, 0x00 }, // movb $0, (%rdi) +# else + .queued_spin_unlock = { 0xc6, 0x00, 0x00 }, // movb $0, (%eax) +# endif +}; +#endif /* CONFIG_PARAVIRT_SPINLOCKS */ unsigned int native_patch(u8 type, void *ibuf, unsigned long addr, unsigned int len) { -#define PATCH_SITE(ops, x) \ - case PARAVIRT_PATCH(ops.x): \ - return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x) - switch (type) { -#ifdef CONFIG_PARAVIRT_XXL - PATCH_SITE(irq, restore_fl); - PATCH_SITE(irq, save_fl); - PATCH_SITE(irq, irq_enable); - PATCH_SITE(irq, irq_disable); - PATCH_SITE(mmu, read_cr2); - PATCH_SITE(mmu, read_cr3); - PATCH_SITE(mmu, write_cr3); +#ifdef CONFIG_PARAVIRT_XXL + PATCH_CASE(irq, restore_fl, xxl, ibuf, len); + PATCH_CASE(irq, save_fl, xxl, ibuf, len); + PATCH_CASE(irq, irq_enable, xxl, ibuf, len); + PATCH_CASE(irq, irq_disable, xxl, ibuf, len); + + PATCH_CASE(mmu, read_cr2, xxl, ibuf, len); + PATCH_CASE(mmu, read_cr3, xxl, ibuf, len); + PATCH_CASE(mmu, write_cr3, xxl, ibuf, len); # ifdef CONFIG_X86_64 - PATCH_SITE(cpu, usergs_sysret64); - PATCH_SITE(cpu, swapgs); - PATCH_SITE(cpu, wbinvd); + PATCH_CASE(cpu, usergs_sysret64, xxl, ibuf, len); + PATCH_CASE(cpu, swapgs, xxl, ibuf, len); + PATCH_CASE(cpu, wbinvd, xxl, ibuf, len); # else - PATCH_SITE(cpu, iret); + PATCH_CASE(cpu, iret, xxl, ibuf, len); # endif #endif #ifdef CONFIG_PARAVIRT_SPINLOCKS case PARAVIRT_PATCH(lock.queued_spin_unlock): if (pv_is_native_spin_unlock()) - return paravirt_patch_insns(ibuf, len, - start_lock_queued_spin_unlock, - end_lock_queued_spin_unlock); + return 
PATCH(lock, queued_spin_unlock, ibuf, len); break; case PARAVIRT_PATCH(lock.vcpu_is_preempted): if (pv_is_native_vcpu_is_preempted()) - return paravirt_patch_insns(ibuf, len, - start_lock_vcpu_is_preempted, - end_lock_vcpu_is_preempted); + return PATCH(lock, vcpu_is_preempted, ibuf, len); break; #endif - default: break; } -#undef PATCH_SITE + return paravirt_patch_default(type, ibuf, addr, len); } From fc93dfd9345bb8b29a62b21cb0447dd1a3815f91 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 25 Apr 2019 10:10:12 +0200 Subject: [PATCH 6/8] x86/paravirt: Match paravirt patchlet field definition ordering to initialization ordering Here's the objdump -D output of the PATCH_XXL data table: 0000000000000010 : 10: fa cli 11: fb sti 12: 57 push %rdi 13: 9d popfq 14: 9c pushfq 15: 58 pop %rax 16: 0f 20 d0 mov %cr2,%rax 19: 0f 20 d8 mov %cr3,%rax 1c: 0f 22 df mov %rdi,%cr3 1f: 0f 09 wbinvd 21: 0f 01 f8 swapgs 24: 48 0f 07 sysretq 27: 0f 01 f8 swapgs 2a: 48 89 f8 mov %rdi,%rax Note how this doesn't match up to the source code: static const struct patch_xxl patch_data_xxl = { .irq_irq_disable = { 0xfa }, // cli .irq_irq_enable = { 0xfb }, // sti .irq_save_fl = { 0x9c, 0x58 }, // pushf; pop %[re]ax .mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax .mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax .irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq .mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3 .cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd .cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8, 0x48, 0x0f, 0x07 }, // swapgs; sysretq .cpu_swapgs = { 0x0f, 0x01, 0xf8 }, // swapgs .mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax .irq_restore_fl = { 0x50, 0x9d }, // push %eax; popf .mmu_write_cr3 = { 0x0f, 0x22, 0xd8 }, // mov %eax, %cr3 .cpu_iret = { 0xcf }, // iret }; Note how they are reordered: in the generated code .irq_restore_fl comes before .irq_save_fl, etc. This is because the field ordering in struct patch_xxl does not match the initialization ordering of patch_data_xxl. Match up the initialization order with the definition order - this makes the disassembly easily reviewable: 0000000000000010 : 10: fa cli 11: fb sti 12: 9c pushfq 13: 58 pop %rax 14: 0f 20 d0 mov %cr2,%rax 17: 0f 20 d8 mov %cr3,%rax 1a: 0f 22 df mov %rdi,%cr3 1d: 57 push %rdi 1e: 9d popfq 1f: 0f 09 wbinvd 21: 0f 01 f8 swapgs 24: 48 0f 07 sysretq 27: 0f 01 f8 swapgs 2a: 48 89 f8 mov %rdi,%rax Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20190425081012.GA115378@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/paravirt_patch.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c index 60e7a5e236c0..37b1d43d1e17 100644 --- a/arch/x86/kernel/paravirt_patch.c +++ b/arch/x86/kernel/paravirt_patch.c @@ -21,11 +21,11 @@ struct patch_xxl { const unsigned char irq_irq_disable[1]; const unsigned char irq_irq_enable[1]; - const unsigned char irq_restore_fl[2]; const unsigned char irq_save_fl[2]; const unsigned char mmu_read_cr2[3]; const unsigned char mmu_read_cr3[3]; const unsigned char mmu_write_cr3[3]; + const unsigned char irq_restore_fl[2]; # ifdef CONFIG_X86_64 const unsigned char cpu_wbinvd[2]; const unsigned char cpu_usergs_sysret64[6]; @@ -43,16 +43,16 @@ static const struct patch_xxl patch_data_xxl = { .mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax .mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax # ifdef CONFIG_X86_64 - .irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq .mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3 + .irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq .cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd .cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8, 0x48, 0x0f, 0x07 }, // swapgs; sysretq .cpu_swapgs = { 0x0f, 0x01, 0xf8 }, // swapgs .mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax # else - .irq_restore_fl = { 0x50, 0x9d }, // push %eax; popf .mmu_write_cr3 = { 0x0f, 0x22, 0xd8 }, // mov %eax, %cr3 + .irq_restore_fl = { 0x50, 0x9d }, // push %eax; popf .cpu_iret = { 0xcf }, // iret # endif }; From 1fc654cf6e04b402ba9c4327b2919ea864037e7a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 25 Apr 2019 13:03:31 +0200 Subject: [PATCH 7/8] x86/paravirt: Standardize 'insn_buff' variable names We currently have 6 (!) separate naming variants to name temporary instruction buffers that are used for code patching: - insnbuf - insnbuff - insn_buff - insn_buffer - ibuf - ibuffer These are used as local variables, percpu fields and function parameters. Standardize all the names to a single variant: 'insn_buff'. Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/events/intel/ds.c | 8 ++--- arch/x86/include/asm/paravirt_types.h | 13 +++---- arch/x86/kernel/alternative.c | 52 +++++++++++++-------------- arch/x86/kernel/kprobes/opt.c | 16 ++++----- arch/x86/kernel/paravirt.c | 22 ++++++------ arch/x86/kernel/paravirt_patch.c | 42 +++++++++++----------- arch/x86/tools/insn_decoder_test.c | 8 ++--- arch/x86/tools/insn_sanity.c | 28 +++++++-------- 8 files changed, 93 insertions(+), 96 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 10c99ce1fead..50f647e131bc 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -337,7 +337,7 @@ static int alloc_pebs_buffer(int cpu) struct debug_store *ds = hwev->ds; size_t bsiz = x86_pmu.pebs_buffer_size; int max, node = cpu_to_node(cpu); - void *buffer, *ibuffer, *cea; + void *buffer, *insn_buff, *cea; if (!x86_pmu.pebs) return 0; @@ -351,12 +351,12 @@ static int alloc_pebs_buffer(int cpu) * buffer then. 
*/ if (x86_pmu.intel_cap.pebs_format < 2) { - ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); - if (!ibuffer) { + insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); + if (!insn_buff) { dsfree_pages(buffer, bsiz); return -ENOMEM; } - per_cpu(insn_buffer, cpu) = ibuffer; + per_cpu(insn_buffer, cpu) = insn_buff; } hwev->ds_pebs_vaddr = buffer; /* Update the cpu entry area mapping */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index ae8d6ddfe39a..94b7281e7815 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -88,7 +88,7 @@ struct pv_init_ops { * the number of bytes of code generated, as we nop pad the * rest in generic code. */ - unsigned (*patch)(u8 type, void *insnbuf, + unsigned (*patch)(u8 type, void *insn_buff, unsigned long addr, unsigned len); } __no_randomize_layout; @@ -370,14 +370,11 @@ extern struct paravirt_patch_template pv_ops; /* Simple instruction patching code. */ #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t" -unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); -unsigned paravirt_patch_default(u8 type, void *insnbuf, - unsigned long addr, unsigned len); +unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len); +unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len); +unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end); -unsigned paravirt_patch_insns(void *insnbuf, unsigned len, - const char *start, const char *end); - -unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len); +unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len); int paravirt_disable_iospace(void); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 9a79c7808f9c..92eafd1d3493 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -275,7 +275,7 @@ static inline bool is_jmp(const u8 opcode) } static void __init_or_module -recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) +recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff) { u8 *next_rip, *tgt_rip; s32 n_dspl, o_dspl; @@ -284,7 +284,7 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) if (a->replacementlen != 5) return; - o_dspl = *(s32 *)(insnbuf + 1); + o_dspl = *(s32 *)(insn_buff + 1); /* next_rip of the replacement JMP */ next_rip = repl_insn + a->replacementlen; @@ -310,9 +310,9 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) two_byte_jmp: n_dspl -= 2; - insnbuf[0] = 0xeb; - insnbuf[1] = (s8)n_dspl; - add_nops(insnbuf + 2, 3); + insn_buff[0] = 0xeb; + insn_buff[1] = (s8)n_dspl; + add_nops(insn_buff + 2, 3); repl_len = 2; goto done; @@ -320,8 +320,8 @@ two_byte_jmp: five_byte_jmp: n_dspl -= 5; - insnbuf[0] = 0xe9; - *(s32 *)&insnbuf[1] = n_dspl; + insn_buff[0] = 0xe9; + *(s32 *)&insn_buff[1] = n_dspl; repl_len = 5; @@ -368,7 +368,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, { struct alt_instr *a; u8 *instr, *replacement; - u8 insnbuf[MAX_PATCH_LEN]; + u8 insn_buff[MAX_PATCH_LEN]; DPRINTK("alt table %px, -> %px", start, end); /* @@ -381,11 +381,11 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, * order. 
*/ for (a = start; a < end; a++) { - int insnbuf_sz = 0; + int insn_buff_sz = 0; instr = (u8 *)&a->instr_offset + a->instr_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset; - BUG_ON(a->instrlen > sizeof(insnbuf)); + BUG_ON(a->instrlen > sizeof(insn_buff)); BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); if (!boot_cpu_has(a->cpuid)) { if (a->padlen > 1) @@ -403,8 +403,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr); DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement); - memcpy(insnbuf, replacement, a->replacementlen); - insnbuf_sz = a->replacementlen; + memcpy(insn_buff, replacement, a->replacementlen); + insn_buff_sz = a->replacementlen; /* * 0xe8 is a relative jump; fix the offset. @@ -412,24 +412,24 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, * Instruction length is checked before the opcode to avoid * accessing uninitialized bytes for zero-length replacements. */ - if (a->replacementlen == 5 && *insnbuf == 0xe8) { - *(s32 *)(insnbuf + 1) += replacement - instr; + if (a->replacementlen == 5 && *insn_buff == 0xe8) { + *(s32 *)(insn_buff + 1) += replacement - instr; DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx", - *(s32 *)(insnbuf + 1), - (unsigned long)instr + *(s32 *)(insnbuf + 1) + 5); + *(s32 *)(insn_buff + 1), + (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5); } if (a->replacementlen && is_jmp(replacement[0])) - recompute_jump(a, instr, replacement, insnbuf); + recompute_jump(a, instr, replacement, insn_buff); if (a->instrlen > a->replacementlen) { - add_nops(insnbuf + a->replacementlen, + add_nops(insn_buff + a->replacementlen, a->instrlen - a->replacementlen); - insnbuf_sz += a->instrlen - a->replacementlen; + insn_buff_sz += a->instrlen - a->replacementlen; } - DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr); + DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr); - text_poke_early(instr, insnbuf, insnbuf_sz); + text_poke_early(instr, insn_buff, insn_buff_sz); } } @@ -591,22 +591,22 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, struct paravirt_patch_site *end) { struct paravirt_patch_site *p; - char insnbuf[MAX_PATCH_LEN]; + char insn_buff[MAX_PATCH_LEN]; for (p = start; p < end; p++) { unsigned int used; BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ - memcpy(insnbuf, p->instr, p->len); - used = pv_ops.init.patch(p->instrtype, insnbuf, + memcpy(insn_buff, p->instr, p->len); + used = pv_ops.init.patch(p->instrtype, insn_buff, (unsigned long)p->instr, p->len); BUG_ON(used > p->len); /* Pad the rest with nops */ - add_nops(insnbuf + used, p->len - used); - text_poke_early(p->instr, insnbuf, p->len); + add_nops(insn_buff + used, p->len - used); + text_poke_early(p->instr, insn_buff, p->len); } } extern struct paravirt_patch_site __start_parainstructions[], diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index f14262952015..e77a895a9ecc 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -431,7 +431,7 @@ err: void arch_optimize_kprobes(struct list_head *oplist) { struct optimized_kprobe *op, *tmp; - u8 insn_buf[RELATIVEJUMP_SIZE]; + u8 insn_buff[RELATIVEJUMP_SIZE]; list_for_each_entry_safe(op, tmp, oplist, list) { s32 rel = (s32)((long)op->optinsn.insn - @@ -443,10 +443,10 @@ void arch_optimize_kprobes(struct list_head *oplist) memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, 
RELATIVE_ADDR_SIZE); - insn_buf[0] = RELATIVEJUMP_OPCODE; - *(s32 *)(&insn_buf[1]) = rel; + insn_buff[0] = RELATIVEJUMP_OPCODE; + *(s32 *)(&insn_buff[1]) = rel; - text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, + text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE, op->optinsn.insn); list_del_init(&op->list); @@ -456,12 +456,12 @@ void arch_optimize_kprobes(struct list_head *oplist) /* Replace a relative jump with a breakpoint (int3). */ void arch_unoptimize_kprobe(struct optimized_kprobe *op) { - u8 insn_buf[RELATIVEJUMP_SIZE]; + u8 insn_buff[RELATIVEJUMP_SIZE]; /* Set int3 to first byte for kprobes */ - insn_buf[0] = BREAKPOINT_INSTRUCTION; - memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); - text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, + insn_buff[0] = BREAKPOINT_INSTRUCTION; + memcpy(insn_buff + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE, op->optinsn.insn); } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 544d386ded45..b7d22912e20b 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -70,11 +70,11 @@ struct branch { u32 delta; } __attribute__((packed)); -static unsigned paravirt_patch_call(void *insnbuf, const void *target, +static unsigned paravirt_patch_call(void *insn_buff, const void *target, unsigned long addr, unsigned len) { const int call_len = 5; - struct branch *b = insnbuf; + struct branch *b = insn_buff; unsigned long delta = (unsigned long)target - (addr+call_len); if (len < call_len) { @@ -97,10 +97,10 @@ u64 notrace _paravirt_ident_64(u64 x) return x; } -static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, +static unsigned paravirt_patch_jmp(void *insn_buff, const void *target, unsigned long addr, unsigned len) { - struct branch *b = insnbuf; + struct branch *b = insn_buff; unsigned long delta = (unsigned long)target - (addr+5); if (len < 5) { @@ -125,7 +125,7 @@ void __init native_pv_lock_init(void) static_branch_disable(&virt_spin_lock_key); } -unsigned paravirt_patch_default(u8 type, void *insnbuf, +unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len) { /* @@ -137,28 +137,28 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf, if (opfunc == NULL) /* If there's no function, patch it with a ud2a (BUG) */ - ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); + ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a)); else if (opfunc == _paravirt_nop) ret = 0; #ifdef CONFIG_PARAVIRT_XXL /* identity functions just return their single argument */ else if (opfunc == _paravirt_ident_64) - ret = paravirt_patch_ident_64(insnbuf, len); + ret = paravirt_patch_ident_64(insn_buff, len); else if (type == PARAVIRT_PATCH(cpu.iret) || type == PARAVIRT_PATCH(cpu.usergs_sysret64)) /* If operation requires a jmp, then jmp */ - ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len); + ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len); #endif else /* Otherwise call the function. 
*/ - ret = paravirt_patch_call(insnbuf, opfunc, addr, len); + ret = paravirt_patch_call(insn_buff, opfunc, addr, len); return ret; } -unsigned paravirt_patch_insns(void *insnbuf, unsigned len, +unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end) { unsigned insn_len = end - start; @@ -166,7 +166,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len, /* Alternative instruction is too large for the patch site and we cannot continue: */ BUG_ON(insn_len > len || start == NULL); - memcpy(insnbuf, start, insn_len); + memcpy(insn_buff, start, insn_len); return insn_len; } diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c index 37b1d43d1e17..3eff63c090d2 100644 --- a/arch/x86/kernel/paravirt_patch.c +++ b/arch/x86/kernel/paravirt_patch.c @@ -10,12 +10,12 @@ #define PEND(d, m) \ (PSTART(d, m) + sizeof(patch_data_##d.m)) -#define PATCH(d, m, ibuf, len) \ - paravirt_patch_insns(ibuf, len, PSTART(d, m), PEND(d, m)) +#define PATCH(d, m, insn_buff, len) \ + paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m)) -#define PATCH_CASE(ops, m, data, ibuf, len) \ +#define PATCH_CASE(ops, m, data, insn_buff, len) \ case PARAVIRT_PATCH(ops.m): \ - return PATCH(data, ops##_##m, ibuf, len) + return PATCH(data, ops##_##m, insn_buff, len) #ifdef CONFIG_PARAVIRT_XXL struct patch_xxl { @@ -57,10 +57,10 @@ static const struct patch_xxl patch_data_xxl = { # endif }; -unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len) +unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len) { #ifdef CONFIG_X86_64 - return PATCH(xxl, mov64, insnbuf, len); + return PATCH(xxl, mov64, insn_buff, len); #endif return 0; } @@ -83,44 +83,44 @@ static const struct patch_lock patch_data_lock = { }; #endif /* CONFIG_PARAVIRT_SPINLOCKS */ -unsigned int native_patch(u8 type, void *ibuf, unsigned long addr, +unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len) { switch (type) { #ifdef CONFIG_PARAVIRT_XXL - PATCH_CASE(irq, restore_fl, xxl, ibuf, len); - PATCH_CASE(irq, save_fl, xxl, ibuf, len); - PATCH_CASE(irq, irq_enable, xxl, ibuf, len); - PATCH_CASE(irq, irq_disable, xxl, ibuf, len); + PATCH_CASE(irq, restore_fl, xxl, insn_buff, len); + PATCH_CASE(irq, save_fl, xxl, insn_buff, len); + PATCH_CASE(irq, irq_enable, xxl, insn_buff, len); + PATCH_CASE(irq, irq_disable, xxl, insn_buff, len); - PATCH_CASE(mmu, read_cr2, xxl, ibuf, len); - PATCH_CASE(mmu, read_cr3, xxl, ibuf, len); - PATCH_CASE(mmu, write_cr3, xxl, ibuf, len); + PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len); + PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len); + PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len); # ifdef CONFIG_X86_64 - PATCH_CASE(cpu, usergs_sysret64, xxl, ibuf, len); - PATCH_CASE(cpu, swapgs, xxl, ibuf, len); - PATCH_CASE(cpu, wbinvd, xxl, ibuf, len); + PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len); + PATCH_CASE(cpu, swapgs, xxl, insn_buff, len); + PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len); # else - PATCH_CASE(cpu, iret, xxl, ibuf, len); + PATCH_CASE(cpu, iret, xxl, insn_buff, len); # endif #endif #ifdef CONFIG_PARAVIRT_SPINLOCKS case PARAVIRT_PATCH(lock.queued_spin_unlock): if (pv_is_native_spin_unlock()) - return PATCH(lock, queued_spin_unlock, ibuf, len); + return PATCH(lock, queued_spin_unlock, insn_buff, len); break; case PARAVIRT_PATCH(lock.vcpu_is_preempted): if (pv_is_native_vcpu_is_preempted()) - return PATCH(lock, vcpu_is_preempted, ibuf, len); + return PATCH(lock, vcpu_is_preempted, 
insn_buff, len); break; #endif default: break; } - return paravirt_patch_default(type, ibuf, addr, len); + return paravirt_patch_default(type, insn_buff, addr, len); } diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c index a3b4fd954931..34c2b3691f4f 100644 --- a/arch/x86/tools/insn_decoder_test.c +++ b/arch/x86/tools/insn_decoder_test.c @@ -119,7 +119,7 @@ static void parse_args(int argc, char **argv) int main(int argc, char **argv) { char line[BUFSIZE], sym[BUFSIZE] = ""; - unsigned char insn_buf[16]; + unsigned char insn_buff[16]; struct insn insn; int insns = 0; int warnings = 0; @@ -138,7 +138,7 @@ int main(int argc, char **argv) } insns++; - memset(insn_buf, 0, 16); + memset(insn_buff, 0, 16); strcpy(copy, line); tab1 = strchr(copy, '\t'); if (!tab1) @@ -151,13 +151,13 @@ int main(int argc, char **argv) *tab2 = '\0'; /* Characters beyond tab2 aren't examined */ while (s < tab2) { if (sscanf(s, "%x", &b) == 1) { - insn_buf[nb++] = (unsigned char) b; + insn_buff[nb++] = (unsigned char) b; s += 3; } else break; } /* Decode an instruction */ - insn_init(&insn, insn_buf, sizeof(insn_buf), x86_64); + insn_init(&insn, insn_buff, sizeof(insn_buff), x86_64); insn_get_length(&insn); if (insn.length != nb) { warnings++; diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c index 1972565ab106..7adec7b490fd 100644 --- a/arch/x86/tools/insn_sanity.c +++ b/arch/x86/tools/insn_sanity.c @@ -96,7 +96,7 @@ static void dump_insn(FILE *fp, struct insn *insn) } static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter, - unsigned char *insn_buf, struct insn *insn) + unsigned char *insn_buff, struct insn *insn) { int i; @@ -109,7 +109,7 @@ static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter, /* Input a decoded instruction sequence directly */ fprintf(fp, " $ echo "); for (i = 0; i < MAX_INSN_SIZE; i++) - fprintf(fp, " %02x", insn_buf[i]); + fprintf(fp, " %02x", insn_buff[i]); fprintf(fp, " | %s -i -\n", prog); if (!input_file) { @@ -137,7 +137,7 @@ fail: } /* Read given instruction sequence from the input file */ -static int read_next_insn(unsigned char *insn_buf) +static int read_next_insn(unsigned char *insn_buff) { char buf[256] = "", *tmp; int i; @@ -147,7 +147,7 @@ static int read_next_insn(unsigned char *insn_buf) return 0; for (i = 0; i < MAX_INSN_SIZE; i++) { - insn_buf[i] = (unsigned char)strtoul(tmp, &tmp, 16); + insn_buff[i] = (unsigned char)strtoul(tmp, &tmp, 16); if (*tmp != ' ') break; } @@ -155,19 +155,19 @@ static int read_next_insn(unsigned char *insn_buf) return i; } -static int generate_insn(unsigned char *insn_buf) +static int generate_insn(unsigned char *insn_buff) { int i; if (input_file) - return read_next_insn(insn_buf); + return read_next_insn(insn_buff); /* Fills buffer with random binary up to MAX_INSN_SIZE */ for (i = 0; i < MAX_INSN_SIZE - 1; i += 2) - *(unsigned short *)(&insn_buf[i]) = random() & 0xffff; + *(unsigned short *)(&insn_buff[i]) = random() & 0xffff; while (i < MAX_INSN_SIZE) - insn_buf[i++] = random() & 0xff; + insn_buff[i++] = random() & 0xff; return i; } @@ -239,31 +239,31 @@ int main(int argc, char **argv) int insns = 0; int errors = 0; unsigned long i; - unsigned char insn_buf[MAX_INSN_SIZE * 2]; + unsigned char insn_buff[MAX_INSN_SIZE * 2]; parse_args(argc, argv); /* Prepare stop bytes with NOPs */ - memset(insn_buf + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE); + memset(insn_buff + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE); for (i = 0; i < iter_end; i++) { - if 
(generate_insn(insn_buf) <= 0) + if (generate_insn(insn_buff) <= 0) break; if (i < iter_start) /* Skip to given iteration number */ continue; /* Decode an instruction */ - insn_init(&insn, insn_buf, sizeof(insn_buf), x86_64); + insn_init(&insn, insn_buff, sizeof(insn_buff), x86_64); insn_get_length(&insn); if (insn.next_byte <= insn.kaddr || insn.kaddr + MAX_INSN_SIZE < insn.next_byte) { /* Access out-of-range memory */ - dump_stream(stderr, "Error: Found an access violation", i, insn_buf, &insn); + dump_stream(stderr, "Error: Found an access violation", i, insn_buff, &insn); errors++; } else if (verbose && !insn_complete(&insn)) - dump_stream(stdout, "Info: Found an undecodable input", i, insn_buf, &insn); + dump_stream(stdout, "Info: Found an undecodable input", i, insn_buff, &insn); else if (verbose >= 2) dump_insn(stdout, &insn); insns++; From 46938cc8ab91354e6d751dc0790ddb4244b6703a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 25 Apr 2019 13:17:01 +0200 Subject: [PATCH 8/8] x86/paravirt: Rename paravirt_patch_site::instrtype to paravirt_patch_site::type It's used as 'type' in almost every paravirt patching function, so standardize the field name from the somewhat weird 'instrtype' name to 'type'. Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/paravirt_types.h | 4 ++-- arch/x86/kernel/alternative.c | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 94b7281e7815..946f8f1f1efc 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -672,8 +672,8 @@ u64 _paravirt_ident_64(u64); /* These all sit in the .parainstructions section to tell us what to patch. */ struct paravirt_patch_site { - u8 *instr; /* original instructions */ - u8 instrtype; /* type of this instruction */ + u8 *instr; /* original instructions */ + u8 type; /* type of this instruction */ u8 len; /* length of original instruction */ }; diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 92eafd1d3493..7ea5a3764fcc 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -599,8 +599,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ memcpy(insn_buff, p->instr, p->len); - used = pv_ops.init.patch(p->instrtype, insn_buff, - (unsigned long)p->instr, p->len); + used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len); BUG_ON(used > p->len);
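Taken together, the series leaves the patching path as: apply_paravirt() walks the .parainstructions sites, asks native_patch() (or the hypervisor's patch hook) for a replacement byte sequence taken from the patch_data_* tables, and pads whatever is left of the patch site with NOPs. A minimal, self-contained userspace C sketch of that control flow follows; the two-entry table, the site layout and the assert()/0x90 stand-ins for BUG_ON()/add_nops() are illustrative assumptions, not kernel code:

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	/* A patch site, as in struct paravirt_patch_site: original bytes + type + length. */
	struct patch_site {
		unsigned char	*instr;	/* original instructions */
		unsigned char	type;	/* type of this instruction */
		unsigned char	len;	/* length of the original instructions */
	};

	/* Native replacement bytes keyed by type: a cut-down patch_data_xxl. */
	static const unsigned char native_cli[]     = { 0xfa };	/* cli */
	static const unsigned char native_save_fl[] = { 0x9c, 0x58 };	/* pushf; pop %rax */

	enum { TYPE_IRQ_DISABLE, TYPE_SAVE_FL };

	static unsigned int native_patch(unsigned char type, void *insn_buff, unsigned int len)
	{
		const unsigned char *src;
		unsigned int src_len;

		switch (type) {
		case TYPE_IRQ_DISABLE:	src = native_cli;	src_len = sizeof(native_cli);		break;
		case TYPE_SAVE_FL:	src = native_save_fl;	src_len = sizeof(native_save_fl);	break;
		default:		return 0;	/* unknown type: leave the call to the pv op in place */
		}

		assert(src_len <= len);		/* an over-sized patch is a hard bug */
		memcpy(insn_buff, src, src_len);
		return src_len;
	}

	/* Roughly what apply_paravirt() does per site: patch, then pad the rest with NOPs. */
	static void apply_site(struct patch_site *p)
	{
		unsigned char insn_buff[16];
		unsigned int used;

		memcpy(insn_buff, p->instr, p->len);
		used = native_patch(p->type, insn_buff, p->len);
		assert(used <= p->len);
		memset(insn_buff + used, 0x90, p->len - used);	/* add_nops(): single-byte NOPs */
		memcpy(p->instr, insn_buff, p->len);		/* text_poke_early() in the kernel */
	}

	int main(void)
	{
		unsigned char site[5] = { 0xe8, 0x00, 0x00, 0x00, 0x00 };	/* 5-byte call to the pv op */
		struct patch_site p = { site, TYPE_SAVE_FL, sizeof(site) };
		unsigned int i;

		apply_site(&p);
		for (i = 0; i < sizeof(site); i++)
			printf("%02x ", site[i]);
		printf("\n");	/* prints: 9c 58 90 90 90 */
		return 0;
	}

Running the sketch prints "9c 58 90 90 90": the 5-byte call site is overwritten with the 2-byte native save_fl sequence plus three NOPs, mirroring what text_poke_early() ends up writing on a native boot.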