/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H
/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */
#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK 0x1
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
unsigned int create_branch(const unsigned int *addr,
                           unsigned long target, int flags);
unsigned int create_cond_branch(const unsigned int *addr,
                                unsigned long target, int flags);
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int raw_patch_instruction(unsigned int *addr, unsigned int instr);
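/*
 * Illustrative sketch only (not part of this header): patch the instruction
 * at "addr" to be a relative unconditional branch to "target", or a "bl"
 * variant that also sets the link register:
 *
 *	patch_branch(addr, target, 0);
 *	patch_branch(addr, target, BRANCH_SET_LINK);
 *
 * Both assume "target" is reachable by the +/- 32 MB displacement of a
 * relative branch, which can be checked with is_offset_in_branch_range().
 */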
static inline unsigned long patch_site_addr(s32 *site)
{
        return (unsigned long)site + *site;
}
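/*
 * A patch site stores its target as a 32-bit offset relative to the site
 * itself, so patch_site_addr() simply adds the stored offset back to the
 * site's own address. Worked example (addresses purely illustrative): an
 * s32 site at 0xc000000000010000 holding 0x100 resolves to
 * 0xc000000000010000 + 0x100 = 0xc000000000010100.
 */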
static inline int patch_instruction_site(s32 *site, unsigned int instr)
{
        return patch_instruction((unsigned int *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
        return patch_branch((unsigned int *)patch_site_addr(site), target, flags);
}

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
                                     unsigned int set)
{
        return patch_instruction(addr, (*addr & ~clr) | set);
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
        return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
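/*
 * Illustrative sketch only: modify_instruction() rewrites the instruction as
 * (*addr & ~clr) | set. For example, clearing bit 0 (the LK bit of a branch)
 * turns a "bl" at "addr" back into a plain "b":
 *
 *	modify_instruction(addr, 0x1, 0);
 */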
int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
unsigned long branch_target(const unsigned int *instr);
unsigned int translate_branch(const unsigned int *dest,
                              const unsigned int *src);
extern bool is_conditional_branch(unsigned int instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
        extern unsigned int name; \
        __patch_exception((exc), (unsigned long)&name); \
} while (0)
#endif
#define OP_RT_RA_MASK 0xffff0000UL
#define LIS_R2 0x3c400000UL
#define ADDIS_R2_R12 0x3c4c0000UL
#define ADDI_R2_R2 0x38420000UL
static inline unsigned long ppc_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
        u32 *insn = func;

        /*
         * A PPC64 ABIv2 function may have a local and a global entry
         * point. We need to use the local entry point when patching
         * functions, so identify and step over the global entry point
         * sequence.
         *
         * The global entry point sequence is always of the form:
         *
         *      addis   r2,r12,XXXX
         *      addi    r2,r2,XXXX
         *
         * A linker optimisation may convert the addis to lis:
         *
         *      lis     r2,XXXX
         *      addi    r2,r2,XXXX
         */
        if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
             ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
            ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
                return (unsigned long)(insn + 2);
        else
                return (unsigned long)func;
#elif defined(PPC64_ELF_ABI_v1)
        /*
         * On PPC64 ABIv1 the function pointer actually points to the
         * function's descriptor. The first entry in the descriptor is the
         * address of the function text.
         */
        return ((func_descr_t *)func)->entry;
#else
        return (unsigned long)func;
#endif
}
static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
        /* On PPC64 ABIv2 the global entry point is the function's address */
        return (unsigned long)func;
#else
        /* In all other cases there is no change vs ppc_function_entry() */
        return ppc_function_entry(func);
#endif
}
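/*
 * Illustrative sketch only ("call_site" and "my_func" are hypothetical):
 * patch a call site to branch-and-link to a function's local entry point,
 * which on ELFv2 skips the global-entry TOC setup described above:
 *
 *	patch_branch(call_site, ppc_function_entry(my_func), BRANCH_SET_LINK);
 */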
/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we look up the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
        unsigned long addr;
#ifdef PPC64_ELF_ABI_v1
        /* check for dot variant */
        char dot_name[1 + KSYM_NAME_LEN];
        bool dot_appended = false;

        if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
                return 0;

        if (name[0] != '.') {
                dot_name[0] = '.';
                dot_name[1] = '\0';
                strlcat(dot_name, name, sizeof(dot_name));
                dot_appended = true;
        } else {
                dot_name[0] = '\0';
                strlcat(dot_name, name, sizeof(dot_name));
        }
        addr = kallsyms_lookup_name(dot_name);
        if (!addr && dot_appended)
                /* Let's try the original non-dot symbol lookup */
                addr = kallsyms_lookup_name(name);
#elif defined(PPC64_ELF_ABI_v2)
        addr = kallsyms_lookup_name(name);
        if (addr)
                addr = ppc_function_entry((void *)addr);
#else
        addr = kallsyms_lookup_name(name);
#endif
        return addr;
}
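/*
 * Illustrative sketch only ("_mcount" is just an example symbol name):
 *
 *	unsigned long entry = ppc_kallsyms_lookup_name("_mcount");
 *
 * On ABIv1 this resolves the dot variant ("._mcount", the text entry);
 * on ABIv2 it returns the symbol's local entry point.
 */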
#ifdef CONFIG_PPC64
/*
 * Some instruction encodings commonly used in dynamic ftracing
 * and function live patching.
 */
/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET 24
#else
#define R2_STACK_OFFSET 40
#endif
#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
                         ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
                         ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
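/*
 * For reference, assuming the standard stack frame layout, these encode:
 *
 *	PPC_INST_LD_TOC:	ld	r2,R2_STACK_OFFSET(r1)
 *	PPC_INST_STD_LR:	std	r0,PPC_LR_STKOFF(r1)
 */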
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_CODE_PATCHING_H */