
Merge tag 'riscv-for-linus-4.17-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux

Pull RISC-V updates from Palmer Dabbelt:
 "This contains the new features we'd like to incorporate into the
  RISC-V port for 4.17. We might have a bit more stuff land later in the
  merge window, but I wanted to get this out earlier just so everyone
  can see where we currently stand.

  A short summary of the changes is:

   - We've added support for dynamic ftrace on RISC-V targets.

   - There have been a handful of cleanups to our atomic and locking
     routines. They now more closely match the released RISC-V memory
     model draft.

   - Our module loading support has been cleaned up and is now enabled
     by default, despite some limitations still existing.

   - A patch to define COMMANDLINE_FORCE instead of COMMANDLINE_OVERRIDE
     so the generic device tree code picks up handling all our command
     line stuff.

  There's more information in the merge commits for each patch set"

* tag 'riscv-for-linus-4.17-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux: (21 commits)
  RISC-V: Rename CONFIG_CMDLINE_OVERRIDE to CONFIG_CMDLINE_FORCE
  RISC-V: Add definition of relocation types
  RISC-V: Enable module support in defconfig
  RISC-V: Support SUB32 relocation type in kernel module
  RISC-V: Support ADD32 relocation type in kernel module
  RISC-V: Support ALIGN relocation type in kernel module
  RISC-V: Support RVC_BRANCH/JUMP relocation type in kernel module
  RISC-V: Support HI20/LO12_I/LO12_S relocation type in kernel module
  RISC-V: Support CALL relocation type in kernel module
  RISC-V: Support GOT_HI20/CALL_PLT relocation type in kernel module
  RISC-V: Add section of GOT.PLT for kernel module
  RISC-V: Add sections of PLT and GOT for kernel module
  riscv/atomic: Strengthen implementations with fences
  riscv/spinlock: Strengthen implementations with fences
  riscv/barrier: Define __smp_{store_release,load_acquire}
  riscv/ftrace: Add HAVE_FUNCTION_GRAPH_RET_ADDR_PTR support
  riscv/ftrace: Add DYNAMIC_FTRACE_WITH_REGS support
  riscv/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support
  riscv/ftrace: Add dynamic function graph tracer support
  riscv/ftrace: Add dynamic function tracer support
  ...
Linus Torvalds 2018-04-04 16:43:47 -07:00
commit 527cd20771
20 changed files with 1601 additions and 253 deletions


@@ -115,6 +115,9 @@ config ARCH_RV64I
select 64BIT
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
endchoice
@@ -132,6 +135,10 @@ choice
bool "medium any code model"
endchoice
config MODULE_SECTIONS
bool
select HAVE_MOD_ARCH_SPECIFIC
choice
prompt "Maximum Physical Memory"
default MAXPHYSMEM_2GB if 32BIT
@@ -142,6 +149,7 @@ choice
bool "2GiB"
config MAXPHYSMEM_128GB
depends on 64BIT && CMODEL_MEDANY
select MODULE_SECTIONS if MODULES
bool "128GiB"
endchoice
@@ -282,7 +290,7 @@ config CMDLINE_BOOL
in CONFIG_CMDLINE.
The built-in options will be concatenated to the default command
line if CMDLINE_OVERRIDE is set to 'N'. Otherwise, the default
line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
command line will be ignored and replaced by the built-in string.
config CMDLINE
@@ -292,7 +300,7 @@ config CMDLINE
help
Supply command-line options at build time by entering them here.
config CMDLINE_OVERRIDE
config CMDLINE_FORCE
bool "Built-in command line overrides bootloader arguments"
depends on CMDLINE_BOOL
help
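
For context on the rename: the generic device-tree code in drivers/of/fdt.c keys its built-in command-line handling off CONFIG_CMDLINE_FORCE, so adopting that name lets RISC-V drop its private CONFIG_CMDLINE_OVERRIDE plumbing. A hedged sketch of the shape of that generic logic (simplified; the real code lives in early_init_dt_scan_chosen() and differs in detail):

/* Hedged sketch, not the literal kernel source. */
static void merge_bootargs(char *data)  /* data = /chosen/bootargs */
{
#ifdef CONFIG_CMDLINE
#ifdef CONFIG_CMDLINE_FORCE
        /* Built-in command line always replaces the bootloader's. */
        strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
        /* Built-in command line is only a fallback for empty bootargs. */
        if (!data[0])
                strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif
}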


@@ -11,6 +11,9 @@
LDFLAGS :=
OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
LDFLAGS_vmlinux := --no-relax
endif
KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
@@ -56,6 +59,11 @@ endif
ifeq ($(CONFIG_CMODEL_MEDANY),y)
KBUILD_CFLAGS += -mcmodel=medany
endif
ifeq ($(CONFIG_MODULE_SECTIONS),y)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/riscv/kernel/module.lds
endif
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses. While unaligned accesses are explicitly allowed in the


@@ -73,3 +73,5 @@ CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
# CONFIG_RCU_TRACE is not set
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y


@@ -24,6 +24,20 @@
#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory"); \
__ret; \
})
#define __atomic_op_release(op, args...) \
({ \
__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); \
op##_relaxed(args); \
})
static __always_inline int atomic_read(const atomic_t *v)
{
return READ_ONCE(v->counter);
@@ -50,22 +64,23 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
* have the AQ or RL bits set. These don't return anything, so there's only
* one version to worry about.
*/
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
__asm__ __volatile__ ( \
"amo" #asm_op "." #asm_type " zero, %1, %0" \
: "+A" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " zero, %1, %0" \
: "+A" (v->counter) \
: "r" (I) \
: "memory"); \
} \
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_OP (op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_OP (op, asm_op, I, w, int, ) \
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_OP (op, asm_op, I, w, int, ) \
ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif
@@ -79,75 +94,115 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_OPS
/*
* Atomic ops that have ordered, relaxed, acquire, and relese variants.
* Atomic ops that have ordered, relaxed, acquire, and release variants.
* There's two flavors of these: the arithmatic ops have both fetch and return
* versions, while the logical ops only have fetch versions.
*/
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix) \
static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
"amo" #asm_op "." #asm_type #asm_or " %1, %2, %0" \
: "+A" (v->counter), "=r" (ret) \
: "r" (I) \
: "memory"); \
return ret; \
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " %1, %2, %0" \
: "+A" (v->counter), "=r" (ret) \
: "r" (I) \
: "memory"); \
return ret; \
} \
static __always_inline \
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
: "+A" (v->counter), "=r" (ret) \
: "r" (I) \
: "memory"); \
return ret; \
}
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##op##c_or(i, v) c_op I; \
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##op(i, v) c_op I; \
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, )
#define ATOMIC_OPS(op, asm_op, c_op, I) \
ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, d, long, 64) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#define ATOMIC_OPS(op, asm_op, c_op, I) \
ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
ATOMIC_FETCH_OP( op, asm_op, I, d, long, 64) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif
ATOMIC_OPS(add, add, +, i, , _relaxed)
ATOMIC_OPS(add, add, +, i, .aq , _acquire)
ATOMIC_OPS(add, add, +, i, .rl , _release)
ATOMIC_OPS(add, add, +, i, .aqrl, )
ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)
ATOMIC_OPS(sub, add, +, -i, , _relaxed)
ATOMIC_OPS(sub, add, +, -i, .aq , _acquire)
ATOMIC_OPS(sub, add, +, -i, .rl , _release)
ATOMIC_OPS(sub, add, +, -i, .aqrl, )
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_add_return atomic_add_return
#define atomic_sub_return atomic_sub_return
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#define atomic_fetch_add atomic_fetch_add
#define atomic_fetch_sub atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_add_return atomic64_add_return
#define atomic64_sub_return atomic64_sub_return
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#define atomic64_fetch_add atomic64_fetch_add
#define atomic64_fetch_sub atomic64_fetch_sub
#endif
#undef ATOMIC_OPS
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \
ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int, )
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \
ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int, ) \
ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif
ATOMIC_OPS(and, and, i, , _relaxed)
ATOMIC_OPS(and, and, i, .aq , _acquire)
ATOMIC_OPS(and, and, i, .rl , _release)
ATOMIC_OPS(and, and, i, .aqrl, )
ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)
ATOMIC_OPS( or, or, i, , _relaxed)
ATOMIC_OPS( or, or, i, .aq , _acquire)
ATOMIC_OPS( or, or, i, .rl , _release)
ATOMIC_OPS( or, or, i, .aqrl, )
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#define atomic_fetch_and atomic_fetch_and
#define atomic_fetch_or atomic_fetch_or
#define atomic_fetch_xor atomic_fetch_xor
ATOMIC_OPS(xor, xor, i, , _relaxed)
ATOMIC_OPS(xor, xor, i, .aq , _acquire)
ATOMIC_OPS(xor, xor, i, .rl , _release)
ATOMIC_OPS(xor, xor, i, .aqrl, )
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#define atomic64_fetch_and atomic64_fetch_and
#define atomic64_fetch_or atomic64_fetch_or
#define atomic64_fetch_xor atomic64_fetch_xor
#endif
#undef ATOMIC_OPS
@@ -157,22 +212,24 @@ ATOMIC_OPS(xor, xor, i, .aqrl, )
/*
* The extra atomic operations that are constructed from one of the core
* AMO-based operations above (aside from sub, which is easier to fit above).
* These are required to perform a barrier, but they're OK this way because
* atomic_*_return is also required to perform a barrier.
* These are required to perform a full barrier, but they're OK this way
* because atomic_*_return is also required to perform a full barrier.
*
*/
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
static __always_inline bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
static __always_inline \
bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, func_op, comp_op, I) \
ATOMIC_OP (op, func_op, comp_op, I, int, )
#define ATOMIC_OPS(op, func_op, comp_op, I) \
ATOMIC_OP(op, func_op, comp_op, I, int, )
#else
#define ATOMIC_OPS(op, func_op, comp_op, I) \
ATOMIC_OP (op, func_op, comp_op, I, int, ) \
ATOMIC_OP (op, func_op, comp_op, I, long, 64)
#define ATOMIC_OPS(op, func_op, comp_op, I) \
ATOMIC_OP(op, func_op, comp_op, I, int, ) \
ATOMIC_OP(op, func_op, comp_op, I, long, 64)
#endif
ATOMIC_OPS(add_and_test, add, ==, 0)
@@ -182,51 +239,87 @@ ATOMIC_OPS(add_negative, add, <, 0)
#undef ATOMIC_OP
#undef ATOMIC_OPS
#define ATOMIC_OP(op, func_op, I, c_type, prefix) \
static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
atomic##prefix##_##func_op(I, v); \
#define ATOMIC_OP(op, func_op, I, c_type, prefix) \
static __always_inline \
void atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
atomic##prefix##_##func_op(I, v); \
}
#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##func_op(I, v); \
#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##func_op##_relaxed(I, v); \
} \
static __always_inline \
c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##func_op(I, v); \
}
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##op(v) c_op I; \
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##op##_relaxed(v) c_op I; \
} \
static __always_inline \
c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
{ \
return atomic##prefix##_fetch_##op(v) c_op I; \
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
ATOMIC_OP (op, asm_op, I, int, ) \
ATOMIC_FETCH_OP (op, asm_op, I, int, ) \
#define ATOMIC_OPS(op, asm_op, c_op, I) \
ATOMIC_OP( op, asm_op, I, int, ) \
ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
ATOMIC_OP (op, asm_op, I, int, ) \
ATOMIC_FETCH_OP (op, asm_op, I, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
ATOMIC_OP (op, asm_op, I, long, 64) \
ATOMIC_FETCH_OP (op, asm_op, I, long, 64) \
#define ATOMIC_OPS(op, asm_op, c_op, I) \
ATOMIC_OP( op, asm_op, I, int, ) \
ATOMIC_FETCH_OP( op, asm_op, I, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
ATOMIC_OP( op, asm_op, I, long, 64) \
ATOMIC_FETCH_OP( op, asm_op, I, long, 64) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif
ATOMIC_OPS(inc, add, +, 1)
ATOMIC_OPS(dec, add, +, -1)
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#define atomic_inc_return atomic_inc_return
#define atomic_dec_return atomic_dec_return
#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
#define atomic_fetch_inc atomic_fetch_inc
#define atomic_fetch_dec atomic_fetch_dec
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
#define atomic64_inc_return atomic64_inc_return
#define atomic64_dec_return atomic64_dec_return
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
#define atomic64_fetch_inc atomic64_fetch_inc
#define atomic64_fetch_dec atomic64_fetch_dec
#endif
#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
static __always_inline bool atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
return atomic##prefix##_##func_op##_return(v) comp_op I; \
#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
static __always_inline \
bool atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
return atomic##prefix##_##func_op##_return(v) comp_op I; \
}
ATOMIC_OP(inc_and_test, inc, ==, 0, )
@@ -238,19 +331,19 @@ ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#undef ATOMIC_OP
/* This is required to provide a barrier on success. */
/* This is required to provide a full barrier on success. */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
__asm__ __volatile__ (
"0:\n\t"
"lr.w.aqrl %[p], %[c]\n\t"
"beq %[p], %[u], 1f\n\t"
"add %[rc], %[p], %[a]\n\t"
"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
"bnez %[rc], 0b\n\t"
"1:"
"0: lr.w %[p], %[c]\n"
" beq %[p], %[u], 1f\n"
" add %[rc], %[p], %[a]\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
: "memory");
@@ -263,13 +356,13 @@ static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
long prev, rc;
__asm__ __volatile__ (
"0:\n\t"
"lr.d.aqrl %[p], %[c]\n\t"
"beq %[p], %[u], 1f\n\t"
"add %[rc], %[p], %[a]\n\t"
"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
"bnez %[rc], 0b\n\t"
"1:"
"0: lr.d %[p], %[c]\n"
" beq %[p], %[u], 1f\n"
" add %[rc], %[p], %[a]\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
: "memory");
@@ -300,37 +393,63 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
/*
* atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
* {cmp,}xchg and the operations that return, so they need a barrier.
* {cmp,}xchg and the operations that return, so they need a full barrier.
*/
/*
* FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
* assigning the same barrier to both the LR and SC operations, but that might
* not make any sense. We're waiting on a memory model specification to
* determine exactly what the right thing to do is here.
*/
#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or) \
static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
return __cmpxchg(&(v->counter), o, n, size, asm_or, asm_or); \
} \
static __always_inline c_t atomic##prefix##_xchg##c_or(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg(n, &(v->counter), size, asm_or); \
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
return __cmpxchg(&(v->counter), o, n, size); \
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(c_or, asm_or) \
ATOMIC_OP( int, , c_or, 4, asm_or)
#define ATOMIC_OPS() \
ATOMIC_OP( int, , 4)
#else
#define ATOMIC_OPS(c_or, asm_or) \
ATOMIC_OP( int, , c_or, 4, asm_or) \
ATOMIC_OP(long, 64, c_or, 8, asm_or)
#define ATOMIC_OPS() \
ATOMIC_OP( int, , 4) \
ATOMIC_OP(long, 64, 8)
#endif
ATOMIC_OPS( , .aqrl)
ATOMIC_OPS(_acquire, .aq)
ATOMIC_OPS(_release, .rl)
ATOMIC_OPS(_relaxed, )
ATOMIC_OPS()
#undef ATOMIC_OPS
#undef ATOMIC_OP
@@ -340,13 +459,13 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
int prev, rc;
__asm__ __volatile__ (
"0:\n\t"
"lr.w.aqrl %[p], %[c]\n\t"
"sub %[rc], %[p], %[o]\n\t"
"bltz %[rc], 1f\n\t"
"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
"bnez %[rc], 0b\n\t"
"1:"
"0: lr.w %[p], %[c]\n"
" sub %[rc], %[p], %[o]\n"
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [o]"r" (offset)
: "memory");
@@ -361,13 +480,13 @@ static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
long prev, rc;
__asm__ __volatile__ (
"0:\n\t"
"lr.d.aqrl %[p], %[c]\n\t"
"sub %[rc], %[p], %[o]\n\t"
"bltz %[rc], 1f\n\t"
"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
"bnez %[rc], 0b\n\t"
"1:"
"0: lr.d %[p], %[c]\n"
" sub %[rc], %[p], %[o]\n"
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [o]"r" (offset)
: "memory");


@@ -38,6 +38,21 @@
#define __smp_rmb() RISCV_FENCE(r,r)
#define __smp_wmb() RISCV_FENCE(w,w)
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
RISCV_FENCE(rw,w); \
WRITE_ONCE(*p, v); \
} while (0)
#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
RISCV_FENCE(r,rw); \
___p1; \
})
/*
* This is a very specific barrier: it's currently only used in two places in
* the kernel, both in the scheduler. See include/linux/spinlock.h for the two
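
These two definitions exist for the classic message-passing pattern; a hedged example of the guarantee they provide (illustrative, not part of the patch):

int payload;
int flag;

void producer(void)
{
        payload = 42;
        smp_store_release(&flag, 1);      /* fence rw,w; then the store */
}

int consumer(void)
{
        while (!smp_load_acquire(&flag))  /* the load; then fence r,rw */
                ;
        return payload;                   /* observes 42, never stale data */
}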


@@ -17,45 +17,153 @@
#include <linux/bug.h>
#include <asm/barrier.h>
#include <asm/fence.h>
#define __xchg(new, ptr, size, asm_or) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
"amoswap.w" #asm_or " %0, %2, %1" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
"amoswap.d" #asm_or " %0, %2, %1" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
#define __xchg_relaxed(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
" amoswap.w %0, %2, %1\n" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
" amoswap.d %0, %2, %1\n" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr)), .aqrl))
#define xchg32(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
xchg((ptr), (x)); \
#define xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
_x_, sizeof(*(ptr))); \
})
#define xchg64(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
xchg((ptr), (x)); \
#define __xchg_acquire(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
" amoswap.w %0, %2, %1\n" \
RISCV_ACQUIRE_BARRIER \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
" amoswap.d %0, %2, %1\n" \
RISCV_ACQUIRE_BARRIER \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define xchg_acquire(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_acquire((ptr), \
_x_, sizeof(*(ptr))); \
})
#define __xchg_release(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
RISCV_RELEASE_BARRIER \
" amoswap.w %0, %2, %1\n" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
RISCV_RELEASE_BARRIER \
" amoswap.d %0, %2, %1\n" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define xchg_release(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_release((ptr), \
_x_, sizeof(*(ptr))); \
})
#define __xchg(ptr, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
" amoswap.w.aqrl %0, %2, %1\n" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
" amoswap.d.aqrl %0, %2, %1\n" \
: "=r" (__ret), "+A" (*__ptr) \
: "r" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define xchg(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
})
#define xchg32(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
xchg((ptr), (x)); \
})
#define xchg64(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
xchg((ptr), (x)); \
})
/*
@@ -63,7 +171,7 @@
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __cmpxchg(ptr, old, new, size, lrb, scb) \
#define __cmpxchg_relaxed(ptr, old, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
@@ -73,24 +181,22 @@
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
"0:" \
"lr.w" #scb " %0, %2\n" \
"bne %0, %z3, 1f\n" \
"sc.w" #lrb " %1, %z4, %2\n" \
"bnez %1, 0b\n" \
"1:" \
"0: lr.w %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc.w %1, %z4, %2\n" \
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
"0:" \
"lr.d" #scb " %0, %2\n" \
"bne %0, %z3, 1f\n" \
"sc.d" #lrb " %1, %z4, %2\n" \
"bnez %1, 0b\n" \
"1:" \
"0: lr.d %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc.d %1, %z4, %2\n" \
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
@@ -101,34 +207,177 @@
__ret; \
})
#define cmpxchg(ptr, o, n) \
(__cmpxchg((ptr), (o), (n), sizeof(*(ptr)), .aqrl, .aqrl))
#define cmpxchg_local(ptr, o, n) \
(__cmpxchg((ptr), (o), (n), sizeof(*(ptr)), , ))
#define cmpxchg32(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
cmpxchg((ptr), (o), (n)); \
#define cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
_o_, _n_, sizeof(*(ptr))); \
})
#define cmpxchg32_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
cmpxchg_local((ptr), (o), (n)); \
#define __cmpxchg_acquire(ptr, old, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __ret; \
register unsigned int __rc; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
"0: lr.w %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc.w %1, %z4, %2\n" \
" bnez %1, 0b\n" \
RISCV_ACQUIRE_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
"0: lr.d %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc.d %1, %z4, %2\n" \
" bnez %1, 0b\n" \
RISCV_ACQUIRE_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
#define cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
_o_, _n_, sizeof(*(ptr))); \
})
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
#define __cmpxchg_release(ptr, old, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __ret; \
register unsigned int __rc; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
RISCV_RELEASE_BARRIER \
"0: lr.w %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc.w %1, %z4, %2\n" \
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
RISCV_RELEASE_BARRIER \
"0: lr.d %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc.d %1, %z4, %2\n" \
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define cmpxchg_release(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg_release((ptr), \
_o_, _n_, sizeof(*(ptr))); \
})
#define __cmpxchg(ptr, old, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __ret; \
register unsigned int __rc; \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
"0: lr.w %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc.w.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
" fence rw, rw\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__ ( \
"0: lr.d %0, %2\n" \
" bne %0, %z3, 1f\n" \
" sc.d.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
" fence rw, rw\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "memory"); \
break; \
default: \
BUILD_BUG(); \
} \
__ret; \
})
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), \
_o_, _n_, sizeof(*(ptr))); \
})
#define cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
#define cmpxchg32(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg32_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
cmpxchg_relaxed((ptr), (o), (n)) \
})
#define cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_relaxed((ptr), (o), (n)); \
})
#endif /* _ASM_RISCV_CMPXCHG_H */
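
As a hedged illustration of how the graded flavors are meant to be chosen (my_trylock/my_unlock are invented names, not part of the patch): taking a lock wants acquire ordering so the critical section cannot be reordered above the successful swap, and the matching unlock is a release.

static inline int my_trylock(int *lock)
{
        /* succeeds only on a 0 -> 1 transition, with acquire ordering */
        return cmpxchg_acquire(lock, 0, 1) == 0;
}

static inline void my_unlock(int *lock)
{
        smp_store_release(lock, 0);     /* pairs with the acquire above */
}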


@@ -0,0 +1,12 @@
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
#ifdef CONFIG_SMP
#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
#endif
#endif /* _ASM_RISCV_FENCE_H */
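
Because these macros are bare string literals, they splice straight into inline assembly, and on !SMP they expand to nothing so no fence is emitted at all. A hedged, slightly rearranged copy of the arch_spin_trylock() body from later in this commit shows the idiom:

static inline int trylock(int *lock)
{
        int busy, tmp = 1;

        __asm__ __volatile__ (
                "       amoswap.w %0, %2, %1\n"
                RISCV_ACQUIRE_BARRIER   /* "fence r, rw" on SMP, empty on UP */
                : "=r" (busy), "+A" (*lock)
                : "r" (tmp)
                : "memory");
        return !busy;                   /* acquired if the old value was 0 */
}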


@@ -8,3 +8,59 @@
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
#define HAVE_FUNCTION_GRAPH_FP_TEST
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
struct dyn_arch_ftrace {
};
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/*
* A general call in RISC-V is a pair of insts:
* 1) auipc: setting high-20 pc-related bits to ra register
* 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to
* return address (original pc + 4)
*
* Dynamic ftrace generates probes to call sites, so we must deal with
* both auipc and jalr at the same time.
*/
#define MCOUNT_ADDR ((unsigned long)_mcount)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
#define AUIPC_PAD (0x00001000)
#define JALR_SHIFT 20
#define JALR_BASIC (0x000080e7)
#define AUIPC_BASIC (0x00000097)
#define NOP4 (0x00000013)
#define make_call(caller, callee, call) \
do { \
call[0] = to_auipc_insn((unsigned int)((unsigned long)callee - \
(unsigned long)caller)); \
call[1] = to_jalr_insn((unsigned int)((unsigned long)callee - \
(unsigned long)caller)); \
} while (0)
#define to_jalr_insn(offset) \
(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC)
#define to_auipc_insn(offset) \
((offset & JALR_SIGN_MASK) ? \
(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) : \
((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC))
/*
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
*/
#define MCOUNT_INSN_SIZE 8
#endif
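
A worked example of the sign adjustment encoded above (the offset value is illustrative): jalr sign-extends its 12-bit immediate, so an offset with bit 11 set must round the auipc immediate up by 0x1000 (AUIPC_PAD). This standalone program redoes the arithmetic of to_auipc_insn()/to_jalr_insn() for offset 0x1800:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t offset = 0x1800;       /* bit 11 set */

        /* auipc: hi part rounded up because jalr's imm will be negative */
        uint32_t auipc = ((offset & 0x00000800) ?
                          ((offset & 0xfffff000) + 0x00001000) :
                           (offset & 0xfffff000)) | 0x00000097;
        /* jalr: low 12 bits, sign-extended by hardware to -0x800 */
        uint32_t jalr = ((offset & 0x00000fff) << 20) | 0x000080e7;

        /* 0x2000 (auipc) + -0x800 (jalr) = 0x1800, the intended offset */
        printf("auipc ra: %08x  jalr ra: %08x\n", auipc, jalr);
        return 0;
}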


@@ -0,0 +1,113 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
#ifndef _ASM_RISCV_MODULE_H
#define _ASM_RISCV_MODULE_H
#include <asm-generic/module.h>
#define MODULE_ARCH_VERMAGIC "riscv"
u64 module_emit_got_entry(struct module *mod, u64 val);
u64 module_emit_plt_entry(struct module *mod, u64 val);
#ifdef CONFIG_MODULE_SECTIONS
struct mod_section {
struct elf64_shdr *shdr;
int num_entries;
int max_entries;
};
struct mod_arch_specific {
struct mod_section got;
struct mod_section plt;
struct mod_section got_plt;
};
struct got_entry {
u64 symbol_addr; /* the real variable address */
};
static inline struct got_entry emit_got_entry(u64 val)
{
return (struct got_entry) {val};
}
static inline struct got_entry *get_got_entry(u64 val,
const struct mod_section *sec)
{
struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr;
int i;
for (i = 0; i < sec->num_entries; i++) {
if (got[i].symbol_addr == val)
return &got[i];
}
return NULL;
}
struct plt_entry {
/*
* Trampoline code to real target address. The return address
* should be the original (pc+4) before entring plt entry.
*/
u32 insn_auipc; /* auipc t0, 0x0 */
u32 insn_ld; /* ld t1, 0x10(t0) */
u32 insn_jr; /* jr t1 */
};
#define OPC_AUIPC 0x0017
#define OPC_LD 0x3003
#define OPC_JALR 0x0067
#define REG_T0 0x5
#define REG_T1 0x6
static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
{
/*
* U-Type encoding:
* +------------+----------+----------+
* | imm[31:12] | rd[11:7] | opc[6:0] |
* +------------+----------+----------+
*
* I-Type encoding:
* +------------+------------+--------+----------+----------+
* | imm[31:20] | rs1[19:15] | funct3 | rd[11:7] | opc[6:0] |
* +------------+------------+--------+----------+----------+
*
*/
u64 offset = got_plt - plt;
u32 hi20 = (offset + 0x800) & 0xfffff000;
u32 lo12 = (offset - hi20);
return (struct plt_entry) {
OPC_AUIPC | (REG_T0 << 7) | hi20,
OPC_LD | (lo12 << 20) | (REG_T0 << 15) | (REG_T1 << 7),
OPC_JALR | (REG_T1 << 15)
};
}
static inline int get_got_plt_idx(u64 val, const struct mod_section *sec)
{
struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
int i;
for (i = 0; i < sec->num_entries; i++) {
if (got_plt[i].symbol_addr == val)
return i;
}
return -1;
}
static inline struct plt_entry *get_plt_entry(u64 val,
const struct mod_section *sec_plt,
const struct mod_section *sec_got_plt)
{
struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
if (got_plt_idx >= 0)
return plt + got_plt_idx;
else
return NULL;
}
#endif /* CONFIG_MODULE_SECTIONS */
#endif /* _ASM_RISCV_MODULE_H */
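
A worked example of the hi20/lo12 split used in emit_plt_entry() above (numbers illustrative): adding 0x800 before masking rounds to the nearest 4 KiB, so the remainder always fits the sign-extended 12-bit ld immediate.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t offset = 0x8a0;                         /* got_plt - plt */
        int32_t hi20 = (offset + 0x800) & 0xfffff000;   /* 0x1000 */
        int32_t lo12 = offset - hi20;                   /* -0x760, fits 12 bits */

        printf("hi20=%#x lo12=%d sum=%#llx\n",
               hi20, lo12, (long long)(hi20 + lo12));   /* sum = 0x8a0 */
        return 0;
}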


@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/fence.h>
/*
* Simple spin lock operations. These provide no fairness guarantees.
@@ -28,10 +29,7 @@
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__ (
"amoswap.w.rl x0, x0, %0"
: "=A" (lock->lock)
:: "memory");
smp_store_release(&lock->lock, 0);
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,7 +37,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
int tmp = 1, busy;
__asm__ __volatile__ (
"amoswap.w.aq %0, %2, %1"
" amoswap.w %0, %2, %1\n"
RISCV_ACQUIRE_BARRIER
: "=r" (busy), "+A" (lock->lock)
: "r" (tmp)
: "memory");
@@ -68,8 +67,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
"1: lr.w %1, %0\n"
" bltz %1, 1b\n"
" addi %1, %1, 1\n"
" sc.w.aq %1, %1, %0\n"
" sc.w %1, %1, %0\n"
" bnez %1, 1b\n"
RISCV_ACQUIRE_BARRIER
: "+A" (lock->lock), "=&r" (tmp)
:: "memory");
}
@@ -82,8 +82,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
"1: lr.w %1, %0\n"
" bnez %1, 1b\n"
" li %1, -1\n"
" sc.w.aq %1, %1, %0\n"
" sc.w %1, %1, %0\n"
" bnez %1, 1b\n"
RISCV_ACQUIRE_BARRIER
: "+A" (lock->lock), "=&r" (tmp)
:: "memory");
}
@@ -96,8 +97,9 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
"1: lr.w %1, %0\n"
" bltz %1, 1f\n"
" addi %1, %1, 1\n"
" sc.w.aq %1, %1, %0\n"
" sc.w %1, %1, %0\n"
" bnez %1, 1b\n"
RISCV_ACQUIRE_BARRIER
"1:\n"
: "+A" (lock->lock), "=&r" (busy)
:: "memory");
@@ -113,8 +115,9 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
"1: lr.w %1, %0\n"
" bnez %1, 1f\n"
" li %1, -1\n"
" sc.w.aq %1, %1, %0\n"
" sc.w %1, %1, %0\n"
" bnez %1, 1b\n"
RISCV_ACQUIRE_BARRIER
"1:\n"
: "+A" (lock->lock), "=&r" (busy)
:: "memory");
@@ -125,7 +128,8 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
"amoadd.w.rl x0, %1, %0"
RISCV_RELEASE_BARRIER
" amoadd.w x0, %1, %0\n"
: "+A" (lock->lock)
: "r" (-1)
: "memory");
@@ -133,10 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__ (
"amoswap.w.rl x0, x0, %0"
: "=A" (lock->lock)
:: "memory");
smp_store_release(&lock->lock, 0);
}
#endif /* _ASM_RISCV_SPINLOCK_H */


@@ -79,5 +79,12 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_TPREL_I 49
#define R_RISCV_TPREL_S 50
#define R_RISCV_RELAX 51
#define R_RISCV_SUB6 52
#define R_RISCV_SET6 53
#define R_RISCV_SET8 54
#define R_RISCV_SET16 55
#define R_RISCV_SET32 56
#define R_RISCV_32_PCREL 57
#endif /* _UAPI_ASM_ELF_H */


@@ -34,7 +34,9 @@ CFLAGS_setup.o := -mcmodel=medany
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
clean:


@@ -6,9 +6,126 @@
*/
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
static int ftrace_check_current_call(unsigned long hook_pos,
unsigned int *expected)
{
unsigned int replaced[2];
unsigned int nops[2] = {NOP4, NOP4};
/* we expect nops at the hook position */
if (!expected)
expected = nops;
/*
* Read the text we want to modify;
* return must be -EFAULT on read error
*/
if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
return -EFAULT;
/*
* Make sure it is what we expect it to be;
* return must be -EINVAL on failed comparison
*/
if (memcmp(expected, replaced, sizeof(replaced))) {
pr_err("%p: expected (%08x %08x) but get (%08x %08x)",
(void *)hook_pos, expected[0], expected[1], replaced[0],
replaced[1]);
return -EINVAL;
}
return 0;
}
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
bool enable)
{
unsigned int call[2];
unsigned int nops[2] = {NOP4, NOP4};
int ret = 0;
make_call(hook_pos, target, call);
/* replace the auipc-jalr pair at once */
ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
MCOUNT_INSN_SIZE);
/* return must be -EPERM on write error */
if (ret)
return -EPERM;
smp_mb();
flush_icache_range((void *)hook_pos, (void *)hook_pos + MCOUNT_INSN_SIZE);
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
int ret = ftrace_check_current_call(rec->ip, NULL);
if (ret)
return ret;
return __ftrace_modify_call(rec->ip, addr, true);
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
unsigned int call[2];
int ret;
make_call(rec->ip, addr, call);
ret = ftrace_check_current_call(rec->ip, call);
if (ret)
return ret;
return __ftrace_modify_call(rec->ip, addr, false);
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
(unsigned long)func, true);
if (!ret) {
ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
(unsigned long)func, true);
}
return ret;
}
int __init ftrace_dyn_arch_init(void)
{
return 0;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned int call[2];
int ret;
make_call(rec->ip, old_addr, call);
ret = ftrace_check_current_call(rec->ip, call);
if (ret)
return ret;
return __ftrace_modify_call(rec->ip, addr, true);
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Most of this file is copied from arm64.
* Most of this function is copied from arm64.
*/
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
@@ -34,8 +151,62 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
return;
err = ftrace_push_return_trace(old, self_addr, &trace.depth,
frame_pointer, NULL);
frame_pointer, parent);
if (err == -EBUSY)
return;
*parent = return_hooker;
}
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned int call[2];
static int init_graph = 1;
int ret;
make_call(&ftrace_graph_call, &ftrace_stub, call);
/*
* When enabling graph tracer for the first time, ftrace_graph_call
* should contains a call to ftrace_stub. Once it has been disabled,
* the 8-bytes at the position becomes NOPs.
*/
if (init_graph) {
ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
call);
init_graph = 0;
} else {
ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
NULL);
}
if (ret)
return ret;
return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
unsigned int call[2];
int ret;
make_call(&ftrace_graph_call, &prepare_ftrace_return, call);
/*
* This is to make sure that ftrace_enable_ftrace_graph_caller
* did the right thing.
*/
ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
call);
if (ret)
return ret;
return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


@@ -0,0 +1,239 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm-generic/export.h>
#include <asm/ftrace.h>
.text
.macro SAVE_ABI_STATE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
addi sp, sp, -48
sd s0, 32(sp)
sd ra, 40(sp)
addi s0, sp, 48
sd t0, 24(sp)
sd t1, 16(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
sd t2, 8(sp)
#endif
#else
addi sp, sp, -16
sd s0, 0(sp)
sd ra, 8(sp)
addi s0, sp, 16
#endif
.endm
.macro RESTORE_ABI_STATE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ld s0, 32(sp)
ld ra, 40(sp)
addi sp, sp, 48
#else
ld ra, 8(sp)
ld s0, 0(sp)
addi sp, sp, 16
#endif
.endm
.macro RESTORE_GRAPH_ARGS
ld a0, 24(sp)
ld a1, 16(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld a2, 8(sp)
#endif
.endm
ENTRY(ftrace_graph_caller)
addi sp, sp, -16
sd s0, 0(sp)
sd ra, 8(sp)
addi s0, sp, 16
ftrace_graph_call:
.global ftrace_graph_call
/*
* Calling ftrace_enable/disable_ftrace_graph_caller would overwrite the
* call below. Check ftrace_modify_all_code for details.
*/
call ftrace_stub
ld ra, 8(sp)
ld s0, 0(sp)
addi sp, sp, 16
ret
ENDPROC(ftrace_graph_caller)
ENTRY(ftrace_caller)
/*
* a0: the address in the caller when calling ftrace_caller
* a1: the caller's return address
* a2: the address of global variable function_trace_op
*/
ld a1, -8(s0)
addi a0, ra, -MCOUNT_INSN_SIZE
la t5, function_trace_op
ld a2, 0(t5)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* the graph tracer (specifically, prepare_ftrace_return) needs these
* arguments but for now the function tracer occupies the regs, so we
* save them in temporary regs to recover later.
*/
addi t0, s0, -8
mv t1, a0
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld t2, -16(s0)
#endif
#endif
SAVE_ABI_STATE
ftrace_call:
.global ftrace_call
/*
* For the dynamic ftrace to work, here we should reserve at least
* 8 bytes for a functional auipc-jalr pair. The following call
* serves this purpose.
*
* Calling ftrace_update_ftrace_func would overwrite the nops below.
* Check ftrace_modify_all_code for details.
*/
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
RESTORE_GRAPH_ARGS
call ftrace_graph_caller
#endif
RESTORE_ABI_STATE
ret
ENDPROC(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
.macro SAVE_ALL
addi sp, sp, -(PT_SIZE_ON_STACK+16)
sd s0, (PT_SIZE_ON_STACK)(sp)
sd ra, (PT_SIZE_ON_STACK+8)(sp)
addi s0, sp, (PT_SIZE_ON_STACK+16)
sd x1, PT_RA(sp)
sd x2, PT_SP(sp)
sd x3, PT_GP(sp)
sd x4, PT_TP(sp)
sd x5, PT_T0(sp)
sd x6, PT_T1(sp)
sd x7, PT_T2(sp)
sd x8, PT_S0(sp)
sd x9, PT_S1(sp)
sd x10, PT_A0(sp)
sd x11, PT_A1(sp)
sd x12, PT_A2(sp)
sd x13, PT_A3(sp)
sd x14, PT_A4(sp)
sd x15, PT_A5(sp)
sd x16, PT_A6(sp)
sd x17, PT_A7(sp)
sd x18, PT_S2(sp)
sd x19, PT_S3(sp)
sd x20, PT_S4(sp)
sd x21, PT_S5(sp)
sd x22, PT_S6(sp)
sd x23, PT_S7(sp)
sd x24, PT_S8(sp)
sd x25, PT_S9(sp)
sd x26, PT_S10(sp)
sd x27, PT_S11(sp)
sd x28, PT_T3(sp)
sd x29, PT_T4(sp)
sd x30, PT_T5(sp)
sd x31, PT_T6(sp)
.endm
.macro RESTORE_ALL
ld x1, PT_RA(sp)
ld x2, PT_SP(sp)
ld x3, PT_GP(sp)
ld x4, PT_TP(sp)
ld x5, PT_T0(sp)
ld x6, PT_T1(sp)
ld x7, PT_T2(sp)
ld x8, PT_S0(sp)
ld x9, PT_S1(sp)
ld x10, PT_A0(sp)
ld x11, PT_A1(sp)
ld x12, PT_A2(sp)
ld x13, PT_A3(sp)
ld x14, PT_A4(sp)
ld x15, PT_A5(sp)
ld x16, PT_A6(sp)
ld x17, PT_A7(sp)
ld x18, PT_S2(sp)
ld x19, PT_S3(sp)
ld x20, PT_S4(sp)
ld x21, PT_S5(sp)
ld x22, PT_S6(sp)
ld x23, PT_S7(sp)
ld x24, PT_S8(sp)
ld x25, PT_S9(sp)
ld x26, PT_S10(sp)
ld x27, PT_S11(sp)
ld x28, PT_T3(sp)
ld x29, PT_T4(sp)
ld x30, PT_T5(sp)
ld x31, PT_T6(sp)
ld s0, (PT_SIZE_ON_STACK)(sp)
ld ra, (PT_SIZE_ON_STACK+8)(sp)
addi sp, sp, (PT_SIZE_ON_STACK+16)
.endm
.macro RESTORE_GRAPH_REG_ARGS
ld a0, PT_T0(sp)
ld a1, PT_T1(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld a2, PT_T2(sp)
#endif
.endm
/*
* Most of the contents are the same as ftrace_caller.
*/
ENTRY(ftrace_regs_caller)
/*
* a3: the address of all registers in the stack
*/
ld a1, -8(s0)
addi a0, ra, -MCOUNT_INSN_SIZE
la t5, function_trace_op
ld a2, 0(t5)
addi a3, sp, -(PT_SIZE_ON_STACK+16)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
addi t0, s0, -8
mv t1, a0
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld t2, -16(s0)
#endif
#endif
SAVE_ALL
ftrace_regs_call:
.global ftrace_regs_call
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
RESTORE_GRAPH_REG_ARGS
call ftrace_graph_caller
#endif
RESTORE_ALL
ret
ENDPROC(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */


@@ -32,13 +32,13 @@
addi s0, sp, 32
.endm
.macro STORE_ABI_STATE
.macro RESTORE_ABI_STATE
ld ra, 8(sp)
ld s0, 0(sp)
addi sp, sp, 16
.endm
.macro STORE_RET_ABI_STATE
.macro RESTORE_RET_ABI_STATE
ld ra, 24(sp)
ld s0, 16(sp)
ld a0, 8(sp)
@@ -46,6 +46,10 @@
.endm
ENTRY(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
.global _mcount
.set _mcount, ftrace_stub
#endif
ret
ENDPROC(ftrace_stub)
@@ -66,15 +70,15 @@ ENTRY(return_to_handler)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a0, t6
#endif
la t0, ftrace_return_to_handler
jalr t0
call ftrace_return_to_handler
mv a1, a0
STORE_RET_ABI_STATE
RESTORE_RET_ABI_STATE
jalr a1
ENDPROC(return_to_handler)
EXPORT_SYMBOL(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
ENTRY(_mcount)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -104,9 +108,8 @@ do_ftrace_graph_caller:
ld a2, -16(s0)
#endif
SAVE_ABI_STATE
la t0, prepare_ftrace_return
jalr t0
STORE_ABI_STATE
call prepare_ftrace_return
RESTORE_ABI_STATE
ret
#endif
@@ -120,7 +123,8 @@ do_trace:
SAVE_ABI_STATE
jalr t5
STORE_ABI_STATE
RESTORE_ABI_STATE
ret
ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
#endif


@@ -0,0 +1,156 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* Copyright (C) 2018 Andes Technology Corporation <zong@andestech.com>
*/
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
u64 module_emit_got_entry(struct module *mod, u64 val)
{
struct mod_section *got_sec = &mod->arch.got;
int i = got_sec->num_entries;
struct got_entry *got = get_got_entry(val, got_sec);
if (got)
return (u64)got;
/* There is no duplicate entry, create a new one */
got = (struct got_entry *)got_sec->shdr->sh_addr;
got[i] = emit_got_entry(val);
got_sec->num_entries++;
BUG_ON(got_sec->num_entries > got_sec->max_entries);
return (u64)&got[i];
}
u64 module_emit_plt_entry(struct module *mod, u64 val)
{
struct mod_section *got_plt_sec = &mod->arch.got_plt;
struct got_entry *got_plt;
struct mod_section *plt_sec = &mod->arch.plt;
struct plt_entry *plt = get_plt_entry(val, plt_sec, got_plt_sec);
int i = plt_sec->num_entries;
if (plt)
return (u64)plt;
/* There is no duplicate entry, create a new one */
got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr;
got_plt[i] = emit_got_entry(val);
plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
plt[i] = emit_plt_entry(val, (u64)&plt[i], (u64)&got_plt[i]);
plt_sec->num_entries++;
got_plt_sec->num_entries++;
BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
return (u64)&plt[i];
}
static int is_rela_equal(const Elf64_Rela *x, const Elf64_Rela *y)
{
return x->r_info == y->r_info && x->r_addend == y->r_addend;
}
static bool duplicate_rela(const Elf64_Rela *rela, int idx)
{
int i;
for (i = 0; i < idx; i++) {
if (is_rela_equal(&rela[i], &rela[idx]))
return true;
}
return false;
}
static void count_max_entries(Elf64_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
unsigned int type, i;
for (i = 0; i < num; i++) {
type = ELF64_R_TYPE(relas[i].r_info);
if (type == R_RISCV_CALL_PLT) {
if (!duplicate_rela(relas, i))
(*plts)++;
} else if (type == R_RISCV_GOT_HI20) {
if (!duplicate_rela(relas, i))
(*gots)++;
}
}
}
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
unsigned int num_plts = 0;
unsigned int num_gots = 0;
int i;
/*
* Find the empty .got and .plt sections.
*/
for (i = 0; i < ehdr->e_shnum; i++) {
if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
mod->arch.plt.shdr = sechdrs + i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
mod->arch.got.shdr = sechdrs + i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got.plt"))
mod->arch.got_plt.shdr = sechdrs + i;
}
if (!mod->arch.plt.shdr) {
pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.got.shdr) {
pr_err("%s: module GOT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.got_plt.shdr) {
pr_err("%s: module GOT.PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
/* Calculate the maxinum number of entries */
for (i = 0; i < ehdr->e_shnum; i++) {
Elf64_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
int num_rela = sechdrs[i].sh_size / sizeof(Elf64_Rela);
Elf64_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
if (sechdrs[i].sh_type != SHT_RELA)
continue;
/* ignore relocations that operate on non-exec sections */
if (!(dst_sec->sh_flags & SHF_EXECINSTR))
continue;
count_max_entries(relas, num_rela, &num_plts, &num_gots);
}
mod->arch.plt.shdr->sh_type = SHT_NOBITS;
mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
mod->arch.plt.num_entries = 0;
mod->arch.plt.max_entries = num_plts;
mod->arch.got.shdr->sh_type = SHT_NOBITS;
mod->arch.got.shdr->sh_flags = SHF_ALLOC;
mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES;
mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry);
mod->arch.got.num_entries = 0;
mod->arch.got.max_entries = num_gots;
mod->arch.got_plt.shdr->sh_type = SHT_NOBITS;
mod->arch.got_plt.shdr->sh_flags = SHF_ALLOC;
mod->arch.got_plt.shdr->sh_addralign = L1_CACHE_BYTES;
mod->arch.got_plt.shdr->sh_size = (num_plts + 1) * sizeof(struct got_entry);
mod->arch.got_plt.num_entries = 0;
mod->arch.got_plt.max_entries = num_plts;
return 0;
}


@@ -49,6 +49,39 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
return 0;
}
static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s64 offset = (void *)v - (void *)location;
u16 imm8 = (offset & 0x100) << (12 - 8);
u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
u16 imm5 = (offset & 0x20) >> (5 - 2);
u16 imm4_3 = (offset & 0x18) << (12 - 5);
u16 imm2_1 = (offset & 0x6) << (12 - 10);
*(u16 *)location = (*(u16 *)location & 0xe383) |
imm8 | imm7_6 | imm5 | imm4_3 | imm2_1;
return 0;
}
static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s64 offset = (void *)v - (void *)location;
u16 imm11 = (offset & 0x800) << (12 - 11);
u16 imm10 = (offset & 0x400) >> (10 - 8);
u16 imm9_8 = (offset & 0x300) << (12 - 11);
u16 imm7 = (offset & 0x80) >> (7 - 6);
u16 imm6 = (offset & 0x40) << (12 - 11);
u16 imm5 = (offset & 0x20) >> (5 - 2);
u16 imm4 = (offset & 0x10) << (12 - 5);
u16 imm3_1 = (offset & 0xe) << (12 - 10);
*(u16 *)location = (*(u16 *)location & 0xe003) |
imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1;
return 0;
}
static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
@@ -92,6 +125,67 @@ static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
return 0;
}
static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s32 hi20;
if (IS_ENABLED(CMODEL_MEDLOW)) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
me->name, v, location);
return -EINVAL;
}
hi20 = ((s32)v + 0x800) & 0xfffff000;
*location = (*location & 0xfff) | hi20;
return 0;
}
static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location,
Elf_Addr v)
{
/* Skip medlow checking because of filtering by HI20 already */
s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
s32 lo12 = ((s32)v - hi20);
*location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20);
return 0;
}
static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
Elf_Addr v)
{
/* Skip medlow checking because of filtering by HI20 already */
s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
s32 lo12 = ((s32)v - hi20);
u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
*location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
return 0;
}
static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s64 offset = (void *)v - (void *)location;
s32 hi20;
/* Always emit the got entry */
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
offset = module_emit_got_entry(me, v);
offset = (void *)offset - (void *)location;
} else {
pr_err(
"%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
me->name, v, location);
return -EINVAL;
}
hi20 = (offset + 0x800) & 0xfffff000;
*location = (*location & 0xfff) | hi20;
return 0;
}
static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
Elf_Addr v)
{
@@ -99,6 +193,33 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
s32 fill_v = offset;
u32 hi20, lo12;
if (offset != fill_v) {
/* Only emit the plt entry if offset over 32-bit range */
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
offset = module_emit_plt_entry(me, v);
offset = (void *)offset - (void *)location;
} else {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
me->name, v, location);
return -EINVAL;
}
}
hi20 = (offset + 0x800) & 0xfffff000;
lo12 = (offset - hi20) & 0xfff;
*location = (*location & 0xfff) | hi20;
*(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
return 0;
}
static int apply_r_riscv_call_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s64 offset = (void *)v - (void *)location;
s32 fill_v = offset;
u32 hi20, lo12;
if (offset != fill_v) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
@@ -119,16 +240,49 @@ static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
return 0;
}
static int apply_r_riscv_align_rela(struct module *me, u32 *location,
Elf_Addr v)
{
pr_err(
"%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
me->name, location);
return -EINVAL;
}
static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*(u32 *)location += (*(u32 *)v);
return 0;
}
static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*(u32 *)location -= (*(u32 *)v);
return 0;
}
static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
Elf_Addr v) = {
[R_RISCV_64] = apply_r_riscv_64_rela,
[R_RISCV_BRANCH] = apply_r_riscv_branch_rela,
[R_RISCV_JAL] = apply_r_riscv_jal_rela,
[R_RISCV_RVC_BRANCH] = apply_r_riscv_rcv_branch_rela,
[R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela,
[R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela,
[R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela,
[R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela,
[R_RISCV_HI20] = apply_r_riscv_hi20_rela,
[R_RISCV_LO12_I] = apply_r_riscv_lo12_i_rela,
[R_RISCV_LO12_S] = apply_r_riscv_lo12_s_rela,
[R_RISCV_GOT_HI20] = apply_r_riscv_got_hi20_rela,
[R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela,
[R_RISCV_CALL] = apply_r_riscv_call_rela,
[R_RISCV_RELAX] = apply_r_riscv_relax_rela,
[R_RISCV_ALIGN] = apply_r_riscv_align_rela,
[R_RISCV_ADD32] = apply_r_riscv_add32_rela,
[R_RISCV_SUB32] = apply_r_riscv_sub32_rela,
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
@@ -184,25 +338,38 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
u64 hi20_loc =
sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[j].r_offset;
/* Find the corresponding HI20 PC-relative relocation entry */
if (hi20_loc == sym->st_value) {
u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
/* Find the corresponding HI20 relocation entry */
if (hi20_loc == sym->st_value
&& (hi20_type == R_RISCV_PCREL_HI20
|| hi20_type == R_RISCV_GOT_HI20)) {
s32 hi20, lo12;
Elf_Sym *hi20_sym =
(Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_RISCV_R_SYM(rel[j].r_info);
u64 hi20_sym_val =
hi20_sym->st_value
+ rel[j].r_addend;
/* Calculate lo12 */
s64 offset = hi20_sym_val - hi20_loc;
s32 hi20 = (offset + 0x800) & 0xfffff000;
s32 lo12 = offset - hi20;
u64 offset = hi20_sym_val - hi20_loc;
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
&& hi20_type == R_RISCV_GOT_HI20) {
offset = module_emit_got_entry(
me, hi20_sym_val);
offset = offset - hi20_loc;
}
hi20 = (offset + 0x800) & 0xfffff000;
lo12 = offset - hi20;
v = lo12;
break;
}
}
if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
pr_err(
"%s: Can not find HI20 PC-relative relocation information\n",
"%s: Can not find HI20 relocation information\n",
me->name);
return -EINVAL;
}


@@ -0,0 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
SECTIONS {
.plt (NOLOAD) : { BYTE(0) }
.got (NOLOAD) : { BYTE(0) }
.got.plt (NOLOAD) : { BYTE(0) }
}


@@ -18,6 +18,7 @@
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
#ifdef CONFIG_FRAME_POINTER
@@ -63,7 +64,12 @@ static void notrace walk_stackframe(struct task_struct *task,
frame = (struct stackframe *)fp - 1;
sp = fp;
fp = frame->fp;
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
(unsigned long *)(fp - 8));
#else
pc = frame->ra - 0x4;
#endif
}
}


@@ -368,6 +368,11 @@ if ($arch eq "x86_64") {
} elsif ($arch eq "microblaze") {
# Microblaze calls '_mcount' instead of plain 'mcount'.
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
} elsif ($arch eq "riscv") {
$function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
$type = ".quad";
$alignment = 2;
} else {
die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
}