
Merge branch 'core/core' into x86/build, to prevent conflicts

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2018-10-06 15:51:56 +02:00
commit 02678a5823
21 changed files with 226 additions and 174 deletions


@@ -359,6 +359,9 @@ config HAVE_PERF_USER_STACK_DUMP
 config HAVE_ARCH_JUMP_LABEL
         bool
 
+config HAVE_ARCH_JUMP_LABEL_RELATIVE
+        bool
+
 config HAVE_RCU_TABLE_FREE
         bool


@@ -104,6 +104,7 @@ config ARM64
         select HAVE_ARCH_BITREVERSE
         select HAVE_ARCH_HUGE_VMAP
         select HAVE_ARCH_JUMP_LABEL
+        select HAVE_ARCH_JUMP_LABEL_RELATIVE
         select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
         select HAVE_ARCH_KGDB
         select HAVE_ARCH_MMAP_RND_BITS


@@ -26,13 +26,16 @@
 
 #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
 
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key *key,
+                                               bool branch)
 {
-        asm_volatile_goto("1: nop\n\t"
-                 ".pushsection __jump_table, \"aw\"\n\t"
-                 ".align 3\n\t"
-                 ".quad 1b, %l[l_yes], %c0\n\t"
-                 ".popsection\n\t"
+        asm_volatile_goto(
+                "1: nop \n\t"
+                " .pushsection __jump_table, \"aw\" \n\t"
+                " .align 3 \n\t"
+                " .long 1b - ., %l[l_yes] - . \n\t"
+                " .quad %c0 - . \n\t"
+                " .popsection \n\t"
                 : : "i"(&((char *)key)[branch]) : : l_yes);
 
         return false;
@@ -40,13 +43,16 @@ l_yes:
         return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+                                                    bool branch)
 {
-        asm_volatile_goto("1: b %l[l_yes]\n\t"
-                 ".pushsection __jump_table, \"aw\"\n\t"
-                 ".align 3\n\t"
-                 ".quad 1b, %l[l_yes], %c0\n\t"
-                 ".popsection\n\t"
+        asm_volatile_goto(
+                "1: b %l[l_yes] \n\t"
+                " .pushsection __jump_table, \"aw\" \n\t"
+                " .align 3 \n\t"
+                " .long 1b - ., %l[l_yes] - . \n\t"
+                " .quad %c0 - . \n\t"
+                " .popsection \n\t"
                 : : "i"(&((char *)key)[branch]) : : l_yes);
 
         return false;
@@ -54,13 +60,5 @@ l_yes:
         return true;
 }
 
-typedef u64 jump_label_t;
-
-struct jump_entry {
-        jump_label_t code;
-        jump_label_t target;
-        jump_label_t key;
-};
-
 #endif  /* __ASSEMBLY__ */
 #endif  /* __ASM_JUMP_LABEL_H */

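The arm64 hunk above replaces three absolute 64-bit fields (code, target, key) with two 32-bit place-relative offsets plus a self-relative key, so each entry shrinks from 24 to 16 bytes and needs no absolute relocations under KASLR. A stand-alone sketch of the self-relative encoding that ".long 1b - ." produces, and that the jump_entry_code()/jump_entry_target() accessors added in linux/jump_label.h further down undo; illustrative user-space code, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    /* Store (value - &field); recover by adding &field back. */
    static int32_t encode_rel(const int32_t *field, uintptr_t value)
    {
            return (int32_t)(value - (uintptr_t)field);
    }

    static uintptr_t decode_rel(const int32_t *field)
    {
            return (uintptr_t)field + *field;
    }

    int main(void)
    {
            int32_t slot;
            uintptr_t site = (uintptr_t)&slot + 0x1000; /* pretend code address */

            slot = encode_rel(&slot, site);
            assert(decode_rel(&slot) == site); /* holds at any load address */
            return 0;
    }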

@@ -25,12 +25,12 @@
 void arch_jump_label_transform(struct jump_entry *entry,
                                enum jump_label_type type)
 {
-        void *addr = (void *)entry->code;
+        void *addr = (void *)jump_entry_code(entry);
         u32 insn;
 
         if (type == JUMP_LABEL_JMP) {
-                insn = aarch64_insn_gen_branch_imm(entry->code,
-                                                   entry->target,
+                insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
+                                                   jump_entry_target(entry),
                                                    AARCH64_INSN_BRANCH_NOLINK);
         } else {
                 insn = aarch64_insn_gen_nop();


@@ -120,6 +120,7 @@ config S390
         select HAVE_ALIGNED_STRUCT_PAGE if SLUB
         select HAVE_ARCH_AUDITSYSCALL
         select HAVE_ARCH_JUMP_LABEL
+        select HAVE_ARCH_JUMP_LABEL_RELATIVE
         select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
         select HAVE_ARCH_SECCOMP_FILTER
         select HAVE_ARCH_SOFT_DIRTY


@@ -14,41 +14,33 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-        asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
-                ".pushsection __jump_table, \"aw\"\n"
-                ".balign 8\n"
-                ".quad 0b, %l[label], %0\n"
-                ".popsection\n"
-                : : "X" (&((char *)key)[branch]) : : label);
+        asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
+                          ".pushsection __jump_table,\"aw\"\n"
+                          ".balign 8\n"
+                          ".long 0b-.,%l[label]-.\n"
+                          ".quad %0-.\n"
+                          ".popsection\n"
+                          : : "X" (&((char *)key)[branch]) : : label);
         return false;
 label:
         return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-        asm_volatile_goto("0: brcl 15, %l[label]\n"
-                ".pushsection __jump_table, \"aw\"\n"
-                ".balign 8\n"
-                ".quad 0b, %l[label], %0\n"
-                ".popsection\n"
-                : : "X" (&((char *)key)[branch]) : : label);
+        asm_volatile_goto("0: brcl 15,%l[label]\n"
+                          ".pushsection __jump_table,\"aw\"\n"
+                          ".balign 8\n"
+                          ".long 0b-.,%l[label]-.\n"
+                          ".quad %0-.\n"
+                          ".popsection\n"
+                          : : "X" (&((char *)key)[branch]) : : label);
         return false;
 label:
         return true;
 }
 
-typedef unsigned long jump_label_t;
-
-struct jump_entry {
-        jump_label_t code;
-        jump_label_t target;
-        jump_label_t key;
-};
-
 #endif  /* __ASSEMBLY__ */
 #endif


@@ -33,13 +33,13 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
 {
         /* brcl 15,offset */
         insn->opcode = 0xc0f4;
-        insn->offset = (entry->target - entry->code) >> 1;
+        insn->offset = (jump_entry_target(entry) - jump_entry_code(entry)) >> 1;
 }
 
 static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
                            struct insn *new)
 {
-        unsigned char *ipc = (unsigned char *)entry->code;
+        unsigned char *ipc = (unsigned char *)jump_entry_code(entry);
         unsigned char *ipe = (unsigned char *)expected;
         unsigned char *ipn = (unsigned char *)new;
 
@@ -59,6 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
                                    enum jump_label_type type,
                                    int init)
 {
+        void *code = (void *)jump_entry_code(entry);
         struct insn old, new;
 
         if (type == JUMP_LABEL_JMP) {
@@ -69,13 +70,13 @@ static void __jump_label_transform(struct jump_entry *entry,
                 jump_label_make_nop(entry, &new);
         }
         if (init) {
-                if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
+                if (memcmp(code, &orignop, sizeof(orignop)))
                         jump_label_bug(entry, &orignop, &new);
         } else {
-                if (memcmp((void *)entry->code, &old, sizeof(old)))
+                if (memcmp(code, &old, sizeof(old)))
                         jump_label_bug(entry, &old, &new);
         }
-        s390_kernel_write((void *)entry->code, &new, sizeof(new));
+        s390_kernel_write(code, &new, sizeof(new));
 }
 
 static int __sm_arch_jump_label_transform(void *data)


@@ -64,6 +64,7 @@ SECTIONS
         __start_ro_after_init = .;
         .data..ro_after_init : {
                 *(.data..ro_after_init)
+                JUMP_TABLE_DATA
         }
         EXCEPTION_TABLE(16)
         . = ALIGN(PAGE_SIZE);


@@ -119,6 +119,7 @@ config X86
         select HAVE_ARCH_AUDITSYSCALL
         select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
         select HAVE_ARCH_JUMP_LABEL
+        select HAVE_ARCH_JUMP_LABEL_RELATIVE
         select HAVE_ARCH_KASAN if X86_64
         select HAVE_ARCH_KGDB
         select HAVE_ARCH_MMAP_RND_BITS if MMU


@@ -62,8 +62,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
 #define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
 #define R_X86_64_8 14 /* Direct 8 bit sign extended */
 #define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-
-#define R_X86_64_NUM 16
+#define R_X86_64_PC64 24 /* Place relative 64-bit signed */
 
 /*
  * These are used to set parameters in the core dumps.


@@ -37,7 +37,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
                 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
                 ".pushsection __jump_table, \"aw\" \n\t"
                 _ASM_ALIGN "\n\t"
-                _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+                ".long 1b - ., %l[l_yes] - . \n\t"
+                _ASM_PTR "%c0 + %c1 - .\n\t"
                 ".popsection \n\t"
                 : : "i" (key), "i" (branch) : : l_yes);
 
@@ -53,7 +54,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
                 "2:\n\t"
                 ".pushsection __jump_table, \"aw\" \n\t"
                 _ASM_ALIGN "\n\t"
-                _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+                ".long 1b - ., %l[l_yes] - . \n\t"
+                _ASM_PTR "%c0 + %c1 - .\n\t"
                 ".popsection \n\t"
                 : : "i" (key), "i" (branch) : : l_yes);
 
@@ -62,18 +64,6 @@ l_yes:
         return true;
 }
 
-#ifdef CONFIG_X86_64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
-        jump_label_t code;
-        jump_label_t target;
-        jump_label_t key;
-};
-
 #else   /* __ASSEMBLY__ */
 
 .macro STATIC_JUMP_IF_TRUE target, key, def
@@ -88,7 +78,8 @@ struct jump_entry {
         .endif
         .pushsection __jump_table, "aw"
         _ASM_ALIGN
-        _ASM_PTR .Lstatic_jump_\@, \target, \key
+        .long .Lstatic_jump_\@ - ., \target - .
+        _ASM_PTR \key - .
         .popsection
 .endm
 
@@ -104,7 +95,8 @@ struct jump_entry {
         .endif
         .pushsection __jump_table, "aw"
         _ASM_ALIGN
-        _ASM_PTR .Lstatic_jump_\@, \target, \key + 1
+        .long .Lstatic_jump_\@ - ., \target - .
+        _ASM_PTR \key + 1 - .
         .popsection
 .endm


@@ -42,55 +42,40 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
                                          void *(*poker)(void *, const void *, size_t),
                                          int init)
 {
-        union jump_code_union code;
+        union jump_code_union jmp;
         const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
         const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
+        const void *expect, *code;
+        int line;
+
+        jmp.jump = 0xe9;
+        jmp.offset = jump_entry_target(entry) -
+                     (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
 
         if (early_boot_irqs_disabled)
                 poker = text_poke_early;
 
         if (type == JUMP_LABEL_JMP) {
                 if (init) {
                         /*
                          * Jump label is enabled for the first time.
                          * So we expect a default_nop...
                          */
-                        if (unlikely(memcmp((void *)entry->code, default_nop, 5)
-                                     != 0))
-                                bug_at((void *)entry->code, __LINE__);
+                        expect = default_nop; line = __LINE__;
                 } else {
                         /*
                          * ...otherwise expect an ideal_nop. Otherwise
                          * something went horribly wrong.
                          */
-                        if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
-                                     != 0))
-                                bug_at((void *)entry->code, __LINE__);
+                        expect = ideal_nop; line = __LINE__;
                 }
 
-                code.jump = 0xe9;
-                code.offset = entry->target -
-                                (entry->code + JUMP_LABEL_NOP_SIZE);
+                code = &jmp.code;
         } else {
                 /*
                  * We are disabling this jump label. If it is not what
                  * we think it is, then something must have gone wrong.
                  * If this is the first initialization call, then we
                  * are converting the default nop to the ideal nop.
                  */
                 if (init) {
-                        if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
-                                bug_at((void *)entry->code, __LINE__);
+                        expect = default_nop; line = __LINE__;
                 } else {
-                        code.jump = 0xe9;
-                        code.offset = entry->target -
-                                (entry->code + JUMP_LABEL_NOP_SIZE);
-                        if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
-                                bug_at((void *)entry->code, __LINE__);
+                        expect = &jmp.code; line = __LINE__;
                 }
-                memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
+
+                code = ideal_nop;
         }
 
+        if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
+                bug_at((void *)jump_entry_code(entry), line);
+
         /*
          * Make text_poke_bp() a default fallback poker.
          *
@@ -99,11 +84,14 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
          * always nop being the 'currently valid' instruction
          *
          */
-        if (poker)
-                (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
-        else
-                text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE,
-                             (void *)entry->code + JUMP_LABEL_NOP_SIZE);
+        if (poker) {
+                (*poker)((void *)jump_entry_code(entry), code,
+                         JUMP_LABEL_NOP_SIZE);
+                return;
+        }
+
+        text_poke_bp((void *)jump_entry_code(entry), code, JUMP_LABEL_NOP_SIZE,
+                     (void *)jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
 }
 
 void arch_jump_label_transform(struct jump_entry *entry,


@@ -201,6 +201,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                                 goto overflow;
 #endif
                         break;
+                case R_X86_64_PC64:
+                        if (*(u64 *)loc != 0)
+                                goto invalid_relocation;
+                        val -= (u64)loc;
+                        *(u64 *)loc = val;
+                        break;
                 default:
                         pr_err("%s: Unknown rela relocation: %llu\n",
                                me->name, ELF64_R_TYPE(rel[i].r_info));

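The new R_X86_64_PC64 case computes the standard S + A - P relocation: val arrives holding the symbol value plus addend, and subtracting the relocation address loc leaves the 64-bit place-relative offset that the relative __jump_table entries need. A stand-alone illustration of the same arithmetic (hypothetical values, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    /* R_X86_64_PC64: *loc = S + A - P */
    static void apply_pc64(uint64_t *loc, uint64_t sym, int64_t addend)
    {
            uint64_t val = sym + addend;    /* S + A, as in apply_relocate_add() */

            val -= (uint64_t)loc;           /* subtract P, the place being patched */
            *loc = val;
    }

    int main(void)
    {
            uint64_t slot = 0;              /* must start zeroed, like the kernel's check */

            apply_pc64(&slot, (uint64_t)&slot + 0x40, 0);
            assert((int64_t)slot == 0x40);  /* target sits 0x40 bytes past the slot */
            return 0;
    }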

@@ -196,6 +196,7 @@ static const char *rel_type(unsigned type)
 #if ELF_BITS == 64
                 REL_TYPE(R_X86_64_NONE),
                 REL_TYPE(R_X86_64_64),
+                REL_TYPE(R_X86_64_PC64),
                 REL_TYPE(R_X86_64_PC32),
                 REL_TYPE(R_X86_64_GOT32),
                 REL_TYPE(R_X86_64_PLT32),
@@ -782,6 +783,15 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
                 add_reloc(&relocs32neg, offset);
                 break;
 
+        case R_X86_64_PC64:
+                /*
+                 * Only used by jump labels
+                 */
+                if (is_percpu_sym(sym, symname))
+                        die("Invalid R_X86_64_PC64 relocation against per-CPU symbol %s\n",
+                            symname);
+                break;
+
         case R_X86_64_32:
         case R_X86_64_32S:
         case R_X86_64_64:


@@ -116,8 +116,7 @@ do { \
 #define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
 #define R_X86_64_8 14 /* Direct 8 bit sign extended */
 #define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-
-#define R_X86_64_NUM 16
+#define R_X86_64_PC64 24 /* Place relative 64-bit signed */
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.


@@ -253,10 +253,6 @@
         STRUCT_ALIGN(); \
         *(__tracepoints) \
         /* implement dynamic printk debug */ \
-        . = ALIGN(8); \
-        __start___jump_table = .; \
-        KEEP(*(__jump_table)) \
-        __stop___jump_table = .; \
         . = ALIGN(8); \
         __start___verbose = .; \
         KEEP(*(__verbose)) \
@@ -300,6 +296,12 @@
         . = __start_init_task + THREAD_SIZE; \
         __end_init_task = .;
 
+#define JUMP_TABLE_DATA \
+        . = ALIGN(8); \
+        __start___jump_table = .; \
+        KEEP(*(__jump_table)) \
+        __stop___jump_table = .;
+
 /*
  * Allow architectures to handle ro_after_init data on their
  * own by defining an empty RO_AFTER_INIT_DATA.
@@ -308,6 +310,7 @@
 #define RO_AFTER_INIT_DATA \
         __start_ro_after_init = .; \
         *(.data..ro_after_init) \
+        JUMP_TABLE_DATA \
         __end_ro_after_init = .;
 #endif


@@ -119,6 +119,68 @@ struct static_key {
 
 #ifdef HAVE_JUMP_LABEL
 #include <asm/jump_label.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+
+struct jump_entry {
+        s32 code;
+        s32 target;
+        long key;       // key may be far away from the core kernel under KASLR
+};
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+        return (unsigned long)&entry->code + entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+        return (unsigned long)&entry->target + entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+        long offset = entry->key & ~3L;
+
+        return (struct static_key *)((unsigned long)&entry->key + offset);
+}
+
+#else
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+        return entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+        return entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+        return (struct static_key *)((unsigned long)entry->key & ~3UL);
+}
+
+#endif
+
+static inline bool jump_entry_is_branch(const struct jump_entry *entry)
+{
+        return (unsigned long)entry->key & 1UL;
+}
+
+static inline bool jump_entry_is_init(const struct jump_entry *entry)
+{
+        return (unsigned long)entry->key & 2UL;
+}
+
+static inline void jump_entry_set_init(struct jump_entry *entry)
+{
+        entry->key |= 2;
+}
+
+#endif
 #endif
 
 #ifndef __ASSEMBLY__
@@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
 extern void jump_label_init(void);
-extern void jump_label_invalidate_initmem(void);
 extern void jump_label_lock(void);
 extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void)
         static_key_initialized = true;
 }
 
-static inline void jump_label_invalidate_initmem(void) {}
-
 static __always_inline bool static_key_false(struct static_key *key)
 {
         if (unlikely(static_key_count(key) > 0))

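In the relative layout above, key does double duty: struct static_key is at least 4-byte aligned, so the two low bits of the stored value are free, and the patch uses bit 0 for the branch polarity and bit 1 for the "entry refers to __init code" flag, which is why jump_entry_key() masks with ~3. A compact model of that tagged-field scheme (illustrative only, simplified to an absolute pointer):

    #include <assert.h>
    #include <stdint.h>

    struct key { int enabled; } __attribute__((aligned(4)));

    /* Pack a key address with the two flag bits, as __jump_table entries do. */
    static long pack_key(struct key *k, int branch, int init)
    {
            return (long)(uintptr_t)k | (branch ? 1L : 0L) | (init ? 2L : 0L);
    }

    int main(void)
    {
            struct key k;
            long v = pack_key(&k, 1, 1);

            assert((struct key *)(v & ~3L) == &k);  /* jump_entry_key() */
            assert(v & 1L);                         /* jump_entry_is_branch() */
            assert(v & 2L);                         /* jump_entry_is_init() */
            return 0;
    }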

@@ -1064,7 +1064,6 @@ static int __ref kernel_init(void *unused)
         /* need to finish all async __init code before freeing the memory */
         async_synchronize_full();
         ftrace_free_init_mem();
-        jump_label_invalidate_initmem();
         free_initmem();
         mark_readonly();


@@ -38,23 +38,43 @@ static int jump_label_cmp(const void *a, const void *b)
         const struct jump_entry *jea = a;
         const struct jump_entry *jeb = b;
 
-        if (jea->key < jeb->key)
+        if (jump_entry_key(jea) < jump_entry_key(jeb))
                 return -1;
 
-        if (jea->key > jeb->key)
+        if (jump_entry_key(jea) > jump_entry_key(jeb))
                 return 1;
 
         return 0;
 }
 
+static void jump_label_swap(void *a, void *b, int size)
+{
+        long delta = (unsigned long)a - (unsigned long)b;
+        struct jump_entry *jea = a;
+        struct jump_entry *jeb = b;
+        struct jump_entry tmp = *jea;
+
+        jea->code = jeb->code - delta;
+        jea->target = jeb->target - delta;
+        jea->key = jeb->key - delta;
+
+        jeb->code = tmp.code + delta;
+        jeb->target = tmp.target + delta;
+        jeb->key = tmp.key + delta;
+}
+
 static void
 jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 {
         unsigned long size;
+        void *swapfn = NULL;
+
+        if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
+                swapfn = jump_label_swap;
 
         size = (((unsigned long)stop - (unsigned long)start)
                                         / sizeof(struct jump_entry));
-        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
 }
 
 static void jump_label_update(struct static_key *key);
 
@@ -261,8 +281,8 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit);
 
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 {
-        if (entry->code <= (unsigned long)end &&
-            entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+        if (jump_entry_code(entry) <= (unsigned long)end &&
+            jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                 return 1;
 
         return 0;
 
@@ -321,16 +341,6 @@ static inline void static_key_set_linked(struct static_key *key)
         key->type |= JUMP_TYPE_LINKED;
 }
 
-static inline struct static_key *jump_entry_key(struct jump_entry *entry)
-{
-        return (struct static_key *)((unsigned long)entry->key & ~1UL);
-}
-
-static bool jump_entry_branch(struct jump_entry *entry)
-{
-        return (unsigned long)entry->key & 1UL;
-}
-
 /***
  * A 'struct static_key' uses a union such that it either points directly
  * to a table of 'struct jump_entry' or to a linked list of modules which in
@@ -355,7 +365,7 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry)
 {
         struct static_key *key = jump_entry_key(entry);
         bool enabled = static_key_enabled(key);
-        bool branch = jump_entry_branch(entry);
+        bool branch = jump_entry_is_branch(entry);
 
         /* See the comment in linux/jump_label.h */
         return enabled ^ branch;
@@ -363,19 +373,20 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry)
 
 static void __jump_label_update(struct static_key *key,
                                 struct jump_entry *entry,
-                                struct jump_entry *stop)
+                                struct jump_entry *stop,
+                                bool init)
 {
         for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                 /*
                  * An entry->code of 0 indicates an entry which has been
                  * disabled because it was in an init text area.
                  */
-                if (entry->code) {
-                        if (kernel_text_address(entry->code))
+                if (init || !jump_entry_is_init(entry)) {
+                        if (kernel_text_address(jump_entry_code(entry)))
                                 arch_jump_label_transform(entry, jump_label_type(entry));
                         else
                                 WARN_ONCE(1, "can't patch jump_label at %pS",
-                                          (void *)(unsigned long)entry->code);
+                                          (void *)jump_entry_code(entry));
                 }
         }
 }
@@ -410,6 +421,9 @@ void __init jump_label_init(void)
                 if (jump_label_type(iter) == JUMP_LABEL_NOP)
                         arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
 
+                if (init_section_contains((void *)jump_entry_code(iter), 1))
+                        jump_entry_set_init(iter);
+
                 iterk = jump_entry_key(iter);
                 if (iterk == key)
                         continue;
@@ -422,26 +436,13 @@ void __init jump_label_init(void)
         cpus_read_unlock();
 }
 
-/* Disable any jump label entries in __init/__exit code */
-void __init jump_label_invalidate_initmem(void)
-{
-        struct jump_entry *iter_start = __start___jump_table;
-        struct jump_entry *iter_stop = __stop___jump_table;
-        struct jump_entry *iter;
-
-        for (iter = iter_start; iter < iter_stop; iter++) {
-                if (init_section_contains((void *)(unsigned long)iter->code, 1))
-                        iter->code = 0;
-        }
-}
-
 #ifdef CONFIG_MODULES
 
 static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
 {
         struct static_key *key = jump_entry_key(entry);
         bool type = static_key_type(key);
-        bool branch = jump_entry_branch(entry);
+        bool branch = jump_entry_is_branch(entry);
 
         /* See the comment in linux/jump_label.h */
         return type ^ branch;
@@ -514,7 +515,8 @@ static void __jump_label_mod_update(struct static_key *key)
                         stop = __stop___jump_table;
                 else
                         stop = m->jump_entries + m->num_jump_entries;
-                __jump_label_update(key, mod->entries, stop);
+                __jump_label_update(key, mod->entries, stop,
+                                    m && m->state == MODULE_STATE_COMING);
         }
 }
 
@@ -560,12 +562,15 @@ static int jump_label_add_module(struct module *mod)
         for (iter = iter_start; iter < iter_stop; iter++) {
                 struct static_key *iterk;
 
+                if (within_module_init(jump_entry_code(iter), mod))
+                        jump_entry_set_init(iter);
+
                 iterk = jump_entry_key(iter);
                 if (iterk == key)
                         continue;
 
                 key = iterk;
-                if (within_module(iter->key, mod)) {
+                if (within_module((unsigned long)key, mod)) {
                         static_key_set_entries(key, iter);
                         continue;
                 }
@@ -595,7 +600,7 @@ static int jump_label_add_module(struct module *mod)
 
                 /* Only update if we've changed from our initial state */
                 if (jump_label_type(iter) != jump_label_init_type(iter))
-                        __jump_label_update(key, iter, iter_stop);
+                        __jump_label_update(key, iter, iter_stop, true);
         }
 
         return 0;
@@ -615,7 +620,7 @@ static void jump_label_del_module(struct module *mod)
 
                 key = jump_entry_key(iter);
 
-                if (within_module(iter->key, mod))
+                if (within_module((unsigned long)key, mod))
                         continue;
 
                 /* No memory during module load */
@@ -651,19 +656,6 @@ static void jump_label_del_module(struct module *mod)
         }
 }
 
-/* Disable any jump label entries in module init code */
-static void jump_label_invalidate_module_init(struct module *mod)
-{
-        struct jump_entry *iter_start = mod->jump_entries;
-        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
-        struct jump_entry *iter;
-
-        for (iter = iter_start; iter < iter_stop; iter++) {
-                if (within_module_init(iter->code, mod))
-                        iter->code = 0;
-        }
-}
-
 static int
 jump_label_module_notify(struct notifier_block *self, unsigned long val,
                          void *data)
@@ -685,9 +677,6 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
         case MODULE_STATE_GOING:
                 jump_label_del_module(mod);
                 break;
-        case MODULE_STATE_LIVE:
-                jump_label_invalidate_module_init(mod);
-                break;
         }
 
         jump_label_unlock();
@@ -757,7 +746,8 @@ static void jump_label_update(struct static_key *key)
         entry = static_key_entries(key);
         /* if there are no users, entry can be NULL */
         if (entry)
-                __jump_label_update(key, entry, stop);
+                __jump_label_update(key, entry, stop,
+                                    system_state < SYSTEM_RUNNING);
 }
 
 #ifdef CONFIG_STATIC_KEYS_SELFTEST

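jump_label_swap() above exists because sorting self-relative records by plain byte swapping would corrupt them: each field encodes target minus field address, so a record that moves by delta bytes must have every stored offset adjusted by the opposite amount. A small demonstration with one relative field per record (illustrative, not the kernel structure):

    #include <assert.h>
    #include <stdint.h>

    struct rec { int32_t off; };    /* refers to (uintptr_t)&off + off */

    static uintptr_t deref(const struct rec *r)
    {
            return (uintptr_t)&r->off + r->off;
    }

    /* Swap two records while keeping their self-relative fields valid,
     * mirroring what jump_label_swap() does for code, target and key. */
    static void swap_rel(struct rec *a, struct rec *b)
    {
            long delta = (char *)a - (char *)b;
            struct rec tmp = *a;

            a->off = b->off - delta;        /* b's payload, rebased to a */
            b->off = tmp.off + delta;       /* a's payload, rebased to b */
    }

    int main(void)
    {
            struct rec r[2];
            uintptr_t t0 = (uintptr_t)r + 64, t1 = (uintptr_t)r + 128;

            r[0].off = (int32_t)(t0 - (uintptr_t)&r[0].off);
            r[1].off = (int32_t)(t1 - (uintptr_t)&r[1].off);

            swap_rel(&r[0], &r[1]);
            assert(deref(&r[0]) == t1 && deref(&r[1]) == t0);
            return 0;
    }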

@@ -3315,6 +3315,15 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
          * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
          */
         ndx = find_sec(info, ".data..ro_after_init");
         if (ndx)
                 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
+        /*
+         * Mark the __jump_table section as ro_after_init as well: these data
+         * structures are never modified, with the exception of entries that
+         * refer to code in the __init section, which are annotated as such
+         * at module load time.
+         */
+        ndx = find_sec(info, "__jump_table");
+        if (ndx)
+                info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;


@@ -30,9 +30,9 @@
 #define EX_ORIG_OFFSET 0
 #define EX_NEW_OFFSET 4
 
-#define JUMP_ENTRY_SIZE 24
+#define JUMP_ENTRY_SIZE 16
 #define JUMP_ORIG_OFFSET 0
-#define JUMP_NEW_OFFSET 8
+#define JUMP_NEW_OFFSET 4
 
 #define ALT_ENTRY_SIZE 13
 #define ALT_ORIG_OFFSET 0
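These objtool constants follow directly from the new entry layout: two s32 fields plus one long give a 16-byte entry on 64-bit targets (down from 24), with code still at offset 0 and target now at offset 4. A quick layout check under those assumptions (LP64, C11), mirroring the struct from the linux/jump_label.h hunk:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct jump_entry_rel { /* CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE layout */
            int32_t code;
            int32_t target;
            long key;
    };

    static_assert(sizeof(struct jump_entry_rel) == 16, "JUMP_ENTRY_SIZE");
    static_assert(offsetof(struct jump_entry_rel, code) == 0, "JUMP_ORIG_OFFSET");
    static_assert(offsetof(struct jump_entry_rel, target) == 4, "JUMP_NEW_OFFSET");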