
Merge branch 'akpm' (patches from Andrew)

Merge yet more updates from Andrew Morton:

- various misc things

- kexec updates

- sysctl core updates

- scripts/gdb updates

- checkpoint-restart updates

- ipc updates

- kernel/watchdog updates

- Kees's "rough equivalent to the glibc _FORTIFY_SOURCE=1 feature"

- "stackprotector: ascii armor the stack canary"

- more MM bits

- checkpatch updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (96 commits)
  writeback: rework wb_[dec|inc]_stat family of functions
  ARM: samsung: usb-ohci: move inline before return type
  video: fbdev: omap: move inline before return type
  video: fbdev: intelfb: move inline before return type
  USB: serial: safe_serial: move __inline__ before return type
  drivers: tty: serial: move inline before return type
  drivers: s390: move static and inline before return type
  x86/efi: move asmlinkage before return type
  sh: move inline before return type
  MIPS: SMP: move asmlinkage before return type
  m68k: coldfire: move inline before return type
  ia64: sn: pci: move inline before type
  ia64: move inline before return type
  FRV: tlbflush: move asmlinkage before return type
  CRIS: gpio: move inline before return type
  ARM: HP Jornada 7XX: move inline before return type
  ARM: KVM: move asmlinkage before type
  checkpatch: improve the STORAGE_CLASS test
  mm, migration: do not trigger OOM killer when migrating memory
  drm/i915: use __GFP_RETRY_MAYFAIL
  ...
Linus Torvalds 2017-07-13 12:38:49 -07:00
commit ad51271afc
155 changed files with 3229 additions and 970 deletions

View File

@ -42,7 +42,7 @@ requirements you pass the flag GFP_DMA to kmalloc.
Unfortunately the memory available for ISA DMA is scarce so unless you
allocate the memory during boot-up it's a good idea to also pass
__GFP_REPEAT and __GFP_NOWARN to make the allocator try a bit harder.
__GFP_RETRY_MAYFAIL and __GFP_NOWARN to make the allocator try a bit harder.
(This scarcity also means that you should allocate the buffer as
early as possible and not release it until the driver is unloaded.)
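
For concreteness, here is a minimal kernel-style sketch of the allocation pattern this paragraph describes; the driver name and buffer size are made up, and the same flag combination shows up in the wbsd change further down in this diff.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#define MYDRV_DMA_SIZE	(64 * 1024)	/* hypothetical ISA DMA buffer size */

static void *mydrv_dma_buffer;

/* Allocate the ISA DMA buffer once, as early as possible, and keep it. */
static int mydrv_alloc_dma_buffer(void)
{
	mydrv_dma_buffer = kmalloc(MYDRV_DMA_SIZE,
				   GFP_KERNEL | GFP_DMA |
				   __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	return mydrv_dma_buffer ? 0 : -ENOMEM;
}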

View File

@ -134,6 +134,22 @@ use the boot option:
fail_futex=
mmc_core.fail_request=<interval>,<probability>,<space>,<times>
o proc entries
- /proc/self/task/<current-tid>/fail-nth:
Writing an integer N to this file makes the N-th call in the current task
fail (N is 0-based). Reading from this file returns a single character,
'Y' or 'N', indicating whether the fault set up by a previous write to
this file was injected, and disables the fault if it was not yet injected.
Note that this file enables all types of faults (slab, futex, etc).
This setting takes precedence over all other generic debugfs settings
like probability, interval, times, etc. But per-capability settings
(e.g. fail_futex/ignore-private) take precedence over it.
This feature is intended for systematic testing of faults in a single
system call. See an example below.
How to add new fault injection capability
-----------------------------------------
@ -278,3 +294,65 @@ allocation failure.
# env FAILCMD_TYPE=fail_page_alloc \
./tools/testing/fault-injection/failcmd.sh --times=100 \
-- make -C tools/testing/selftests/ run_tests
Systematic faults using fail-nth
---------------------------------
The following code systematically faults 0-th, 1-st, 2-nd and so on
capabilities in the socketpair() system call.
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
int main()
{
int i, err, res, fail_nth, fds[2];
char buf[128];
system("echo N > /sys/kernel/debug/failslab/ignore-gfp-wait");
sprintf(buf, "/proc/self/task/%ld/fail-nth", syscall(SYS_gettid));
fail_nth = open(buf, O_RDWR);
for (i = 0;; i++) {
sprintf(buf, "%d", i);
write(fail_nth, buf, strlen(buf));
res = socketpair(AF_LOCAL, SOCK_STREAM, 0, fds);
err = errno;
read(fail_nth, buf, 1);
if (res == 0) {
close(fds[0]);
close(fds[1]);
}
printf("%d-th fault %c: res=%d/%d\n", i, buf[0], res, err);
if (buf[0] != 'Y')
break;
}
return 0;
}
An example output:
0-th fault Y: res=-1/23
1-th fault Y: res=-1/23
2-th fault Y: res=-1/23
3-th fault Y: res=-1/12
4-th fault Y: res=-1/12
5-th fault Y: res=-1/23
6-th fault Y: res=-1/23
7-th fault Y: res=-1/23
8-th fault Y: res=-1/12
9-th fault Y: res=-1/12
10-th fault Y: res=-1/12
11-th fault Y: res=-1/12
12-th fault Y: res=-1/12
13-th fault Y: res=-1/12
14-th fault Y: res=-1/12
15-th fault Y: res=-1/12
16-th fault N: res=0/12

View File

@ -1786,12 +1786,16 @@ pair provide additional information particular to the objects they represent.
pos: 0
flags: 02
mnt_id: 9
tfd: 5 events: 1d data: ffffffffffffffff
tfd: 5 events: 1d data: ffffffffffffffff pos:0 ino:61af sdev:7
where 'tfd' is the target file descriptor number in decimal form,
'events' is the event mask being watched and 'data' is the data
associated with the target [see epoll(7) for more details].
'pos' is the current offset of the target file in decimal form
[see lseek(2)]; 'ino' and 'sdev' are the inode number of the target file
and the device number of the filesystem it resides on, both in hex format.
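
As a quick illustration (not part of the patch, and written against the ordinary user-space epoll API), the sketch below creates an epoll instance watching stdin and dumps its fdinfo; on a kernel with this change the output includes the extended tfd line shown above.

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	char path[64], line[256];
	FILE *f;
	int epfd = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN, .data.u64 = ~0ULL };

	/* Watch stdin so the epoll instance has one target fd. */
	if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0)
		return 1;

	/* Print this process's view of the epoll fd, including the tfd lines. */
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", epfd);
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	close(epfd);
	return 0;
}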
Fsnotify files
~~~~~~~~~~~~~~
For inotify files the format is the following

View File

@ -112,8 +112,8 @@ There are two possible methods of using Kdump.
2) Or use the system kernel binary itself as dump-capture kernel and there is
no need to build a separate dump-capture kernel. This is possible
only with the architectures which support a relocatable kernel. As
of today, i386, x86_64, ppc64, ia64 and arm architectures support relocatable
kernel.
of today, i386, x86_64, ppc64, ia64, arm and arm64 architectures support
relocatable kernel.
Building a relocatable kernel is advantageous from the point of view that
one does not have to build a second kernel for capturing the dump. But
@ -339,7 +339,7 @@ For arm:
For arm64:
- Use vmlinux or Image
If you are using a uncompressed vmlinux image then use following command
If you are using an uncompressed vmlinux image then use following command
to load dump-capture kernel.
kexec -p <dump-capture-kernel-vmlinux-image> \
@ -361,6 +361,12 @@ to load dump-capture kernel.
--dtb=<dtb-for-dump-capture-kernel> \
--append="root=<root-dev> <arch-specific-options>"
If you are using an uncompressed Image, then use following command
to load dump-capture kernel.
kexec -p <dump-capture-kernel-Image> \
--initrd=<initrd-for-dump-capture-kernel> \
--append="root=<root-dev> <arch-specific-options>"
Please note, that --args-linux does not need to be specified for ia64.
It is planned to make this a no-op on that architecture, but for now

View File

@ -198,9 +198,6 @@ config HAVE_KPROBES_ON_FTRACE
config HAVE_NMI
bool
config HAVE_NMI_WATCHDOG
depends on HAVE_NMI
bool
#
# An arch should select this if it provides all these things:
#
@ -226,6 +223,12 @@ config GENERIC_SMP_IDLE_THREAD
config GENERIC_IDLE_POLL_SETUP
bool
config ARCH_HAS_FORTIFY_SOURCE
bool
help
An architecture should select this when it can successfully
build and run with CONFIG_FORTIFY_SOURCE.
# Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h
config ARCH_HAS_SET_MEMORY
bool
@ -288,6 +291,28 @@ config HAVE_PERF_EVENTS_NMI
subsystem. Also has support for calculating CPU cycle events
to determine how many clock cycles in a given period.
config HAVE_HARDLOCKUP_DETECTOR_PERF
bool
depends on HAVE_PERF_EVENTS_NMI
help
The arch chooses to use the generic perf-NMI-based hardlockup
detector. Must define HAVE_PERF_EVENTS_NMI.
config HAVE_NMI_WATCHDOG
depends on HAVE_NMI
bool
help
The arch provides a low level NMI watchdog. It provides
asm/nmi.h, and defines its own arch_touch_nmi_watchdog().
config HAVE_HARDLOCKUP_DETECTOR_ARCH
bool
select HAVE_NMI_WATCHDOG
help
The arch chooses to provide its own hardlockup detector, which is
a superset of the HAVE_NMI_WATCHDOG. It also conforms to config
interfaces and parameters provided by hardlockup detector subsystem.
config HAVE_PERF_REGS
bool
help

View File

@ -110,8 +110,8 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
static inline bool __vfp_enabled(void)
{
return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
@ -120,8 +120,8 @@ static inline bool __vfp_enabled(void)
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
int asmlinkage __guest_enter(struct kvm_vcpu *vcpu,
asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
struct kvm_cpu_context *host);
int asmlinkage __hyp_do_panic(const char *, int, u32);
asmlinkage int __hyp_do_panic(const char *, int, u32);
#endif /* __ARM_KVM_HYP_H__ */

View File

@ -33,7 +33,7 @@ static unsigned long jornada_ssp_flags;
* we need to reverse all data we receive from the mcu due to its physical location
* returns : 01110111 -> 11101110
*/
u8 inline jornada_ssp_reverse(u8 byte)
inline u8 jornada_ssp_reverse(u8 byte)
{
return
((0x80 & byte) >> 7) |

View File

@ -12,6 +12,7 @@ config ARM64
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV

View File

@ -30,6 +30,7 @@ static __always_inline void boot_init_stack_canary(void)
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
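
A user-space sketch of the "ascii armor" idea behind the new CANARY_MASK line (the mask value below is illustrative, not the kernel's definition): forcing one canary byte to zero guarantees the canary contains a NUL, so a string-based overflow cannot reproduce it.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative mask only -- not the kernel's CANARY_MASK definition. */
#define DEMO_CANARY_MASK 0xffffffffffffff00ULL

int main(void)
{
	uint64_t canary = 0x1122334455667788ULL;	/* stand-in for a random value */

	canary &= DEMO_CANARY_MASK;			/* low byte is forced to 0x00 */
	printf("armored canary: 0x%016" PRIx64 "\n", canary);
	return 0;
}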

View File

@ -63,6 +63,11 @@ extern int memcmp(const void *, const void *, size_t);
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif
#endif
#endif

View File

@ -35,7 +35,7 @@
* Leave enough space between the mmap area and the stack to honour ulimit in
* the face of randomisation.
*/
#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP/6*5)
static int mmap_is_legacy(void)
@ -65,6 +65,11 @@ unsigned long arch_mmap_rnd(void)
static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
gap += pad;
if (gap < MIN_GAP)
gap = MIN_GAP;
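
A stand-alone sketch of why the "gap + pad > gap" test is a safe overflow guard (all values below are made up): when the stack rlimit is close to RLIM_INFINITY the unsigned addition wraps, the comparison fails, and the padding is simply skipped.

#include <inttypes.h>
#include <stdio.h>

/* Toy version of the guard used in mmap_base(); limits are arbitrary here. */
static uint64_t clamped_gap(uint64_t gap, uint64_t pad, uint64_t min_gap)
{
	/* Values close to RLIM_INFINITY can overflow: add the pad only if it fits. */
	if (gap + pad > gap)
		gap += pad;
	if (gap < min_gap)
		gap = min_gap;
	return gap;
}

int main(void)
{
	uint64_t pad = 32ULL << 20;		/* pretend randomisation + guard gap */
	uint64_t min_gap = 128ULL << 20;	/* the ~128 MB hole */

	printf("256 MiB rlimit -> gap %" PRIu64 "\n",
	       clamped_gap(256ULL << 20, pad, min_gap));
	printf("RLIM_INFINITY  -> gap %" PRIu64 " (pad skipped, no wraparound)\n",
	       clamped_gap(UINT64_MAX, pad, min_gap));
	return 0;
}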

View File

@ -9,4 +9,6 @@
#include <linux/nmi.h>
extern void arch_touch_nmi_watchdog(void);
#endif

View File

@ -190,7 +190,7 @@ static int __init init_nmi_wdt(void)
}
device_initcall(init_nmi_wdt);
void touch_nmi_watchdog(void)
void arch_touch_nmi_watchdog(void)
{
atomic_set(&nmi_touched[smp_processor_id()], 1);
}

View File

@ -399,7 +399,7 @@ out:
/* Main device API. ioctl's to read/set/clear bits, as well as to
* set alarms to wait for using a subsequent select().
*/
unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg)
inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg)
{
/* Set direction 0=unchanged 1=input,
* return mask with 1=input */
@ -450,7 +450,7 @@ unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg)
return dir_g_in_bits;
} /* setget_input */
unsigned long inline setget_output(struct gpio_private *priv, unsigned long arg)
inline unsigned long setget_output(struct gpio_private *priv, unsigned long arg)
{
if (USE_PORTS(priv)) {
*priv->dir = *priv->dir_shadow |=

View File

@ -18,10 +18,10 @@
#ifdef CONFIG_MMU
#ifndef __ASSEMBLY__
extern void asmlinkage __flush_tlb_all(void);
extern void asmlinkage __flush_tlb_mm(unsigned long contextid);
extern void asmlinkage __flush_tlb_page(unsigned long contextid, unsigned long start);
extern void asmlinkage __flush_tlb_range(unsigned long contextid,
extern asmlinkage void __flush_tlb_all(void);
extern asmlinkage void __flush_tlb_mm(unsigned long contextid);
extern asmlinkage void __flush_tlb_page(unsigned long contextid, unsigned long start);
extern asmlinkage void __flush_tlb_range(unsigned long contextid,
unsigned long start, unsigned long end);
#endif /* !__ASSEMBLY__ */

View File

@ -163,8 +163,3 @@ void arch_crash_save_vmcoreinfo(void)
#endif
}
phys_addr_t paddr_vmcoreinfo_note(void)
{
return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note);
}

View File

@ -334,7 +334,7 @@ static void ia64_mlogbuf_dump_from_init(void)
ia64_mlogbuf_dump();
}
static void inline
static inline void
ia64_mca_spin(const char *func)
{
if (monarch_cpu == smp_processor_id())

View File

@ -140,7 +140,7 @@ static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info,
/*
* Update the ate.
*/
void inline
inline void
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
volatile u64 ate)
{

View File

@ -52,7 +52,7 @@
* All registers defined in struct tioce will meet that criteria.
*/
static void inline
static inline void
tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
{
u64 mmr_base;
@ -78,7 +78,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
}
}
static void inline
static inline void
tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr)
{
u64 mmr_base;

View File

@ -35,7 +35,7 @@
#define EINT7 67 /* EDGE Port interrupt 7 */
static unsigned int irqebitmap[] = { 0, 1, 4, 7 };
static unsigned int inline irq2ebit(unsigned int irq)
static inline unsigned int irq2ebit(unsigned int irq)
{
return irqebitmap[irq - EINT0];
}
@ -51,7 +51,7 @@ static unsigned int inline irq2ebit(unsigned int irq)
#define EINT1 65 /* EDGE Port interrupt 1 */
#define EINT7 71 /* EDGE Port interrupt 7 */
static unsigned int inline irq2ebit(unsigned int irq)
static inline unsigned int irq2ebit(unsigned int irq)
{
return irq - EINT0;
}

View File

@ -116,7 +116,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER);
pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
if (pud)
pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
return pud;

View File

@ -47,7 +47,7 @@ extern int __cpu_logical_map[NR_CPUS];
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;
extern void asmlinkage smp_bootstrap(void);
extern asmlinkage void smp_bootstrap(void);
extern void calculate_cpu_foreign_map(void);

View File

@ -11,4 +11,6 @@
#ifndef _ASM_NMI_H
#define _ASM_NMI_H
extern void arch_touch_nmi_watchdog(void);
#endif /* _ASM_NMI_H */

View File

@ -50,9 +50,9 @@ watchdog_handler:
# we can't inline it)
#
###############################################################################
.globl touch_nmi_watchdog
.type touch_nmi_watchdog,@function
touch_nmi_watchdog:
.globl arch_touch_nmi_watchdog
.type arch_touch_nmi_watchdog,@function
arch_touch_nmi_watchdog:
clr d0
clr d1
mov watchdog_alert_counter, a0
@ -63,4 +63,4 @@ touch_nmi_watchdog:
lne
ret [],0
.size touch_nmi_watchdog,.-touch_nmi_watchdog
.size arch_touch_nmi_watchdog,.-arch_touch_nmi_watchdog

View File

@ -31,7 +31,7 @@ static unsigned int watchdog;
static unsigned int watchdog_hz = 1;
unsigned int watchdog_alert_counter[NR_CPUS];
EXPORT_SYMBOL(touch_nmi_watchdog);
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
/*
* the best way to detect whether a CPU has a 'hard lockup' problem

View File

@ -82,7 +82,7 @@ config NR_IRQS
config NMI_IPI
bool
depends on SMP && (DEBUGGER || KEXEC_CORE)
depends on SMP && (DEBUGGER || KEXEC_CORE || HARDLOCKUP_DETECTOR)
default y
config STACKTRACE_SUPPORT
@ -125,6 +125,7 @@ config PPC
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
@ -192,11 +193,13 @@ config PPC
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
select HAVE_OPROFILE
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if SMP

View File

@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
struct page *page;
page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_REPEAT),
page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
4);
if (!page)
return NULL;

View File

@ -1,4 +1,15 @@
#ifndef _ASM_NMI_H
#define _ASM_NMI_H
#ifdef CONFIG_HARDLOCKUP_DETECTOR
extern void arch_touch_nmi_watchdog(void);
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#else
static inline void arch_touch_nmi_watchdog(void) {}
#endif
#endif /* _ASM_NMI_H */

View File

@ -55,6 +55,8 @@ struct smp_ops_t {
int (*cpu_bootable)(unsigned int nr);
};
extern void smp_flush_nmi_ipi(u64 delay_us);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
extern void smp_generic_give_timebase(void);

View File

@ -38,6 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o

View File

@ -1314,6 +1314,31 @@ EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR) && defined(CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH)
#define MASKED_DEC_HANDLER_LABEL 3f
#define MASKED_DEC_HANDLER(_H) \
3: /* soft-nmi */ \
std r12,PACA_EXGEN+EX_R12(r13); \
GET_SCRATCH0(r10); \
std r10,PACA_EXGEN+EX_R13(r13); \
EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H)
EXC_COMMON_BEGIN(soft_nmi_common)
mr r10,r1
ld r1,PACAEMERGSP(r13)
ld r1,PACA_NMI_EMERG_SP(r13)
subi r1,r1,INT_FRAME_SIZE
EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
system_reset, soft_nmi_interrupt,
ADD_NVGPRS;ADD_RECONCILE)
b ret_from_except
#else
#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
#define MASKED_DEC_HANDLER(_H)
#endif
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@ -1336,7 +1361,7 @@ masked_##_H##interrupt: \
lis r10,0x7fff; \
ori r10,r10,0xffff; \
mtspr SPRN_DEC,r10; \
b 2f; \
b MASKED_DEC_HANDLER_LABEL; \
1: cmpwi r10,PACA_IRQ_DBELL; \
beq 2f; \
cmpwi r10,PACA_IRQ_HMI; \
@ -1351,7 +1376,8 @@ masked_##_H##interrupt: \
ld r11,PACA_EXGEN+EX_R11(r13); \
GET_SCRATCH0(r13); \
##_H##rfid; \
b .
b .; \
MASKED_DEC_HANDLER(_H)
/*
* Real mode exceptions actually use this too, but alternate

View File

@ -999,8 +999,7 @@ static int fadump_create_elfcore_headers(char *bufp)
phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
phdr->p_offset = phdr->p_paddr;
phdr->p_memsz = vmcoreinfo_max_size;
phdr->p_filesz = vmcoreinfo_max_size;
phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;
/* Increment number of program headers. */
(elf->e_phnum)++;

View File

@ -25,6 +25,7 @@
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/nmi.h> /* hardlockup_detector_disable() */
#include <asm/reg.h>
#include <asm/sections.h>
@ -718,6 +719,12 @@ static __init void kvm_free_tmp(void)
static int __init kvm_guest_init(void)
{
/*
* The hardlockup detector is likely to get false positives in
* KVM guests, so disable it by default.
*/
hardlockup_detector_disable();
if (!kvm_para_available())
goto free_tmp;

View File

@ -15,6 +15,9 @@
#undef DEBUG_PROM
/* we cannot use FORTIFY as it brings in new symbols */
#define __NO_FORTIFY
#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>

View File

@ -751,22 +751,3 @@ unsigned long memory_block_size_bytes(void)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
return ppc_proc_freq * watchdog_thresh;
}
/*
* The hardlockup detector breaks PMU event based branches and is likely
* to get false positives in KVM guests, so disable it by default.
*/
static int __init disable_hardlockup_detector(void)
{
hardlockup_detector_disable();
return 0;
}
early_initcall(disable_hardlockup_detector);
#endif

View File

@ -435,13 +435,31 @@ static void do_smp_send_nmi_ipi(int cpu)
}
}
void smp_flush_nmi_ipi(u64 delay_us)
{
unsigned long flags;
nmi_ipi_lock_start(&flags);
while (nmi_ipi_busy_count) {
nmi_ipi_unlock_end(&flags);
udelay(1);
if (delay_us) {
delay_us--;
if (!delay_us)
return;
}
nmi_ipi_lock_start(&flags);
}
nmi_ipi_unlock_end(&flags);
}
/*
* - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
* - fn is the target callback function.
* - delay_us > 0 is the delay before giving up waiting for targets to
* enter the handler, == 0 specifies indefinite delay.
*/
static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
unsigned long flags;
int me = raw_smp_processor_id();

View File

@ -0,0 +1,386 @@
/*
* Watchdog support on powerpc systems.
*
* Copyright 2017, IBM Corporation.
*
* This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
*/
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <asm/paca.h>
/*
* The watchdog has a simple timer that runs on each CPU, once per timer
* period. This is the heartbeat.
*
* Then there are checks to see if the heartbeat has not triggered on a CPU
* for the panic timeout period. Currently the watchdog only supports an
* SMP check, so the heartbeat only turns on when we have 2 or more CPUs.
*
* This is not an NMI watchdog, but Linux uses that name for a generic
* watchdog in some cases, so NMI gets used in some places.
*/
static cpumask_t wd_cpus_enabled __read_mostly;
static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
static DEFINE_PER_CPU(struct timer_list, wd_timer);
static DEFINE_PER_CPU(u64, wd_timer_tb);
/*
* These are for the SMP checker. CPUs clear their pending bit in their
* heartbeat. If the bitmask becomes empty, the time is noted and the
* bitmask is refilled.
*
* All CPUs clear their bit in the pending mask every timer period.
* Once all have cleared, the time is noted and the bits are reset.
* If the time since all clear was greater than the panic timeout,
* we can panic with the list of stuck CPUs.
*
* This will work best with NMI IPIs for crash code so the stuck CPUs
* can be pulled out to get their backtraces.
*/
static unsigned long __wd_smp_lock;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;
static inline void wd_smp_lock(unsigned long *flags)
{
/*
* Avoid locking layers if possible.
* This may be called from low level interrupt handlers at some
* point in future.
*/
local_irq_save(*flags);
while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
cpu_relax();
}
static inline void wd_smp_unlock(unsigned long *flags)
{
clear_bit_unlock(0, &__wd_smp_lock);
local_irq_restore(*flags);
}
static void wd_lockup_ipi(struct pt_regs *regs)
{
pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", raw_smp_processor_id());
print_modules();
print_irqtrace_events(current);
if (regs)
show_regs(regs);
else
dump_stack();
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
}
static void set_cpu_stuck(int cpu, u64 tb)
{
cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
if (cpumask_empty(&wd_smp_cpus_pending)) {
wd_smp_last_reset_tb = tb;
cpumask_andnot(&wd_smp_cpus_pending,
&wd_cpus_enabled,
&wd_smp_cpus_stuck);
}
}
static void watchdog_smp_panic(int cpu, u64 tb)
{
unsigned long flags;
int c;
wd_smp_lock(&flags);
/* Double check some things under lock */
if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
goto out;
if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
goto out;
if (cpumask_weight(&wd_smp_cpus_pending) == 0)
goto out;
pr_emerg("Watchdog CPU:%d detected Hard LOCKUP other CPUS:%*pbl\n",
cpu, cpumask_pr_args(&wd_smp_cpus_pending));
/*
* Try to trigger the stuck CPUs.
*/
for_each_cpu(c, &wd_smp_cpus_pending) {
if (c == cpu)
continue;
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
}
smp_flush_nmi_ipi(1000000);
/* Take the stuck CPU out of the watch group */
for_each_cpu(c, &wd_smp_cpus_pending)
set_cpu_stuck(c, tb);
out:
wd_smp_unlock(&flags);
printk_safe_flush();
/*
* printk_safe_flush() seems to require another print
* before anything actually goes out to console.
*/
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutself_cpu_backtrace();
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
}
static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
{
if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
unsigned long flags;
pr_emerg("Watchdog CPU:%d became unstuck\n", cpu);
wd_smp_lock(&flags);
cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
wd_smp_unlock(&flags);
}
return;
}
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
if (cpumask_empty(&wd_smp_cpus_pending)) {
unsigned long flags;
wd_smp_lock(&flags);
if (cpumask_empty(&wd_smp_cpus_pending)) {
wd_smp_last_reset_tb = tb;
cpumask_andnot(&wd_smp_cpus_pending,
&wd_cpus_enabled,
&wd_smp_cpus_stuck);
}
wd_smp_unlock(&flags);
}
}
static void watchdog_timer_interrupt(int cpu)
{
u64 tb = get_tb();
per_cpu(wd_timer_tb, cpu) = tb;
wd_smp_clear_cpu_pending(cpu, tb);
if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
watchdog_smp_panic(cpu, tb);
}
void soft_nmi_interrupt(struct pt_regs *regs)
{
unsigned long flags;
int cpu = raw_smp_processor_id();
u64 tb;
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
return;
nmi_enter();
tb = get_tb();
if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
per_cpu(wd_timer_tb, cpu) = tb;
wd_smp_lock(&flags);
if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
wd_smp_unlock(&flags);
goto out;
}
set_cpu_stuck(cpu, tb);
pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", cpu);
print_modules();
print_irqtrace_events(current);
if (regs)
show_regs(regs);
else
dump_stack();
wd_smp_unlock(&flags);
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutself_cpu_backtrace();
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
}
if (wd_panic_timeout_tb < 0x7fffffff)
mtspr(SPRN_DEC, wd_panic_timeout_tb);
out:
nmi_exit();
}
static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
{
t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
if (wd_timer_period_ms > 1000)
t->expires = __round_jiffies_up(t->expires, cpu);
add_timer_on(t, cpu);
}
static void wd_timer_fn(unsigned long data)
{
struct timer_list *t = this_cpu_ptr(&wd_timer);
int cpu = smp_processor_id();
watchdog_timer_interrupt(cpu);
wd_timer_reset(cpu, t);
}
void arch_touch_nmi_watchdog(void)
{
int cpu = smp_processor_id();
watchdog_timer_interrupt(cpu);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
static void start_watchdog_timer_on(unsigned int cpu)
{
struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
per_cpu(wd_timer_tb, cpu) = get_tb();
setup_pinned_timer(t, wd_timer_fn, 0);
wd_timer_reset(cpu, t);
}
static void stop_watchdog_timer_on(unsigned int cpu)
{
struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
del_timer_sync(t);
}
static int start_wd_on_cpu(unsigned int cpu)
{
if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
WARN_ON(1);
return 0;
}
if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
return 0;
if (watchdog_suspended)
return 0;
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
return 0;
cpumask_set_cpu(cpu, &wd_cpus_enabled);
if (cpumask_weight(&wd_cpus_enabled) == 1) {
cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
wd_smp_last_reset_tb = get_tb();
}
smp_wmb();
start_watchdog_timer_on(cpu);
return 0;
}
static int stop_wd_on_cpu(unsigned int cpu)
{
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
return 0; /* Can happen in CPU unplug case */
stop_watchdog_timer_on(cpu);
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
wd_smp_clear_cpu_pending(cpu, get_tb());
return 0;
}
static void watchdog_calc_timeouts(void)
{
wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;
/* Have the SMP detector trigger a bit later */
wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;
/* 2/5 is the factor that the perf based detector uses */
wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}
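/*
 * Worked example (illustrative, not from the patch): with the default
 * watchdog_thresh of 10 seconds the above gives a per-CPU panic timeout
 * of 10 * ppc_tb_freq timebase ticks, an SMP panic timeout 1.5x that
 * (~15 seconds), and a heartbeat timer period of 10 * 1000 * 2 / 5 =
 * 4000 ms.
 */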
void watchdog_nmi_reconfigure(void)
{
int cpu;
watchdog_calc_timeouts();
for_each_cpu(cpu, &wd_cpus_enabled)
stop_wd_on_cpu(cpu);
for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
start_wd_on_cpu(cpu);
}
/*
* This runs after lockup_detector_init() which sets up watchdog_cpumask.
*/
static int __init powerpc_watchdog_init(void)
{
int err;
watchdog_calc_timeouts();
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online",
start_wd_on_cpu, stop_wd_on_cpu);
if (err < 0)
pr_warn("Watchdog could not be initialized");
return 0;
}
arch_initcall(powerpc_watchdog_init);
static void handle_backtrace_ipi(struct pt_regs *regs)
{
nmi_cpu_backtrace(regs);
}
static void raise_backtrace_ipi(cpumask_t *mask)
{
unsigned int cpu;
for_each_cpu(cpu, mask) {
if (cpu == smp_processor_id())
handle_backtrace_ipi(NULL);
else
smp_send_nmi_ipi(cpu, handle_backtrace_ipi, 1000000);
}
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}

View File

@ -93,7 +93,7 @@ int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
}
if (!hpt)
hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT
hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL
|__GFP_NOWARN, order - PAGE_SHIFT);
if (!hpt)

View File

@ -233,192 +233,192 @@ static long calc_offset(struct fixup_entry *entry, unsigned int *p)
static void test_basic_patching(void)
{
extern unsigned int ftr_fixup_test1;
extern unsigned int end_ftr_fixup_test1;
extern unsigned int ftr_fixup_test1_orig;
extern unsigned int ftr_fixup_test1_expected;
int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;
extern unsigned int ftr_fixup_test1[];
extern unsigned int end_ftr_fixup_test1[];
extern unsigned int ftr_fixup_test1_orig[];
extern unsigned int ftr_fixup_test1_expected[];
int size = end_ftr_fixup_test1 - ftr_fixup_test1;
fixup.value = fixup.mask = 8;
fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
fixup.alt_start_off = fixup.alt_end_off = 0;
/* Sanity check */
check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
/* Check we don't patch if the value matches */
patch_feature_section(8, &fixup);
check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
/* Check we do patch if the value doesn't match */
patch_feature_section(0, &fixup);
check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
/* Check we do patch if the mask doesn't match */
memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
patch_feature_section(~8, &fixup);
check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}
static void test_alternative_patching(void)
{
extern unsigned int ftr_fixup_test2;
extern unsigned int end_ftr_fixup_test2;
extern unsigned int ftr_fixup_test2_orig;
extern unsigned int ftr_fixup_test2_alt;
extern unsigned int ftr_fixup_test2_expected;
int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;
extern unsigned int ftr_fixup_test2[];
extern unsigned int end_ftr_fixup_test2[];
extern unsigned int ftr_fixup_test2_orig[];
extern unsigned int ftr_fixup_test2_alt[];
extern unsigned int ftr_fixup_test2_expected[];
int size = end_ftr_fixup_test2 - ftr_fixup_test2;
fixup.value = fixup.mask = 0xF;
fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);
fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);
/* Sanity check */
check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
/* Check we don't patch if the value matches */
patch_feature_section(0xF, &fixup);
check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
/* Check we do patch if the value doesn't match */
patch_feature_section(0, &fixup);
check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
/* Check we do patch if the mask doesn't match */
memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
patch_feature_section(~0xF, &fixup);
check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
static void test_alternative_case_too_big(void)
{
extern unsigned int ftr_fixup_test3;
extern unsigned int end_ftr_fixup_test3;
extern unsigned int ftr_fixup_test3_orig;
extern unsigned int ftr_fixup_test3_alt;
int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;
extern unsigned int ftr_fixup_test3[];
extern unsigned int end_ftr_fixup_test3[];
extern unsigned int ftr_fixup_test3_orig[];
extern unsigned int ftr_fixup_test3_alt[];
int size = end_ftr_fixup_test3 - ftr_fixup_test3;
fixup.value = fixup.mask = 0xC;
fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);
fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);
/* Sanity check */
check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
/* Expect nothing to be patched, and the error returned to us */
check(patch_feature_section(0xF, &fixup) == 1);
check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
check(patch_feature_section(0, &fixup) == 1);
check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
check(patch_feature_section(~0xF, &fixup) == 1);
check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
static void test_alternative_case_too_small(void)
{
extern unsigned int ftr_fixup_test4;
extern unsigned int end_ftr_fixup_test4;
extern unsigned int ftr_fixup_test4_orig;
extern unsigned int ftr_fixup_test4_alt;
extern unsigned int ftr_fixup_test4_expected;
int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
extern unsigned int ftr_fixup_test4[];
extern unsigned int end_ftr_fixup_test4[];
extern unsigned int ftr_fixup_test4_orig[];
extern unsigned int ftr_fixup_test4_alt[];
extern unsigned int ftr_fixup_test4_expected[];
int size = end_ftr_fixup_test4 - ftr_fixup_test4;
unsigned long flag;
/* Check a high-bit flag */
flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
fixup.value = fixup.mask = flag;
fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);
fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);
/* Sanity check */
check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
/* Check we don't patch if the value matches */
patch_feature_section(flag, &fixup);
check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
/* Check we do patch if the value doesn't match */
patch_feature_section(0, &fixup);
check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
/* Check we do patch if the mask doesn't match */
memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
patch_feature_section(~flag, &fixup);
check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}
static void test_alternative_case_with_branch(void)
{
extern unsigned int ftr_fixup_test5;
extern unsigned int end_ftr_fixup_test5;
extern unsigned int ftr_fixup_test5_expected;
int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;
extern unsigned int ftr_fixup_test5[];
extern unsigned int end_ftr_fixup_test5[];
extern unsigned int ftr_fixup_test5_expected[];
int size = end_ftr_fixup_test5 - ftr_fixup_test5;
check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}
static void test_alternative_case_with_external_branch(void)
{
extern unsigned int ftr_fixup_test6;
extern unsigned int end_ftr_fixup_test6;
extern unsigned int ftr_fixup_test6_expected;
int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;
extern unsigned int ftr_fixup_test6[];
extern unsigned int end_ftr_fixup_test6[];
extern unsigned int ftr_fixup_test6_expected[];
int size = end_ftr_fixup_test6 - ftr_fixup_test6;
check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}
static void test_cpu_macros(void)
{
extern u8 ftr_fixup_test_FTR_macros;
extern u8 ftr_fixup_test_FTR_macros_expected;
unsigned long size = &ftr_fixup_test_FTR_macros_expected -
&ftr_fixup_test_FTR_macros;
extern u8 ftr_fixup_test_FTR_macros[];
extern u8 ftr_fixup_test_FTR_macros_expected[];
unsigned long size = ftr_fixup_test_FTR_macros_expected -
ftr_fixup_test_FTR_macros;
/* The fixups have already been done for us during boot */
check(memcmp(&ftr_fixup_test_FTR_macros,
&ftr_fixup_test_FTR_macros_expected, size) == 0);
check(memcmp(ftr_fixup_test_FTR_macros,
ftr_fixup_test_FTR_macros_expected, size) == 0);
}
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
extern u8 ftr_fixup_test_FW_FTR_macros;
extern u8 ftr_fixup_test_FW_FTR_macros_expected;
unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
&ftr_fixup_test_FW_FTR_macros;
extern u8 ftr_fixup_test_FW_FTR_macros[];
extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
ftr_fixup_test_FW_FTR_macros;
/* The fixups have already been done for us during boot */
check(memcmp(&ftr_fixup_test_FW_FTR_macros,
&ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
check(memcmp(ftr_fixup_test_FW_FTR_macros,
ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}
static void test_lwsync_macros(void)
{
extern u8 lwsync_fixup_test;
extern u8 end_lwsync_fixup_test;
extern u8 lwsync_fixup_test_expected_LWSYNC;
extern u8 lwsync_fixup_test_expected_SYNC;
unsigned long size = &end_lwsync_fixup_test -
&lwsync_fixup_test;
extern u8 lwsync_fixup_test[];
extern u8 end_lwsync_fixup_test[];
extern u8 lwsync_fixup_test_expected_LWSYNC[];
extern u8 lwsync_fixup_test_expected_SYNC[];
unsigned long size = end_lwsync_fixup_test -
lwsync_fixup_test;
/* The fixups have already been done for us during boot */
if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
check(memcmp(&lwsync_fixup_test,
&lwsync_fixup_test_expected_LWSYNC, size) == 0);
check(memcmp(lwsync_fixup_test,
lwsync_fixup_test_expected_LWSYNC, size) == 0);
} else {
check(memcmp(&lwsync_fixup_test,
&lwsync_fixup_test_expected_SYNC, size) == 0);
check(memcmp(lwsync_fixup_test,
lwsync_fixup_test_expected_SYNC, size) == 0);
}
}

View File

@ -34,16 +34,9 @@
/*
* Top of mmap area (just below the process stack).
*
* Leave at least a ~128 MB hole on 32bit applications.
*
* On 64bit applications we randomise the stack by 1GB so we need to
* space our mmap start address by a further 1GB, otherwise there is a
* chance the mmap area will end up closer to the stack than our ulimit
* requires.
* Leave at least a ~128 MB hole.
*/
#define MIN_GAP32 (128*1024*1024)
#define MIN_GAP64 ((128 + 1024)*1024*1024UL)
#define MIN_GAP ((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (TASK_SIZE/6*5)
static inline int mmap_is_legacy(void)
@ -71,9 +64,26 @@ unsigned long arch_mmap_rnd(void)
return rnd << PAGE_SHIFT;
}
static inline unsigned long stack_maxrandom_size(void)
{
if (!(current->flags & PF_RANDOMIZE))
return 0;
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
return (1<<23);
else
return (1<<30);
}
static inline unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
gap += pad;
if (gap < MIN_GAP)
gap = MIN_GAP;

View File

@ -246,6 +246,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}
void machine_shutdown(void)

View File

@ -496,11 +496,6 @@ static void __init setup_memory_end(void)
pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}
static void __init setup_vmcoreinfo(void)
{
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}
#ifdef CONFIG_CRASH_DUMP
/*
@ -939,7 +934,6 @@ void __init setup_arch(char **cmdline_p)
#endif
setup_resources();
setup_vmcoreinfo();
setup_lowcore();
smp_fill_possible_mask();
cpu_detect_mhz_feature();

View File

@ -48,6 +48,7 @@ do { \
"i" (__FILE__), \
"i" (__LINE__), "i" (0), \
"i" (sizeof(struct bug_entry))); \
unreachable(); \
} while (0)
#define __WARN_FLAGS(flags) \

View File

@ -19,6 +19,7 @@ static __always_inline void boot_init_stack_canary(void)
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;

View File

@ -234,7 +234,7 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
static void inline sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
/* Purge all ways in a particular block of sets, specified by the base
set number and number of sets. Can handle wrap-around, if that's

View File

@ -7,6 +7,7 @@ void nmi_adjust_hz(unsigned int new_hz);
extern atomic_t nmi_active;
void arch_touch_nmi_watchdog(void);
void start_nmi_watchdog(void *unused);
void stop_nmi_watchdog(void *unused);

View File

@ -205,7 +205,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_REPEAT);
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!base)
return NULL;

View File

@ -51,7 +51,7 @@ static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
void arch_touch_nmi_watchdog(void)
{
if (atomic_read(&nmi_active)) {
int cpu;
@ -61,10 +61,8 @@ void touch_nmi_watchdog(void)
per_cpu(nmi_touch, cpu) = 1;
}
}
touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{

View File

@ -50,6 +50,7 @@ config X86
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MMIO_FLUSH
@ -162,6 +163,7 @@ config X86
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI
select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API

View File

@ -411,3 +411,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
debug_putstr("done.\nBooting the kernel.\n");
return output;
}
void fortify_panic(const char *name)
{
error("detected buffer overflow");
}
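
For readers unfamiliar with FORTIFY_SOURCE, here is a rough user-space analogue (names prefixed demo_ are invented, and this is not the kernel's implementation): destinations whose size is known at compile time gain a runtime length check, and any detected overflow funnels into a single panic routine like the one added above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* User-space stand-in for the kernel's fortify_panic(). */
static void demo_fortify_panic(const char *name)
{
	fprintf(stderr, "detected buffer overflow in %s\n", name);
	abort();
}

/* Sketch of a fortified memcpy: the destination size is captured with
 * __builtin_object_size() and checked before the real copy runs. */
#define demo_memcpy(dst, src, len)					\
	({								\
		size_t __bos = __builtin_object_size(dst, 0);		\
		if (__bos != (size_t)-1 && (len) > __bos)		\
			demo_fortify_panic(__func__);			\
		memcpy(dst, src, len);					\
	})

int main(void)
{
	char small[8];
	const char big[64] = "this string is much longer than eight bytes";

	demo_memcpy(small, big, sizeof(small));	/* ok: fits exactly */
	demo_memcpy(small, big, sizeof(big));	/* overflow: calls demo_fortify_panic() */
	return 0;
}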

View File

@ -33,7 +33,7 @@
#ifdef CONFIG_X86_32
extern unsigned long asmlinkage efi_call_phys(void *, ...);
extern asmlinkage unsigned long efi_call_phys(void *, ...);
#define arch_efi_call_virt_setup() kernel_fpu_begin()
#define arch_efi_call_virt_teardown() kernel_fpu_end()
@ -52,7 +52,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
#define EFI_LOADER_SIGNATURE "EL64"
extern u64 asmlinkage efi_call(void *fp, ...);
extern asmlinkage u64 efi_call(void *fp, ...);
#define efi_call_phys(f, args...) efi_call((f), args)

View File

@ -74,6 +74,7 @@ static __always_inline void boot_init_stack_canary(void)
get_random_bytes(&canary, sizeof(canary));
tsc = rdtsc();
canary += tsc + (tsc << 32UL);
canary &= CANARY_MASK;
current->stack_canary = canary;
#ifdef CONFIG_X86_64

View File

@ -142,7 +142,9 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
}
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, size_t);
#ifndef CONFIG_FORTIFY_SOURCE
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
@ -195,11 +197,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
#endif
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t n);
extern int memcmp(const void *, const void *, size_t);
#ifndef CONFIG_FORTIFY_SOURCE
#define memcmp __builtin_memcmp
#endif
#define __HAVE_ARCH_MEMCHR
extern void *memchr(const void *cs, int c, size_t count);
@ -321,6 +327,8 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
: __memset_generic((s), (c), (count)))
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, size_t);
#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ >= 4)
#define memset(s, c, count) __builtin_memset(s, c, count)
#else
@ -330,6 +338,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
(count)) \
: __memset((s), (c), (count)))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
/*
* find the first occurrence of byte 'c', or 1 past the area if none

View File

@ -31,6 +31,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len) \
@ -51,6 +52,7 @@ extern void *__memcpy(void *to, const void *from, size_t len);
*/
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
@ -77,6 +79,11 @@ int strcmp(const char *cs, const char *ct);
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif
#endif
#define __HAVE_ARCH_MEMCPY_MCSAFE 1

View File

@ -19,7 +19,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#ifdef CONFIG_HARDLOCKUP_DETECTOR
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
return (u64)(cpu_khz) * 1000 * watchdog_thresh;

View File

@ -457,7 +457,7 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
bufp += sizeof(Elf64_Phdr);
phdr->p_type = PT_NOTE;
phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
(ehdr->e_phnum)++;
#ifdef CONFIG_X86_64

View File

@ -6,7 +6,7 @@
__visible void *memcpy(void *to, const void *from, size_t n)
{
#ifdef CONFIG_X86_USE_3DNOW
#if defined(CONFIG_X86_USE_3DNOW) && !defined(CONFIG_FORTIFY_SOURCE)
return __memcpy3d(to, from, n);
#else
return __memcpy(to, from, n);

View File

@ -92,13 +92,18 @@ unsigned long arch_mmap_rnd(void)
static unsigned long mmap_base(unsigned long rnd, unsigned long task_size)
{
unsigned long gap = rlimit(RLIMIT_STACK);
unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
unsigned long gap_min, gap_max;
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
gap += pad;
/*
* Top of mmap area (just below the process stack).
* Leave an at least ~128 MB hole with possible stack randomization.
*/
gap_min = SIZE_128M + stack_maxrandom_size(task_size);
gap_min = SIZE_128M;
gap_max = (task_size / 6) * 5;
if (gap < gap_min)

View File

@ -2693,8 +2693,8 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
phys_addr_t paddr_vmcoreinfo_note(void)
{
if (xen_pv_domain())
return virt_to_machine(&vmcoreinfo_note).maddr;
return virt_to_machine(vmcoreinfo_note).maddr;
else
return __pa_symbol(&vmcoreinfo_note);
return __pa(vmcoreinfo_note);
}
#endif /* CONFIG_KEXEC_CORE */

View File

@ -987,6 +987,11 @@ void add_device_randomness(const void *buf, unsigned int size)
unsigned long time = random_get_entropy() ^ jiffies;
unsigned long flags;
if (!crng_ready()) {
crng_fast_load(buf, size);
return;
}
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(&input_pool, buf, size);

View File

@ -17,6 +17,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector)

View File

@ -2434,8 +2434,9 @@ rebuild_st:
* again with !__GFP_NORETRY. However, we still
* want to fail this allocation rather than
* trigger the out-of-memory killer and for
* this we want the future __GFP_MAYFAIL.
* this we want __GFP_RETRY_MAYFAIL.
*/
gfp |= __GFP_RETRY_MAYFAIL;
}
} while (1);

View File

@ -995,7 +995,9 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
free_rd_atomic_resource(qp, res);
rxe_advance_resp_resource(qp);
memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
sizeof(skb->cb) - sizeof(ack_pkt));
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;

View File

@ -1386,7 +1386,7 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma)
* order for ISA to be able to DMA to it.
*/
host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
GFP_NOIO | GFP_DMA | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!host->dma_buffer)
goto free;

View File

@ -98,7 +98,7 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
}
if (!session->response)
session->response = (char *)__get_free_pages(GFP_KERNEL
| __GFP_REPEAT | GFP_DMA,
| __GFP_RETRY_MAYFAIL | GFP_DMA,
get_order(session->bufsize));
if (!session->response) {
mutex_unlock(&session->mutex);

View File

@ -1115,7 +1115,7 @@ static const struct net_device_ops ctcm_mpc_netdev_ops = {
.ndo_start_xmit = ctcmpc_tx,
};
void static ctcm_dev_setup(struct net_device *dev)
static void ctcm_dev_setup(struct net_device *dev)
{
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;

View File

@ -2408,7 +2408,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
inline int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
int cast_type = RTN_UNSPEC;
struct neighbour *n = NULL;

View File

@ -252,7 +252,7 @@ int transport_alloc_session_tags(struct se_session *se_sess,
int rc;
se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
if (!se_sess->sess_cmd_map) {
se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
if (!se_sess->sess_cmd_map) {

View File

@ -377,7 +377,7 @@ static struct ioc3_port *get_ioc3_port(struct uart_port *the_port)
* called per port from attach...
* @port: port to initialize
*/
static int inline port_init(struct ioc3_port *port)
static inline int port_init(struct ioc3_port *port)
{
uint32_t sio_cr;
struct port_hooks *hooks = port->ip_hooks;
@ -1430,7 +1430,7 @@ static int receive_chars(struct uart_port *the_port)
* @pending: interrupts to handle
*/
static int inline
static inline int
ioc3uart_intr_one(struct ioc3_submodule *is,
struct ioc3_driver_data *idd,
unsigned int pending)

View File

@ -824,7 +824,7 @@ pending_intrs(struct ioc4_soft *soft, int type)
* called per port from attach...
* @port: port to initialize
*/
static int inline port_init(struct ioc4_port *port)
static inline int port_init(struct ioc4_port *port)
{
uint32_t sio_cr;
struct hooks *hooks = port->ip_hooks;
@ -1048,7 +1048,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
* IOC4 with serial ports in the system.
* @idd: Master module data for this IOC4
*/
static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
static inline int ioc4_attach_local(struct ioc4_driver_data *idd)
{
struct ioc4_port *port;
struct ioc4_port *ports[IOC4_NUM_SERIAL_PORTS];

View File

@ -180,7 +180,7 @@ static const __u16 crc10_table[256] = {
* Perform a memcpy and calculate fcs using ppp 10bit CRC algorithm. Return
* new 10 bit FCS.
*/
static __u16 __inline__ fcs_compute10(unsigned char *sp, int len, __u16 fcs)
static inline __u16 fcs_compute10(unsigned char *sp, int len, __u16 fcs)
{
for (; len-- > 0; fcs = CRC10_FCS(fcs, *sp++));
return fcs;


@ -897,7 +897,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct sk_buff **queue;
int i;
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT);
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!n)
return -ENOMEM;
vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);


@ -1404,7 +1404,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
struct vhost_virtqueue **vqs;
int r = -ENOMEM, i;
vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
if (!vs) {
vs = vzalloc(sizeof(*vs));
if (!vs)


@ -508,7 +508,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
/* This struct is large and allocation could fail, fall back to vmalloc
* if there is no other way.
*/
vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_REPEAT);
vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!vsock)
return -ENOMEM;


@ -907,7 +907,7 @@ static void intelfb_pci_unregister(struct pci_dev *pdev)
* helper functions *
***************************************************************/
int __inline__ intelfb_var_to_depth(const struct fb_var_screeninfo *var)
__inline__ int intelfb_var_to_depth(const struct fb_var_screeninfo *var)
{
DBG_MSG("intelfb_var_to_depth: bpp: %d, green.length is %d\n",
var->bits_per_pixel, var->green.length);


@ -79,12 +79,12 @@ static struct omap_lcd_controller {
unsigned long vram_size;
} lcdc;
static void inline enable_irqs(int mask)
static inline void enable_irqs(int mask)
{
lcdc.irq_mask |= mask;
}
static void inline disable_irqs(int mask)
static inline void disable_irqs(int mask)
{
lcdc.irq_mask &= ~mask;
}
@ -466,7 +466,7 @@ static void calc_ck_div(int is_tft, int pck, int *pck_div)
}
}
static void inline setup_regs(void)
static inline void setup_regs(void)
{
u32 l;
struct lcd_panel *panel = lcdc.fbdev->panel;


@ -80,7 +80,6 @@ config EXPORTFS_BLOCK_OPS
config FILE_LOCKING
bool "Enable POSIX file locking API" if EXPERT
default y
select PERCPU_RWSEM
help
This option enables standard file locking support, required
for filesystems like NFS and for the flock() system


@ -419,7 +419,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
if (i_sblock > info->si_blocks ||
i_eblock > info->si_blocks ||
i_sblock > i_eblock ||
i_eoff > s_size ||
(i_eoff != le32_to_cpu(-1) && i_eoff > s_size) ||
i_sblock * BFS_BSIZE > i_eoff) {
printf("Inode 0x%08x corrupted\n", i);


@ -960,10 +960,14 @@ static void ep_show_fdinfo(struct seq_file *m, struct file *f)
mutex_lock(&ep->mtx);
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
struct inode *inode = file_inode(epi->ffd.file);
seq_printf(m, "tfd: %8d events: %8x data: %16llx\n",
seq_printf(m, "tfd: %8d events: %8x data: %16llx "
" pos:%lli ino:%lx sdev:%x\n",
epi->ffd.fd, epi->event.events,
(long long)epi->event.data);
(long long)epi->event.data,
(long long)epi->ffd.file->f_pos,
inode->i_ino, inode->i_sb->s_dev);
if (seq_has_overflowed(m))
break;
}
@ -1073,6 +1077,50 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
return epir;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
{
struct rb_node *rbp;
struct epitem *epi;
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (epi->ffd.fd == tfd) {
if (toff == 0)
return epi;
else
toff--;
}
cond_resched();
}
return NULL;
}
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
unsigned long toff)
{
struct file *file_raw;
struct eventpoll *ep;
struct epitem *epi;
if (!is_file_epoll(file))
return ERR_PTR(-EINVAL);
ep = file->private_data;
mutex_lock(&ep->mtx);
epi = ep_find_tfd(ep, tfd, toff);
if (epi)
file_raw = epi->ffd.file;
else
file_raw = ERR_PTR(-ENOENT);
mutex_unlock(&ep->mtx);
return file_raw;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they


@ -380,8 +380,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
struct page *page = radix_tree_deref_slot_protected(slot,
&mapping->tree_lock);
if (likely(page) && PageDirty(page)) {
__dec_wb_stat(old_wb, WB_RECLAIMABLE);
__inc_wb_stat(new_wb, WB_RECLAIMABLE);
dec_wb_stat(old_wb, WB_RECLAIMABLE);
inc_wb_stat(new_wb, WB_RECLAIMABLE);
}
}
@ -391,8 +391,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
&mapping->tree_lock);
if (likely(page)) {
WARN_ON_ONCE(!PageWriteback(page));
__dec_wb_stat(old_wb, WB_WRITEBACK);
__inc_wb_stat(new_wb, WB_WRITEBACK);
dec_wb_stat(old_wb, WB_WRITEBACK);
inc_wb_stat(new_wb, WB_WRITEBACK);
}
}


@ -1355,6 +1355,53 @@ static const struct file_operations proc_fault_inject_operations = {
.write = proc_fault_inject_write,
.llseek = generic_file_llseek,
};
static ssize_t proc_fail_nth_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
int err, n;
task = get_proc_task(file_inode(file));
if (!task)
return -ESRCH;
put_task_struct(task);
if (task != current)
return -EPERM;
err = kstrtoint_from_user(buf, count, 10, &n);
if (err)
return err;
if (n < 0 || n == INT_MAX)
return -EINVAL;
current->fail_nth = n + 1;
return count;
}
static ssize_t proc_fail_nth_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
int err;
task = get_proc_task(file_inode(file));
if (!task)
return -ESRCH;
put_task_struct(task);
if (task != current)
return -EPERM;
if (count < 1)
return -EINVAL;
err = put_user((char)(current->fail_nth ? 'N' : 'Y'), buf);
if (err)
return err;
current->fail_nth = 0;
return 1;
}
static const struct file_operations proc_fail_nth_operations = {
.read = proc_fail_nth_read,
.write = proc_fail_nth_write,
};
#endif
@ -3311,6 +3358,11 @@ static const struct pid_entry tid_base_stuff[] = {
#endif
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
/*
* Operations on the file check that the task is current,
* so we create it with 0666 to support testing under unprivileged user.
*/
REG("fail-nth", 0666, proc_fail_nth_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
ONE("io", S_IRUSR, proc_tid_io_accounting),


@ -1078,16 +1078,30 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
return -EINVAL;
}
static int sysctl_check_table_array(const char *path, struct ctl_table *table)
{
int err = 0;
if ((table->proc_handler == proc_douintvec) ||
(table->proc_handler == proc_douintvec_minmax)) {
if (table->maxlen != sizeof(unsigned int))
err |= sysctl_err(path, table, "array now allowed");
}
return err;
}
static int sysctl_check_table(const char *path, struct ctl_table *table)
{
int err = 0;
for (; table->procname; table++) {
if (table->child)
err = sysctl_err(path, table, "Not a file");
err |= sysctl_err(path, table, "Not a file");
if ((table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
(table->proc_handler == proc_douintvec) ||
(table->proc_handler == proc_douintvec_minmax) ||
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||
@ -1095,15 +1109,17 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
(table->proc_handler == proc_doulongvec_minmax) ||
(table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
if (!table->data)
err = sysctl_err(path, table, "No data");
err |= sysctl_err(path, table, "No data");
if (!table->maxlen)
err = sysctl_err(path, table, "No maxlen");
err |= sysctl_err(path, table, "No maxlen");
else
err |= sysctl_check_table_array(path, table);
}
if (!table->proc_handler)
err = sysctl_err(path, table, "No proc_handler");
err |= sysctl_err(path, table, "No proc_handler");
if ((table->mode & (S_IRUGO|S_IWUGO)) != table->mode)
err = sysctl_err(path, table, "bogus .mode 0%o",
err |= sysctl_err(path, table, "bogus .mode 0%o",
table->mode);
}
return err;


@ -54,6 +54,16 @@ kmem_flags_convert(xfs_km_flags_t flags)
lflags &= ~__GFP_FS;
}
/*
* Default page/slab allocator behavior is to retry for ever
* for small allocations. We can override this behavior by using
* __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long
* as it is feasible but rather fail than retry forever for all
* request sizes.
*/
if (flags & KM_MAYFAIL)
lflags |= __GFP_RETRY_MAYFAIL;
if (flags & KM_ZERO)
lflags |= __GFP_ZERO;


@ -69,34 +69,14 @@ static inline void __add_wb_stat(struct bdi_writeback *wb,
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
static inline void __inc_wb_stat(struct bdi_writeback *wb,
enum wb_stat_item item)
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
__add_wb_stat(wb, item, 1);
}
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
__inc_wb_stat(wb, item);
local_irq_restore(flags);
}
static inline void __dec_wb_stat(struct bdi_writeback *wb,
enum wb_stat_item item)
{
__add_wb_stat(wb, item, -1);
}
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
__dec_wb_stat(wb, item);
local_irq_restore(flags);
__add_wb_stat(wb, item, -1);
}
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)


@ -19,7 +19,7 @@
CRASH_CORE_NOTE_NAME_BYTES + \
CRASH_CORE_NOTE_DESC_BYTES)
#define VMCOREINFO_BYTES (4096)
#define VMCOREINFO_BYTES PAGE_SIZE
#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
@ -28,6 +28,7 @@
typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
void crash_update_vmcoreinfo_safecopy(void *ptr);
void crash_save_vmcoreinfo(void);
void arch_crash_save_vmcoreinfo(void);
__printf(1, 2)
@ -56,9 +57,7 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
extern size_t vmcoreinfo_size;
extern size_t vmcoreinfo_max_size;
extern u32 *vmcoreinfo_note;
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);


@ -592,8 +592,8 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
}
struct name_snapshot {
const char *name;
char inline_name[DNAME_INLINE_LEN];
const unsigned char *name;
unsigned char inline_name[DNAME_INLINE_LEN];
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);


@ -14,6 +14,7 @@
#define _LINUX_EVENTPOLL_H
#include <uapi/linux/eventpoll.h>
#include <uapi/linux/kcmp.h>
/* Forward declarations to avoid compiler errors */
@ -22,6 +23,10 @@ struct file;
#ifdef CONFIG_EPOLL
#ifdef CONFIG_CHECKPOINT_RESTORE
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
#endif
/* Used to initialize the epoll bits inside the "struct file" */
static inline void eventpoll_init_file(struct file *file)
{


@ -25,7 +25,7 @@ struct vm_area_struct;
#define ___GFP_FS 0x80u
#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_REPEAT 0x400u
#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
#define ___GFP_NORETRY 0x1000u
#define ___GFP_MEMALLOC 0x2000u
@ -136,26 +136,56 @@ struct vm_area_struct;
*
* __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
* _might_ fail. This depends upon the particular VM implementation.
* The default allocator behavior depends on the request size. We have a concept
* of so called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
* !costly allocations are too essential to fail so they are implicitly
* non-failing by default (with some exceptions like OOM victims might fail so
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
* implicit rules
*
* __GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
* it can sleep). It will avoid disruptive actions like OOM killer. The
* caller must handle the failure which is quite likely to happen under
* heavy memory pressure. The flag is suitable when failure can easily be
* handled at small cost, such as reduced throughput
*
* __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
* procedures that have previously failed if there is some indication
* that progress has been made else where. It can wait for other
* tasks to attempt high level approaches to freeing memory such as
* compaction (which removes fragmentation) and page-out.
* There is still a definite limit to the number of retries, but it is
* a larger limit than with __GFP_NORETRY.
* Allocations with this flag may fail, but only when there is
* genuinely little unused memory. While these allocations do not
* directly trigger the OOM killer, their failure indicates that
* the system is likely to need to use the OOM killer soon. The
* caller must handle failure, but can reasonably do so by failing
* a higher-level request, or completing it only in a much less
* efficient manner.
* If the allocation does fail, and the caller is in a position to
* free some non-essential memory, doing so could benefit the system
* as a whole.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
* cannot handle allocation failures. New users should be evaluated carefully
* (and the flag should be used only when there is no reasonable failure
* policy) but it is definitely preferable to use the flag rather than
* opencode endless loop around allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely and will
* return NULL when direct reclaim and memory compaction have failed to allow
* the allocation to succeed. The OOM killer is not called with the current
* implementation.
* cannot handle allocation failures. The allocation could block
* indefinitely but will never return with failure. Testing for
* failure is pointless.
* New users should be evaluated carefully (and the flag should be
* used only when there is no reasonable failure policy) but it is
* definitely preferable to use the flag rather than opencode endless
* loop around allocator.
* Using this flag for costly allocations is _highly_ discouraged.
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)


@ -20,6 +20,9 @@ struct kern_ipc_perm {
umode_t mode;
unsigned long seq;
void *security;
struct rcu_head rcu;
atomic_t refcount;
} ____cacheline_aligned_in_smp;
#endif /* _LINUX_IPC_H */


@ -11,6 +11,7 @@
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
@ -854,9 +855,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @member: the name of the member within the struct.
*
*/
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#define container_of(ptr, type, member) ({ \
void *__mptr = (void *)(ptr); \
BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
!__same_type(*(ptr), void), \
"pointer type mismatch in container_of()"); \
((type *)(__mptr - offsetof(type, member))); })
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
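
The stricter container_of() is easiest to see with a toy example (hypothetical structures, not part of this series): a pointer whose type matches neither the named member nor void * now fails at build time instead of silently computing a bogus address.

#include <linux/kernel.h>

struct inner { int x; };
struct outer {
        int id;
        struct inner member;
};

static inline struct outer *outer_of(struct inner *p)
{
        return container_of(p, struct outer, member);   /* types match: compiles as before */
}

/*
 * container_of((int *)p, struct outer, member) would now trip
 * BUILD_BUG_ON_MSG("pointer type mismatch in container_of()"),
 * since an int * is neither a struct inner * nor a void *.
 */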


@ -172,6 +172,7 @@ struct kimage {
unsigned long start;
struct page *control_code_page;
struct page *swap_page;
void *vmcoreinfo_data_copy; /* locates in the crash memory */
unsigned long nr_segments;
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
@ -241,6 +242,7 @@ extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
int kexec_crash_loaded(void);
void crash_save_cpu(struct pt_regs *regs, int cpu);
extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;


@ -34,7 +34,7 @@ extern char *migrate_reason_names[MR_TYPES];
static inline struct page *new_page_nodemask(struct page *page,
int preferred_nid, nodemask_t *nodemask)
{
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
if (PageHuge(page))
return alloc_huge_page_nodemask(page_hstate(compound_head(page)),


@ -6,18 +6,26 @@
#include <linux/sched.h>
#include <asm/irq.h>
#if defined(CONFIG_HAVE_NMI_WATCHDOG)
#include <asm/nmi.h>
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
#else
static inline void lockup_detector_init(void)
{
}
#endif
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
extern int soft_watchdog_enabled;
extern atomic_t watchdog_park_in_progress;
#else
static inline void touch_softlockup_watchdog_sched(void)
{
@ -31,9 +39,6 @@ static inline void touch_softlockup_watchdog_sync(void)
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
@ -61,6 +66,21 @@ static inline void reset_hung_task_detector(void)
#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
#else
#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
static inline void arch_touch_nmi_watchdog(void) {}
#endif
#endif
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
@ -68,21 +88,11 @@ static inline void reset_hung_task_detector(void)
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
*/
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
arch_touch_nmi_watchdog();
touch_softlockup_watchdog();
}
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif
/*
* Create trigger_all_cpu_backtrace() out of the arch-provided
@ -139,15 +149,18 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
}
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
extern atomic_t watchdog_park_in_progress;
extern int __read_mostly watchdog_suspended;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;


@ -31,7 +31,7 @@ struct s3c2410_hcd_info {
void (*report_oc)(struct s3c2410_hcd_info *, int ports);
};
static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
static inline void s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
{
if (info->report_oc != NULL) {
(info->report_oc)(info, ports);


@ -57,6 +57,27 @@ static inline unsigned long get_random_long(void)
#endif
}
/*
* On 64-bit architectures, protect against non-terminated C string overflows
* by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
*/
#ifdef CONFIG_64BIT
# ifdef __LITTLE_ENDIAN
# define CANARY_MASK 0xffffffffffffff00UL
# else /* big endian, 64 bits: */
# define CANARY_MASK 0x00ffffffffffffffUL
# endif
#else /* 32 bits: */
# define CANARY_MASK 0xffffffffUL
#endif
static inline unsigned long get_random_canary(void)
{
unsigned long val = get_random_long();
return val & CANARY_MASK;
}
unsigned long randomize_page(unsigned long start, unsigned long range);
u32 prandom_u32(void);
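
A minimal user-space sketch (purely illustrative, not kernel code) of why the mask helps on a little-endian 64-bit machine: clearing the low byte of the value clears the first byte of the canary in memory, so an overflowing C string copy terminates at that NUL before it can reproduce the rest of the canary.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t raw = 0x1122334455667788ULL;           /* illustrative stand-in for get_random_long() */
        uint64_t canary = raw & 0xffffffffffffff00ULL;  /* CANARY_MASK on little-endian 64-bit */
        unsigned char *p = (unsigned char *)&canary;

        printf("canary=%#llx first byte in memory=%#x\n",
               (unsigned long long)canary, p[0]);       /* first byte is 0 */
        return 0;
}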


@ -974,6 +974,7 @@ struct task_struct {
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
int fail_nth;
#endif
/*
* When (nr_dirtied >= nr_dirtied_pause), it's time to call


@ -8,11 +8,29 @@
struct task_struct;
/* One semaphore structure for each semaphore in the system. */
struct sem {
int semval; /* current value */
/*
* PID of the process that last modified the semaphore. For
* Linux, specifically these are:
* - semop
* - semctl, via SETVAL and SETALL.
* - at task exit when performing undo adjustments (see exit_sem).
*/
int sempid;
spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head pending_alter; /* pending single-sop operations */
/* that alter the semaphore */
struct list_head pending_const; /* pending single-sop operations */
/* that do not alter the semaphore*/
time_t sem_otime; /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
time_t sem_ctime; /* last change time */
struct sem *sem_base; /* ptr to first semaphore in array */
time_t sem_ctime; /* create/last semctl() time */
struct list_head pending_alter; /* pending operations */
/* that alter the array */
struct list_head pending_const; /* pending complex operations */
@ -21,6 +39,8 @@ struct sem_array {
int sem_nsems; /* no. of semaphores in array */
int complex_count; /* pending complex operations */
unsigned int use_global_lock;/* >0: global lock required */
struct sem sems[];
};
#ifdef CONFIG_SYSVIPC


@ -471,7 +471,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
*
* %__GFP_NOWARN - If allocation fails, don't issue any warnings.
*
* %__GFP_REPEAT - If allocation fails initially, try once more before failing.
* %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
* eventually.
*
* There are other flags available as well, but these are not intended
* for general use, and so are not documented here. For a full list of


@ -193,4 +193,204 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
#define __RENAME(x) __asm__(#x)
void fortify_panic(const char *name) __noreturn __cold;
void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
__FORTIFY_INLINE char *strcpy(char *p, const char *q)
{
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __builtin_strcpy(p, q);
if (strscpy(p, q, p_size < q_size ? p_size : q_size) < 0)
fortify_panic(__func__);
return p;
}
__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
if (__builtin_constant_p(size) && p_size < size)
__write_overflow();
if (p_size < size)
fortify_panic(__func__);
return __builtin_strncpy(p, q, size);
}
__FORTIFY_INLINE char *strcat(char *p, const char *q)
{
size_t p_size = __builtin_object_size(p, 0);
if (p_size == (size_t)-1)
return __builtin_strcat(p, q);
if (strlcat(p, q, p_size) >= p_size)
fortify_panic(__func__);
return p;
}
__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
{
__kernel_size_t ret;
size_t p_size = __builtin_object_size(p, 0);
if (p_size == (size_t)-1)
return __builtin_strlen(p);
ret = strnlen(p, p_size);
if (p_size <= ret)
fortify_panic(__func__);
return ret;
}
extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
{
size_t p_size = __builtin_object_size(p, 0);
__kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
if (p_size <= ret && maxlen != ret)
fortify_panic(__func__);
return ret;
}
/* defined after fortified strlen to reuse it */
extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
{
size_t ret;
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __real_strlcpy(p, q, size);
ret = strlen(q);
if (size) {
size_t len = (ret >= size) ? size - 1 : ret;
if (__builtin_constant_p(len) && len >= p_size)
__write_overflow();
if (len >= p_size)
fortify_panic(__func__);
__builtin_memcpy(p, q, len);
p[len] = '\0';
}
return ret;
}
/* defined after fortified strlen and strnlen to reuse them */
__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
{
size_t p_len, copy_len;
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __builtin_strncat(p, q, count);
p_len = strlen(p);
copy_len = strnlen(q, count);
if (p_size < p_len + copy_len + 1)
fortify_panic(__func__);
__builtin_memcpy(p + p_len, q, copy_len);
p[p_len + copy_len] = '\0';
return p;
}
__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
if (__builtin_constant_p(size) && p_size < size)
__write_overflow();
if (p_size < size)
fortify_panic(__func__);
return __builtin_memset(p, c, size);
}
__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (__builtin_constant_p(size)) {
if (p_size < size)
__write_overflow();
if (q_size < size)
__read_overflow2();
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
return __builtin_memcpy(p, q, size);
}
__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (__builtin_constant_p(size)) {
if (p_size < size)
__write_overflow();
if (q_size < size)
__read_overflow2();
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
return __builtin_memmove(p, q, size);
}
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
if (__builtin_constant_p(size) && p_size < size)
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
return __real_memscan(p, c, size);
}
__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
size_t q_size = __builtin_object_size(q, 0);
if (__builtin_constant_p(size)) {
if (p_size < size)
__read_overflow();
if (q_size < size)
__read_overflow2();
}
if (p_size < size || q_size < size)
fortify_panic(__func__);
return __builtin_memcmp(p, q, size);
}
__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
if (__builtin_constant_p(size) && p_size < size)
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
return __builtin_memchr(p, c, size);
}
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
{
size_t p_size = __builtin_object_size(p, 0);
if (__builtin_constant_p(size) && p_size < size)
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
return __real_memchr_inv(p, c, size);
}
extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
{
size_t p_size = __builtin_object_size(p, 0);
if (__builtin_constant_p(size) && p_size < size)
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
return __real_kmemdup(p, size, gfp);
}
#endif
#endif /* _LINUX_STRING_H_ */
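
The effect of these fortified wrappers is easiest to see with a deliberately broken caller (hypothetical, and it assumes CONFIG_FORTIFY_SOURCE with an optimizing build, since the wrappers are compiled out otherwise):

#include <linux/string.h>

void fortify_demo(const void *src, size_t runtime_len)
{
        char buf[8];

        /* memcpy(buf, src, 16) would not compile here: the length is a
         * constant larger than __builtin_object_size(buf, 0), so
         * __write_overflow() fires at build time. */

        memcpy(buf, src, runtime_len);  /* length only known at run time: checked
                                         * against the object size and ends in
                                         * fortify_panic() if it exceeds 8 */
}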


@ -47,6 +47,9 @@ extern int proc_douintvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_douintvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,

Some files were not shown because too many files have changed in this diff.