
s390/kernel: introduce .dma sections

With a relocatable kernel that can reside anywhere in memory, code
and data that have to stay below 2 GB need special handling.

This patch introduces .dma sections for such text, data and ex_table.
The sections will be part of the decompressor kernel, so they will not
be relocated and stay below 2 GB. Their location is passed over to the
decompressed / relocated kernel via the .boot.preserved.data section.

The duald and aste for control register setup also need to stay below
2 GB, so move the setup code from arch/s390/kernel/head64.S to
arch/s390/boot/head.S. The duct and linkage_stack could reside above
2 GB, but their content has to be preserved for the decompressed kernel,
so they are also moved into the .dma section.

The start and end addresses of the .dma sections are added to vmcoreinfo
for crash support, to help debugging in case the kernel crashes there.
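
For illustration, a minimal sketch of the hand-off pattern used by this
patch, with names taken from the hunks below (unrelated code elided):

    /* decompressor, arch/s390/boot/startup.c: resolve the linker-defined
       symbols and record their physical addresses in .boot.preserved.data */
    unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
    unsigned long __bootdata_preserved(__edma) = __pa(&_edma);

    /* decompressed kernel, arch/s390/kernel/setup.c: the same variables are
       declared again; the decompressor hands their content over via the
       .boot.preserved.data section */
    unsigned long __bootdata_preserved(__sdma);
    unsigned long __bootdata_preserved(__edma);

    /* kernel code can then use the values, e.g. to keep the range reserved */
    memblock_reserve(__sdma, __edma - __sdma);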

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Reviewed-by: Philipp Rudo <prudo@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Gerald Schaefer 2019-02-03 21:37:20 +01:00 committed by Martin Schwidefsky
parent 087c4d7423
commit a80313ff91
24 changed files with 333 additions and 187 deletions

View File

@ -30,7 +30,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += ctype.o
obj-y += ctype.o text_dma.o
obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)

View File

@ -33,6 +33,27 @@ SECTIONS
*(.data.*)
_edata = . ;
}
/*
* .dma section for code, data, ex_table that need to stay below 2 GB,
* even when the kernel is relocated above 2 GB.
*/
_sdma = .;
.dma.text : {
. = ALIGN(PAGE_SIZE);
_stext_dma = .;
*(.dma.text)
. = ALIGN(PAGE_SIZE);
_etext_dma = .;
}
. = ALIGN(16);
.dma.ex_table : {
_start_dma_ex_table = .;
KEEP(*(.dma.ex_table))
_stop_dma_ex_table = .;
}
.dma.data : { *(.dma.data) }
_edma = .;
BOOT_DATA
BOOT_DATA_PRESERVED

View File

@ -305,7 +305,7 @@ ENTRY(startup_kdump)
xc 0x300(256),0x300
xc 0xe00(256),0xe00
xc 0xf00(256),0xf00
lctlg %c0,%c15,0x200(%r0) # initialize control registers
lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers
stcke __LC_BOOT_CLOCK
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
spt 6f-.LPG0(%r13)
@ -319,6 +319,36 @@ ENTRY(startup_kdump)
.align 8
6: .long 0x7fffffff,0xffffffff
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
.quad 0 # cr3: instruction authorization
.quad 0xffff # cr4: instruction authorization
.quad .Lduct # cr5: primary-aste origin
.quad 0 # cr6: I/O interrupts
.quad 0 # cr7: secondary space segment table
.quad 0 # cr8: access registers translation
.quad 0 # cr9: tracing off
.quad 0 # cr10: tracing off
.quad 0 # cr11: tracing off
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
.quad .Llinkage_stack # cr15: linkage stack operations
.section .dma.data,"aw",@progbits
.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
.Llinkage_stack:
.long 0,0,0x89000000,0,0,0,0x8a000000,0
.align 64
.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
.previous
#include "head_kdump.S"
#

View File

@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include "compressed/decompressor.h"
#include "boot.h"
@ -11,6 +13,43 @@
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
/*
* Some code and data needs to stay below 2 GB, even when the kernel would be
* relocated above 2 GB, because it has to use 31 bit addresses.
* Such code and data is part of the .dma section, and its location is passed
* over to the decompressed / relocated kernel via the .boot.preserved.data
* section.
*/
extern char _sdma[], _edma[];
extern char _stext_dma[], _etext_dma[];
extern struct exception_table_entry _start_dma_ex_table[];
extern struct exception_table_entry _stop_dma_ex_table[];
unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
struct exception_table_entry *
__bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
struct exception_table_entry *
__bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
int _diag210_dma(struct diag210 *addr);
int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
void _diag0c_dma(struct hypfs_diag0c_entry *entry);
void _diag308_reset_dma(void);
struct diag_ops __bootdata_preserved(diag_dma_ops) = {
.diag210 = _diag210_dma,
.diag26c = _diag26c_dma,
.diag14 = _diag14_dma,
.diag0c = _diag0c_dma,
.diag308_reset = _diag308_reset_dma
};
static struct diag210 _diag210_tmp_dma __section(".dma.data");
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
void _swsusp_reset_dma(void);
unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);
void error(char *x)
{
sclp_early_printk("\n\n");

View File

@ -0,0 +1,167 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Code that needs to run below 2 GB.
*
* Copyright IBM Corp. 2019
*/
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/sigp.h>
.section .dma.text,"ax"
/*
* Simplified version of expoline thunk. The normal thunks can not be used here,
* because they might be more than 2 GB away, and not reachable by the relative
* branch. No comdat, exrl, etc. optimizations used here, because it only
* affects a few functions that are not performance-relevant.
*/
.macro BR_EX_DMA_r14
larl %r1,0f
ex 0,0(%r1)
j .
0: br %r14
.endm
/*
* int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
*/
ENTRY(_diag14_dma)
lgr %r1,%r2
lgr %r2,%r3
lgr %r3,%r4
lhi %r5,-EIO
sam31
diag %r1,%r2,0x14
.Ldiag14_ex:
ipm %r5
srl %r5,28
.Ldiag14_fault:
sam64
lgfr %r2,%r5
BR_EX_DMA_r14
EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
/*
* int _diag210_dma(struct diag210 *addr)
*/
ENTRY(_diag210_dma)
lgr %r1,%r2
lhi %r2,-1
sam31
diag %r1,%r0,0x210
.Ldiag210_ex:
ipm %r2
srl %r2,28
.Ldiag210_fault:
sam64
lgfr %r2,%r2
BR_EX_DMA_r14
EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
/*
* int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
*/
ENTRY(_diag26c_dma)
lghi %r5,-EOPNOTSUPP
sam31
diag %r2,%r4,0x26c
.Ldiag26c_ex:
sam64
lgfr %r2,%r5
BR_EX_DMA_r14
EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
/*
* void _diag0c_dma(struct hypfs_diag0c_entry *entry)
*/
ENTRY(_diag0c_dma)
sam31
diag %r2,%r2,0x0c
sam64
BR_EX_DMA_r14
/*
* void _swsusp_reset_dma(void)
*/
ENTRY(_swsusp_reset_dma)
larl %r1,restart_entry
larl %r2,.Lrestart_diag308_psw
og %r1,0(%r2)
stg %r1,0(%r0)
lghi %r0,0
diag %r0,%r0,0x308
restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
BR_EX_DMA_r14
/*
* void _diag308_reset_dma(void)
*
* Calls diag 308 subcode 1 and continues execution
*/
ENTRY(_diag308_reset_dma)
larl %r4,.Lctlregs # Save control registers
stctg %c0,%c15,0(%r4)
lg %r2,0(%r4) # Disable lowcore protection
nilh %r2,0xefff
larl %r4,.Lctlreg0
stg %r2,0(%r4)
lctlg %c0,%c0,0(%r4)
larl %r4,.Lfpctl # Floating point control register
stfpc 0(%r4)
larl %r4,.Lprefix # Save prefix register
stpx 0(%r4)
larl %r4,.Lprefix_zero # Set prefix register to 0
spx 0(%r4)
larl %r4,.Lcontinue_psw # Save PSW flags
epsw %r2,%r3
stm %r2,%r3,0(%r4)
larl %r4,restart_part2 # Setup restart PSW at absolute 0
larl %r3,.Lrestart_diag308_psw
og %r4,0(%r3) # Save PSW
lghi %r3,0
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
lghi %r0,0
diag %r0,%r1,0x308
restart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
sam64 # Switch to 64 bit addressing mode
larl %r4,.Lctlregs # Restore control registers
lctlg %c0,%c15,0(%r4)
larl %r4,.Lfpctl # Restore floating point ctl register
lfpc 0(%r4)
larl %r4,.Lprefix # Restore prefix register
spx 0(%r4)
larl %r4,.Lcontinue_psw # Restore PSW flags
lpswe 0(%r4)
.Lcontinue:
BR_EX_DMA_r14
.section .dma.data,"aw",@progbits
.align 8
.Lrestart_diag308_psw:
.long 0x00080000,0x80000000
.align 8
.Lcontinue_psw:
.quad 0,.Lcontinue
.align 8
.Lctlreg0:
.quad 0
.Lctlregs:
.rept 16
.quad 0
.endr
.Lfpctl:
.long 0
.Lprefix:
.long 0
.Lprefix_zero:
.long 0

View File

@ -15,27 +15,13 @@
#define DBFS_D0C_HDR_VERSION 0
/*
* Execute diagnose 0c in 31 bit mode
*/
static void diag0c(struct hypfs_diag0c_entry *entry)
{
diag_stat_inc(DIAG_STAT_X00C);
asm volatile (
" sam31\n"
" diag %0,%0,0x0c\n"
" sam64\n"
: /* no output register */
: "a" (entry)
: "memory");
}
/*
* Get hypfs_diag0c_entry from CPU vector and store diag0c data
*/
static void diag0c_fn(void *data)
{
diag0c(((void **) data)[smp_processor_id()]);
diag_stat_inc(DIAG_STAT_X00C);
diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
}
/*

View File

@ -308,4 +308,17 @@ union diag318_info {
int diag204(unsigned long subcode, unsigned long size, void *addr);
int diag224(void *ptr);
int diag26c(void *req, void *resp, enum diag26c_sc subcode);
struct hypfs_diag0c_entry;
struct diag_ops {
int (*diag210)(struct diag210 *addr);
int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
int (*diag14)(unsigned long rx, unsigned long ry1, unsigned long subcode);
void (*diag0c)(struct hypfs_diag0c_entry *entry);
void (*diag308_reset)(void);
};
extern struct diag_ops diag_dma_ops;
extern struct diag210 *__diag210_tmp_dma;
#endif /* _ASM_S390_DIAG_H */

View File

@ -19,6 +19,11 @@ struct exception_table_entry
int insn, fixup;
};
extern struct exception_table_entry *__start_dma_ex_table;
extern struct exception_table_entry *__stop_dma_ex_table;
const struct exception_table_entry *s390_search_extables(unsigned long addr);
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;

View File

@ -127,7 +127,6 @@ enum diag308_rc {
};
extern int diag308(unsigned long subcode, void *addr);
extern void diag308_reset(void);
extern void store_status(void (*fn)(void *), void *data);
extern void lgr_info_log(void);

View File

@ -28,5 +28,12 @@
.long (_target) - . ; \
.previous
#define EX_TABLE_DMA(_fault, _target) \
.section .dma.ex_table, "a" ; \
.align 4 ; \
.long (_fault) - . ; \
.long (_target) - . ; \
.previous
#endif /* __ASSEMBLY__ */
#endif

View File

@ -23,4 +23,7 @@
*/
#define __bootdata_preserved(var) __section(.boot.preserved.data.var) var
extern unsigned long __sdma, __edma;
extern unsigned long __stext_dma, __etext_dma;
#endif

View File

@ -79,71 +79,3 @@ disabled_wait_psw:
s390_base_pgm_handler_fn:
.quad 0
.previous
#
# Calls diag 308 subcode 1 and continues execution
#
ENTRY(diag308_reset)
larl %r4,.Lctlregs # Save control registers
stctg %c0,%c15,0(%r4)
lg %r2,0(%r4) # Disable lowcore protection
nilh %r2,0xefff
larl %r4,.Lctlreg0
stg %r2,0(%r4)
lctlg %c0,%c0,0(%r4)
larl %r4,.Lfpctl # Floating point control register
stfpc 0(%r4)
larl %r4,.Lprefix # Save prefix register
stpx 0(%r4)
larl %r4,.Lprefix_zero # Set prefix register to 0
spx 0(%r4)
larl %r4,.Lcontinue_psw # Save PSW flags
epsw %r2,%r3
stm %r2,%r3,0(%r4)
larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
lghi %r3,0
lg %r4,0(%r4) # Save PSW
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
lghi %r0,0
diag %r0,%r1,0x308
.Lrestart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
sam64 # Switch to 64 bit addressing mode
larl %r4,.Lctlregs # Restore control registers
lctlg %c0,%c15,0(%r4)
larl %r4,.Lfpctl # Restore floating point ctl register
lfpc 0(%r4)
larl %r4,.Lprefix # Restore prefix register
spx 0(%r4)
larl %r4,.Lcontinue_psw # Restore PSW flags
lpswe 0(%r4)
.Lcontinue:
BR_EX %r14
.align 16
.Lrestart_psw:
.long 0x00080000,0x80000000 + .Lrestart_part2
.section .data..nosave,"aw",@progbits
.align 8
.Lcontinue_psw:
.quad 0,.Lcontinue
.previous
.section .bss
.align 8
.Lctlreg0:
.quad 0
.Lctlregs:
.rept 16
.quad 0
.endr
.Lfpctl:
.long 0
.Lprefix:
.long 0
.Lprefix_zero:
.long 0
.previous

View File

@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <asm/diag.h>
#include <asm/trace/diag.h>
#include <asm/sections.h>
struct diag_stat {
unsigned int counter[NR_DIAG_STAT];
@ -49,6 +50,9 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
struct diag_ops __bootdata_preserved(diag_dma_ops);
struct diag210 *__bootdata_preserved(__diag210_tmp_dma);
static int show_diag_stat(struct seq_file *m, void *v)
{
struct diag_stat *stat;
@ -139,30 +143,10 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
/*
* Diagnose 14: Input spool file manipulation
*/
static inline int __diag14(unsigned long rx, unsigned long ry1,
unsigned long subcode)
{
register unsigned long _ry1 asm("2") = ry1;
register unsigned long _ry2 asm("3") = subcode;
int rc = 0;
asm volatile(
" sam31\n"
" diag %2,2,0x14\n"
" sam64\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc), "+d" (_ry2)
: "d" (rx), "d" (_ry1)
: "cc");
return rc;
}
int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
{
diag_stat_inc(DIAG_STAT_X014);
return __diag14(rx, ry1, subcode);
return diag_dma_ops.diag14(rx, ry1, subcode);
}
EXPORT_SYMBOL(diag14);
@ -195,30 +179,17 @@ EXPORT_SYMBOL(diag204);
*/
int diag210(struct diag210 *addr)
{
/*
* diag 210 needs its data below the 2GB border, so we
* use a static data area to be sure
*/
static struct diag210 diag210_tmp;
static DEFINE_SPINLOCK(diag210_lock);
unsigned long flags;
int ccode;
spin_lock_irqsave(&diag210_lock, flags);
diag210_tmp = *addr;
*__diag210_tmp_dma = *addr;
diag_stat_inc(DIAG_STAT_X210);
asm volatile(
" lhi %0,-1\n"
" sam31\n"
" diag %1,0,0x210\n"
"0: ipm %0\n"
" srl %0,28\n"
"1: sam64\n"
EX_TABLE(0b, 1b)
: "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
*addr = diag210_tmp;
*addr = *__diag210_tmp_dma;
spin_unlock_irqrestore(&diag210_lock, flags);
return ccode;
@ -243,27 +214,9 @@ EXPORT_SYMBOL(diag224);
/*
* Diagnose 26C: Access Certain System Information
*/
static inline int __diag26c(void *req, void *resp, enum diag26c_sc subcode)
{
register unsigned long _req asm("2") = (addr_t) req;
register unsigned long _resp asm("3") = (addr_t) resp;
register unsigned long _subcode asm("4") = subcode;
register unsigned long _rc asm("5") = -EOPNOTSUPP;
asm volatile(
" sam31\n"
" diag %[rx],%[ry],0x26c\n"
"0: sam64\n"
EX_TABLE(0b,0b)
: "+d" (_rc)
: [rx] "d" (_req), "d" (_resp), [ry] "d" (_subcode)
: "cc", "memory");
return _rc;
}
int diag26c(void *req, void *resp, enum diag26c_sc subcode)
{
diag_stat_inc(DIAG_STAT_X26C);
return __diag26c(req, resp, subcode);
return diag_dma_ops.diag26c(req, resp, subcode);
}
EXPORT_SYMBOL(diag26c);

View File

@ -139,7 +139,7 @@ static void early_pgm_check_handler(void)
unsigned long addr;
addr = S390_lowcore.program_old_psw.addr;
fixup = search_exception_tables(addr);
fixup = s390_search_extables(addr);
if (!fixup)
disabled_wait(0);
/* Disable low address protection before storing into lowcore. */

View File

@ -26,7 +26,6 @@ ENTRY(startup_continue)
0: larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
larl %r0,boot_vdso_data
stg %r0,__LC_VDSO_PER_CPU
#
@ -61,22 +60,6 @@ ENTRY(startup_continue)
.align 16
.LPG1:
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
.quad 0 # cr3: instruction authorization
.quad 0xffff # cr4: instruction authorization
.quad .Lduct # cr5: primary-aste origin
.quad 0 # cr6: I/O interrupts
.quad 0 # cr7: secondary space segment table
.quad 0 # cr8: access registers translation
.quad 0 # cr9: tracing off
.quad 0 # cr10: tracing off
.quad 0 # cr11: tracing off
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
.quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@ -84,14 +67,5 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
.Llinkage_stack:
.long 0,0,0x89000000,0,0,0,0x8a000000,0
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0

View File

@ -1720,7 +1720,7 @@ void s390_reset_system(void)
/* Disable lowcore protection */
__ctl_clear_bit(0, 28);
diag308_reset();
diag_dma_ops.diag308_reset();
}
#ifdef CONFIG_KEXEC_FILE

View File

@ -573,7 +573,7 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
entry = search_exception_tables(regs->psw.addr);
entry = s390_search_extables(regs->psw.addr);
if (entry) {
regs->psw.addr = extable_fixup(entry);
return 1;

View File

@ -253,6 +253,8 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
vmcoreinfo_append_str("EDMA=%lx\n", __edma);
}
void machine_shutdown(void)

View File

@ -101,6 +101,14 @@ unsigned long __bootdata(memory_end);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
unsigned long __bootdata_preserved(__swsusp_reset_dma);
unsigned long __bootdata_preserved(__stext_dma);
unsigned long __bootdata_preserved(__etext_dma);
unsigned long __bootdata_preserved(__sdma);
unsigned long __bootdata_preserved(__edma);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@ -832,6 +840,7 @@ static void __init reserve_kernel(void)
memblock_reserve(0, HEAD_END);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
memblock_reserve(__sdma, __edma - __sdma);
}
static void __init setup_memory(void)

View File

@ -689,7 +689,7 @@ void __init smp_save_dump_cpus(void)
smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
}
memblock_free(page, PAGE_SIZE);
diag308_reset();
diag_dma_ops.diag308_reset();
pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

View File

@ -154,20 +154,13 @@ ENTRY(swsusp_arch_resume)
ptlb /* flush tlb */
/* Reset System */
larl %r1,restart_entry
larl %r2,.Lrestart_diag308_psw
og %r1,0(%r2)
stg %r1,0(%r0)
larl %r1,.Lnew_pgm_check_psw
epsw %r2,%r3
stm %r2,%r3,0(%r1)
mvc __LC_PGM_NEW_PSW(16,%r0),0(%r1)
lghi %r0,0
diag %r0,%r0,0x308
restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
larl %r1,__swsusp_reset_dma
lg %r1,0(%r1)
BASR_EX %r14,%r1
#ifdef CONFIG_SMP
larl %r1,smp_cpu_mt_shift
icm %r1,15,0(%r1)
@ -275,8 +268,6 @@ restore_registers:
.Lpanic_string:
.asciz "Resume not possible because suspend CPU is no longer available\n"
.align 8
.Lrestart_diag308_psw:
.long 0x00080000,0x80000000
.Lrestart_suspend_psw:
.quad 0x0000000180000000,restart_suspend
.Lnew_pgm_check_psw:

View File

@ -49,7 +49,7 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
report_user_fault(regs, si_signo, 0);
} else {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->psw.addr);
fixup = s390_search_extables(regs->psw.addr);
if (fixup)
regs->psw.addr = extable_fixup(fixup);
else {
@ -263,5 +263,6 @@ NOKPROBE_SYMBOL(kernel_stack_overflow);
void __init trap_init(void)
{
sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
local_mcck_enable();
}

View File

@ -247,12 +247,24 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
current);
}
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
const struct exception_table_entry *fixup;
fixup = search_extable(__start_dma_ex_table,
__stop_dma_ex_table - __start_dma_ex_table,
addr);
if (!fixup)
fixup = search_exception_tables(addr);
return fixup;
}
static noinline void do_no_context(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->psw.addr);
fixup = s390_search_extables(regs->psw.addr);
if (fixup) {
regs->psw.addr = extable_fixup(fixup);
return;

View File

@ -413,6 +413,8 @@ void __init vmem_map_init(void)
__set_memory((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}