Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (35 commits)
  [S390] time: remove unused code
  [S390] zcore: Add prefix registers to dump header
  [S390] correct vdso version string
  [S390] add support for compressed kernels
  [S390] Define new s390 ELF note sections in elf.h
  [S390] codepage conversion of kernel parameter line
  [S390] seq_file: convert drivers/s390/
  [S390] add z9-ec/z10 instruction to kernel disassembler
  [S390] dasd: correct offline processing
  [S390] dasd: fix refcounting.
  [S390] dasd: fix online/offline race
  [S390] use kprobes_built_in() in mm/fault code
  [S390] bug: use relative pointers in bug table entries
  [S390] Cleanup struct _lowcore usage and defines.
  [S390] free_initmem: reduce code duplication
  [S390] Replace ENOTSUPP usage with EOPNOTSUPP
  [S390] spinlock: check virtual cpu running status
  [S390] sysinfo: fix SYSIB 3,2,2 structure
  [S390] add MACHINE_IS_LPAR flag
  [S390] qdio: optimize cache line usage of struct qdio_irq
  ...
Linus Torvalds 2010-02-27 16:20:17 -08:00
commit 7981164791
95 changed files with 2213 additions and 1507 deletions

@ -87,6 +87,12 @@ Command line parameters
compatibility, by the device number in hexadecimal (0xabcd or abcd). Device
numbers given as 0xabcd will be interpreted as 0.0.abcd.
* /proc/cio_settle
A write request to this file is blocked until all queued cio actions are
handled. This will allow userspace to wait for pending work affecting
device availability after changing cio_ignore or the hardware configuration.
* For some of the information present in the /proc filesystem in 2.4 (namely,
/proc/subchannels and /proc/chpids), see driver-model.txt.
Information formerly in /proc/irq_count is now in /proc/interrupts.
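Editor's aside, not part of the patch: a minimal userspace sketch of how the /proc/cio_settle interface described above might be used. The open/write pattern and the "1" payload are assumptions drawn only from the description in this hunk; any write is expected to block until the queued cio work is done.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed usage: any write blocks until queued cio actions are handled. */
	int fd = open("/proc/cio_settle", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/cio_settle");
		return 1;
	}
	if (write(fd, "1", 1) < 0)
		perror("write /proc/cio_settle");
	close(fd);
	return 0;
}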

@ -223,8 +223,8 @@ touched by the driver - it should use the ccwgroup device's driver_data for its
private data.
To implement a ccwgroup driver, please refer to include/asm/ccwgroup.h. Keep in
mind that most drivers will need to implement both a ccwgroup and a ccw driver mind that most drivers will need to implement both a ccwgroup and a ccw
(unless you have a meta ccw driver, like cu3088 for lcs and ctc). driver.
2. Channel paths

@ -54,6 +54,9 @@ config GENERIC_BUG
depends on BUG depends on BUG
default y default y
config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
config NO_IOMEM config NO_IOMEM
def_bool y def_bool y
@ -95,6 +98,9 @@ config S390
select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE select INIT_ALL_POSSIBLE
select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK select ARCH_INLINE_SPIN_LOCK

@ -6,4 +6,17 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug" source "lib/Kconfig.debug"
config DEBUG_STRICT_USER_COPY_CHECKS
bool "Strict user copy size checks"
---help---
Enabling this option turns a certain set of sanity checks for user
copy operations into compile time warnings.
The copy_from_user() etc checks are there to help test if there
are sufficient security checks on the length argument of
the copy operation, by having gcc prove that the argument is
within bounds.
If unsure, or if you run an older (pre 4.4) gcc, say N.
endmenu endmenu
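Editor's aside, not part of the patch: a short sketch of the coding pattern the strict user copy checks above target, assuming the usual in-kernel copy_from_user() interface. The struct and function names are invented for illustration; with the explicit length check gcc can prove the copy stays within the destination, and without such a bound this option turns the failed proof into a compile-time warning.

#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_settings {
	char name[16];
};

static int demo_copy_settings(void __user *ubuf, unsigned long len)
{
	struct demo_settings s;

	if (len > sizeof(s))		/* bound len so gcc can prove that ... */
		return -EINVAL;
	if (copy_from_user(&s, ubuf, len))	/* ... this copy fits into s */
		return -EFAULT;
	return 0;
}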

@ -14,6 +14,7 @@
# #
ifndef CONFIG_64BIT ifndef CONFIG_64BIT
LD_BFD := elf32-s390
LDFLAGS := -m elf_s390 LDFLAGS := -m elf_s390
KBUILD_CFLAGS += -m31 KBUILD_CFLAGS += -m31
KBUILD_AFLAGS += -m31 KBUILD_AFLAGS += -m31
@ -21,6 +22,7 @@ UTS_MACHINE := s390
STACK_SIZE := 8192 STACK_SIZE := 8192
CHECKFLAGS += -D__s390__ -msize-long CHECKFLAGS += -D__s390__ -msize-long
else else
LD_BFD := elf64-s390
LDFLAGS := -m elf64_s390 LDFLAGS := -m elf64_s390
MODFLAGS += -fpic -D__PIC__ MODFLAGS += -fpic -D__PIC__
KBUILD_CFLAGS += -m64 KBUILD_CFLAGS += -m64
@ -30,6 +32,8 @@ STACK_SIZE := 16384
CHECKFLAGS += -D__s390__ -D__s390x__ CHECKFLAGS += -D__s390__ -D__s390x__
endif endif
export LD_BFD
cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
@ -85,7 +89,9 @@ KBUILD_AFLAGS += $(aflags-y)
OBJCOPYFLAGS := -O binary OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux := -e start LDFLAGS_vmlinux := -e start
head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o head-y := arch/s390/kernel/head.o
head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
head-y += arch/s390/kernel/init_task.o
core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
@ -99,12 +105,12 @@ drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/
boot := arch/s390/boot boot := arch/s390/boot
all: image all: image bzImage
install: vmlinux install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@ $(Q)$(MAKE) $(build)=$(boot) $@
image: vmlinux image bzImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zfcpdump: zfcpdump:
@ -116,4 +122,5 @@ archclean:
# Don't use tabs in echo arguments # Don't use tabs in echo arguments
define archhelp define archhelp
echo '* image - Kernel image for IPL ($(boot)/image)' echo '* image - Kernel image for IPL ($(boot)/image)'
echo '* bzImage - Compressed kernel image for IPL ($(boot)/bzImage)'
endef endef

@ -9,10 +9,18 @@ COMPILE_VERSION := __linux_compile_version_id__`hostname | \
EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
targets := image targets := image
targets += bzImage
subdir- := compressed
$(obj)/image: vmlinux FORCE $(obj)/image: vmlinux FORCE
$(call if_changed,objcopy) $(call if_changed,objcopy)
$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/compressed/vmlinux: FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
install: $(CONFIGURE) $(obj)/image install: $(CONFIGURE) $(obj)/image
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
System.map Kerntypes "$(INSTALL_PATH)" System.map Kerntypes "$(INSTALL_PATH)"

@ -0,0 +1,60 @@
#
# linux/arch/s390/boot/compressed/Makefile
#
# create a compressed vmlinux image from the original vmlinux
#
BITS := $(if $(CONFIG_64BIT),64,31)
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
vmlinux.bin.lzma misc.o piggy.o sizes.h head$(BITS).o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
GCOV_PROFILE := n
OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o)
OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
$(call if_changed,ld)
@:
sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 0x\1/p'
quiet_cmd_sizes = GEN $@
cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
$(obj)/sizes.h: vmlinux
$(call if_changed,sizes)
AFLAGS_head$(BITS).o += -I$(obj)
$(obj)/head$(BITS).o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(obj)
$(obj)/misc.o: $(obj)/sizes.h
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux
$(call if_changed,objcopy)
vmlinux.bin.all-y := $(obj)/vmlinux.bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZMA) := lzma
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
$(call if_changed,bzip2)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
$(call if_changed,lzma)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
$(call if_changed,ld)
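Editor's aside, not part of the patch: the sizes.h rule above pipes the nm output for vmlinux through the sed expression, turning the __bss_start and _end symbols into preprocessor constants; SZ__bss_start shows up again in the startup code and decompressor diffs below. A hypothetical example of the generated header, with made-up addresses:

/* Hypothetical sizes.h contents; the real values come from nm on vmlinux. */
#define SZ__bss_start 0x000000000062d000
#define SZ_end 0x0000000000654000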

@ -0,0 +1,51 @@
/*
* Startup glue code to uncompress the kernel
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include "sizes.h"
__HEAD
.globl startup_continue
startup_continue:
basr %r13,0 # get base
.LPG1:
# setup stack
l %r15,.Lstack-.LPG1(%r13)
ahi %r15,-96
l %r1,.Ldecompress-.LPG1(%r13)
basr %r14,%r1
# setup registers for memory mover & branch to target
lr %r4,%r2
l %r2,.Loffset-.LPG1(%r13)
la %r4,0(%r2,%r4)
l %r3,.Lmvsize-.LPG1(%r13)
lr %r5,%r3
# move the memory mover someplace safe
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
# decompress image is started at 0x11000
lr %r6,%r2
br %r1
mover:
mvcle %r2,%r4,0
jo mover
br %r6
mover_end:
.align 8
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.Ldecompress:
.long decompress_kernel
.Loffset:
.long 0x11000
.Lmvsize:
.long SZ__bss_start

@ -0,0 +1,48 @@
/*
* Startup glue code to uncompress the kernel
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include "sizes.h"
__HEAD
.globl startup_continue
startup_continue:
basr %r13,0 # get base
.LPG1:
# setup stack
lg %r15,.Lstack-.LPG1(%r13)
aghi %r15,-160
brasl %r14,decompress_kernel
# setup registers for memory mover & branch to target
lgr %r4,%r2
lg %r2,.Loffset-.LPG1(%r13)
la %r4,0(%r2,%r4)
lg %r3,.Lmvsize-.LPG1(%r13)
lgr %r5,%r3
# move the memory mover someplace safe
la %r1,0x200
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
# decompress image is started at 0x11000
lgr %r6,%r2
br %r1
mover:
mvcle %r2,%r4,0
jo mover
br %r6
mover_end:
.align 8
.Lstack:
.quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.Loffset:
.quad 0x11000
.Lmvsize:
.quad SZ__bss_start

@ -0,0 +1,158 @@
/*
* Definitions and wrapper functions for kernel decompressor
*
* Copyright IBM Corp. 2010
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/ipl.h>
#include "sizes.h"
/*
* gzip declarations
*/
#define STATIC static
#undef memset
#undef memcpy
#undef memmove
#define memzero(s, n) memset((s), 0, (n))
/* Symbols defined by linker scripts */
extern char input_data[];
extern int input_len;
extern int _text;
extern int _end;
static void error(char *m);
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#define HEAP_SIZE 0x400000
#else
#define HEAP_SIZE 0x10000
#endif
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
extern int _sclp_print_early(const char *);
int puts(const char *s)
{
_sclp_print_early(s);
return 0;
}
void *memset(void *s, int c, size_t n)
{
char *xs;
if (c == 0)
return __builtin_memset(s, 0, n);
xs = (char *) s;
if (n > 0)
do {
*xs++ = c;
} while (--n > 0);
return s;
}
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
return __builtin_memcpy(__dest, __src, __n);
}
void *memmove(void *__dest, __const void *__src, size_t __n)
{
char *d;
const char *s;
if (__dest <= __src)
return __builtin_memcpy(__dest, __src, __n);
d = __dest + __n;
s = __src + __n;
while (__n--)
*--d = *--s;
return __dest;
}
static void error(char *x)
{
unsigned long long psw = 0x000a0000deadbeefULL;
puts("\n\n");
puts(x);
puts("\n\n -- System halted");
asm volatile("lpsw %0" : : "Q" (psw));
}
/*
* Safe guard the ipl parameter block against a memory area that will be
* overwritten. The validity check for the ipl parameter block is complex
* (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to
* the ipl parameter block intersects with the passed memory area we can
* safely assume that we can read from that memory. In that case just copy
* the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter
* block.
*/
static void check_ipl_parmblock(void *start, unsigned long size)
{
void *src, *dst;
src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
if (src + PAGE_SIZE <= start || src >= start + size)
return;
dst = (void *) IPL_PARMBLOCK_ORIGIN;
memmove(dst, src, PAGE_SIZE);
S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
unsigned long decompress_kernel(void)
{
unsigned long output_addr;
unsigned char *output;
free_mem_ptr = (unsigned long)&_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
* kernel image.
*/
if (INITRD_START && INITRD_SIZE &&
INITRD_START < (unsigned long) output + SZ__bss_start) {
check_ipl_parmblock(output + SZ__bss_start,
INITRD_START + INITRD_SIZE);
memmove(output + SZ__bss_start,
(void *) INITRD_START, INITRD_SIZE);
INITRD_START = (unsigned long) output + SZ__bss_start;
}
#endif
puts("Uncompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
puts("Ok, booting the kernel.\n");
return (unsigned long) output;
}

@ -0,0 +1,55 @@
#include <asm-generic/vmlinux.lds.h>
#ifdef CONFIG_64BIT
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
#else
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
OUTPUT_ARCH(s390)
#endif
ENTRY(startup)
SECTIONS
{
/* Be careful parts of head_64.S assume startup_32 is at
* address 0.
*/
. = 0;
.head.text : {
_head = . ;
HEAD_TEXT
_ehead = . ;
}
.rodata.compressed : {
*(.rodata.compressed)
}
.text : {
_text = .; /* Text */
*(.text)
*(.text.*)
_etext = . ;
}
.rodata : {
_rodata = . ;
*(.rodata) /* read-only data */
*(.rodata.*)
_erodata = . ;
}
.data : {
_data = . ;
*(.data)
*(.data.*)
_edata = . ;
}
. = ALIGN(256);
.bss : {
_bss = . ;
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(8); /* For convenience during zeroing */
_ebss = .;
}
_end = .;
}

@ -0,0 +1,10 @@
SECTIONS
{
.rodata.compressed : {
input_len = .;
LONG(input_data_end - input_data) input_data = .;
*(.data)
output_len = . - 4;
input_data_end = .;
}
}

@ -36,6 +36,13 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y CONFIG_SWAP=y
CONFIG_SYSVIPC=y CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y CONFIG_SYSVIPC_SYSCTL=y

@ -488,7 +488,7 @@ out:
static int diag224(void *ptr) static int diag224(void *ptr)
{ {
int rc = -ENOTSUPP; int rc = -EOPNOTSUPP;
asm volatile( asm volatile(
" diag %1,%2,0x224\n" " diag %1,%2,0x224\n"
@ -507,7 +507,7 @@ static int diag224_get_name_table(void)
return -ENOMEM; return -ENOMEM;
if (diag224(diag224_cpu_names)) { if (diag224(diag224_cpu_names)) {
kfree(diag224_cpu_names); kfree(diag224_cpu_names);
return -ENOTSUPP; return -EOPNOTSUPP;
} }
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
return 0; return 0;

@ -18,8 +18,6 @@
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CS_LOOP(ptr, op_val, op_string) ({ \ #define __CS_LOOP(ptr, op_val, op_string) ({ \
int old_val, new_val; \ int old_val, new_val; \
asm volatile( \ asm volatile( \
@ -35,26 +33,6 @@
new_val; \ new_val; \
}) })
#else /* __GNUC__ */
#define __CS_LOOP(ptr, op_val, op_string) ({ \
int old_val, new_val; \
asm volatile( \
" l %0,0(%3)\n" \
"0: lr %1,%0\n" \
op_string " %1,%4\n" \
" cs %0,%1,0(%3)\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), \
"=m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val), \
"m" (((atomic_t *)(ptr))->counter) \
: "cc", "memory"); \
new_val; \
})
#endif /* __GNUC__ */
static inline int atomic_read(const atomic_t *v) static inline int atomic_read(const atomic_t *v)
{ {
barrier(); barrier();
@ -101,19 +79,11 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile( asm volatile(
" cs %0,%2,%1" " cs %0,%2,%1"
: "+d" (old), "=Q" (v->counter) : "+d" (old), "=Q" (v->counter)
: "d" (new), "Q" (v->counter) : "d" (new), "Q" (v->counter)
: "cc", "memory"); : "cc", "memory");
#else /* __GNUC__ */
asm volatile(
" cs %0,%3,0(%2)"
: "+d" (old), "=m" (v->counter)
: "a" (v), "d" (new), "m" (v->counter)
: "cc", "memory");
#endif /* __GNUC__ */
return old; return old;
} }
@ -140,8 +110,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CSG_LOOP(ptr, op_val, op_string) ({ \ #define __CSG_LOOP(ptr, op_val, op_string) ({ \
long long old_val, new_val; \ long long old_val, new_val; \
asm volatile( \ asm volatile( \
@ -157,26 +125,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
new_val; \ new_val; \
}) })
#else /* __GNUC__ */
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
long long old_val, new_val; \
asm volatile( \
" lg %0,0(%3)\n" \
"0: lgr %1,%0\n" \
op_string " %1,%4\n" \
" csg %0,%1,0(%3)\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), \
"=m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val), \
"m" (((atomic_t *)(ptr))->counter) \
: "cc", "memory"); \
new_val; \
})
#endif /* __GNUC__ */
static inline long long atomic64_read(const atomic64_t *v) static inline long long atomic64_read(const atomic64_t *v)
{ {
barrier(); barrier();
@ -214,19 +162,11 @@ static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
static inline long long atomic64_cmpxchg(atomic64_t *v, static inline long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new) long long old, long long new)
{ {
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile( asm volatile(
" csg %0,%2,%1" " csg %0,%2,%1"
: "+d" (old), "=Q" (v->counter) : "+d" (old), "=Q" (v->counter)
: "d" (new), "Q" (v->counter) : "d" (new), "Q" (v->counter)
: "cc", "memory"); : "cc", "memory");
#else /* __GNUC__ */
asm volatile(
" csg %0,%3,0(%2)"
: "+d" (old), "=m" (v->counter)
: "a" (v), "d" (new), "m" (v->counter)
: "cc", "memory");
#endif /* __GNUC__ */
return old; return old;
} }
@ -243,10 +183,8 @@ static inline long long atomic64_read(const atomic64_t *v)
register_pair rp; register_pair rp;
asm volatile( asm volatile(
" lm %0,%N0,0(%1)" " lm %0,%N0,%1"
: "=&d" (rp) : "=&d" (rp) : "Q" (v->counter) );
: "a" (&v->counter), "m" (v->counter)
);
return rp.pair; return rp.pair;
} }
@ -255,10 +193,8 @@ static inline void atomic64_set(atomic64_t *v, long long i)
register_pair rp = {.pair = i}; register_pair rp = {.pair = i};
asm volatile( asm volatile(
" stm %1,%N1,0(%2)" " stm %1,%N1,%0"
: "=m" (v->counter) : "=Q" (v->counter) : "d" (rp) );
: "d" (rp), "a" (&v->counter)
);
} }
static inline long long atomic64_xchg(atomic64_t *v, long long new) static inline long long atomic64_xchg(atomic64_t *v, long long new)
@ -267,11 +203,11 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
register_pair rp_old; register_pair rp_old;
asm volatile( asm volatile(
" lm %0,%N0,0(%2)\n" " lm %0,%N0,%1\n"
"0: cds %0,%3,0(%2)\n" "0: cds %0,%2,%1\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (rp_old), "+m" (v->counter) : "=&d" (rp_old), "=Q" (v->counter)
: "a" (&v->counter), "d" (rp_new) : "d" (rp_new), "Q" (v->counter)
: "cc"); : "cc");
return rp_old.pair; return rp_old.pair;
} }
@ -283,9 +219,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
register_pair rp_new = {.pair = new}; register_pair rp_new = {.pair = new};
asm volatile( asm volatile(
" cds %0,%3,0(%2)" " cds %0,%2,%1"
: "+&d" (rp_old), "+m" (v->counter) : "+&d" (rp_old), "=Q" (v->counter)
: "a" (&v->counter), "d" (rp_new) : "d" (rp_new), "Q" (v->counter)
: "cc"); : "cc");
return rp_old.pair; return rp_old.pair;
} }

@ -71,8 +71,6 @@ extern const char _sb_findmap[];
#define __BITOPS_AND "nr" #define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr" #define __BITOPS_XOR "xr"
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
asm volatile( \ asm volatile( \
" l %0,%2\n" \ " l %0,%2\n" \
@ -85,22 +83,6 @@ extern const char _sb_findmap[];
: "d" (__val), "Q" (*(unsigned long *) __addr) \ : "d" (__val), "Q" (*(unsigned long *) __addr) \
: "cc"); : "cc");
#else /* __GNUC__ */
#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
asm volatile( \
" l %0,0(%4)\n" \
"0: lr %1,%0\n" \
__op_string " %1,%3\n" \
" cs %0,%1,0(%4)\n" \
" jl 0b" \
: "=&d" (__old), "=&d" (__new), \
"=m" (*(unsigned long *) __addr) \
: "d" (__val), "a" (__addr), \
"m" (*(unsigned long *) __addr) : "cc");
#endif /* __GNUC__ */
#else /* __s390x__ */ #else /* __s390x__ */
#define __BITOPS_ALIGN 7 #define __BITOPS_ALIGN 7
@ -109,8 +91,6 @@ extern const char _sb_findmap[];
#define __BITOPS_AND "ngr" #define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr" #define __BITOPS_XOR "xgr"
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
asm volatile( \ asm volatile( \
" lg %0,%2\n" \ " lg %0,%2\n" \
@ -123,23 +103,6 @@ extern const char _sb_findmap[];
: "d" (__val), "Q" (*(unsigned long *) __addr) \ : "d" (__val), "Q" (*(unsigned long *) __addr) \
: "cc"); : "cc");
#else /* __GNUC__ */
#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
asm volatile( \
" lg %0,0(%4)\n" \
"0: lgr %1,%0\n" \
__op_string " %1,%3\n" \
" csg %0,%1,0(%4)\n" \
" jl 0b" \
: "=&d" (__old), "=&d" (__new), \
"=m" (*(unsigned long *) __addr) \
: "d" (__val), "a" (__addr), \
"m" (*(unsigned long *) __addr) : "cc");
#endif /* __GNUC__ */
#endif /* __s390x__ */ #endif /* __s390x__ */
#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
@ -261,9 +224,8 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
asm volatile( asm volatile(
" oc 0(1,%1),0(%2)" " oc %O0(1,%R0),%1"
: "=m" (*(char *) addr) : "a" (addr), : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
"a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
} }
static inline void static inline void
@ -290,9 +252,8 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
asm volatile( asm volatile(
" nc 0(1,%1),0(%2)" " nc %O0(1,%R0),%1"
: "=m" (*(char *) addr) : "a" (addr), : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
"a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc");
} }
static inline void static inline void
@ -318,9 +279,8 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
asm volatile( asm volatile(
" xc 0(1,%1),0(%2)" " xc %O0(1,%R0),%1"
: "=m" (*(char *) addr) : "a" (addr), : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
"a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
} }
static inline void static inline void
@ -349,10 +309,9 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
ch = *(unsigned char *) addr; ch = *(unsigned char *) addr;
asm volatile( asm volatile(
" oc 0(1,%1),0(%2)" " oc %O0(1,%R0),%1"
: "=m" (*(char *) addr) : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
: "a" (addr), "a" (_oi_bitmap + (nr & 7)), : "cc", "memory");
"m" (*(char *) addr) : "cc", "memory");
return (ch >> (nr & 7)) & 1; return (ch >> (nr & 7)) & 1;
} }
#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
@ -369,10 +328,9 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
ch = *(unsigned char *) addr; ch = *(unsigned char *) addr;
asm volatile( asm volatile(
" nc 0(1,%1),0(%2)" " nc %O0(1,%R0),%1"
: "=m" (*(char *) addr) : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
: "a" (addr), "a" (_ni_bitmap + (nr & 7)), : "cc", "memory");
"m" (*(char *) addr) : "cc", "memory");
return (ch >> (nr & 7)) & 1; return (ch >> (nr & 7)) & 1;
} }
#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
@ -389,10 +347,9 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
ch = *(unsigned char *) addr; ch = *(unsigned char *) addr;
asm volatile( asm volatile(
" xc 0(1,%1),0(%2)" " xc %O0(1,%R0),%1"
: "=m" (*(char *) addr) : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
: "a" (addr), "a" (_oi_bitmap + (nr & 7)), : "cc", "memory");
"m" (*(char *) addr) : "cc", "memory");
return (ch >> (nr & 7)) & 1; return (ch >> (nr & 7)) & 1;
} }
#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
@ -591,11 +548,11 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
p = (unsigned long *)((unsigned long) p + offset); p = (unsigned long *)((unsigned long) p + offset);
#ifndef __s390x__ #ifndef __s390x__
asm volatile( asm volatile(
" ic %0,0(%1)\n" " ic %0,%O1(%R1)\n"
" icm %0,2,1(%1)\n" " icm %0,2,%O1+1(%R1)\n"
" icm %0,4,2(%1)\n" " icm %0,4,%O1+2(%R1)\n"
" icm %0,8,3(%1)" " icm %0,8,%O1+3(%R1)"
: "=&d" (word) : "a" (p), "m" (*p) : "cc"); : "=&d" (word) : "Q" (*p) : "cc");
#else #else
asm volatile( asm volatile(
" lrvg %0,%1" " lrvg %0,%1"

@ -5,12 +5,6 @@
#ifdef CONFIG_BUG #ifdef CONFIG_BUG
#ifdef CONFIG_64BIT
#define S390_LONG ".quad"
#else
#define S390_LONG ".long"
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE #ifdef CONFIG_DEBUG_BUGVERBOSE
#define __EMIT_BUG(x) do { \ #define __EMIT_BUG(x) do { \
@ -21,7 +15,7 @@
"2: .asciz \""__FILE__"\"\n" \ "2: .asciz \""__FILE__"\"\n" \
".previous\n" \ ".previous\n" \
".section __bug_table,\"a\"\n" \ ".section __bug_table,\"a\"\n" \
"3:\t" S390_LONG "\t1b,2b\n" \ "3: .long 1b-3b,2b-3b\n" \
" .short %0,%1\n" \ " .short %0,%1\n" \
" .org 3b+%2\n" \ " .org 3b+%2\n" \
".previous\n" \ ".previous\n" \
@ -37,7 +31,7 @@
"0: j 0b+2\n" \ "0: j 0b+2\n" \
"1:\n" \ "1:\n" \
".section __bug_table,\"a\"\n" \ ".section __bug_table,\"a\"\n" \
"2:\t" S390_LONG "\t1b\n" \ "2: .long 1b-2b\n" \
" .short %0\n" \ " .short %0\n" \
" .org 2b+%1\n" \ " .org 2b+%1\n" \
".previous\n" \ ".previous\n" \

@ -32,6 +32,7 @@ typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
extern int crw_register_handler(int rsc, crw_handler_t handler); extern int crw_register_handler(int rsc, crw_handler_t handler);
extern void crw_unregister_handler(int rsc); extern void crw_unregister_handler(int rsc);
extern void crw_handle_channel_report(void); extern void crw_handle_channel_report(void);
void crw_wait_for_channel_report(void);
#define NR_RSCS 16 #define NR_RSCS 16

@ -145,11 +145,11 @@ static inline int etr_setr(struct etr_eacr *ctrl)
int rc = -ENOSYS; int rc = -ENOSYS;
asm volatile( asm volatile(
" .insn s,0xb2160000,0(%2)\n" " .insn s,0xb2160000,%1\n"
"0: la %0,0\n" "0: la %0,0\n"
"1:\n" "1:\n"
EX_TABLE(0b,1b) EX_TABLE(0b,1b)
: "+d" (rc) : "m" (*ctrl), "a" (ctrl)); : "+d" (rc) : "Q" (*ctrl));
return rc; return rc;
} }
@ -159,11 +159,11 @@ static inline int etr_stetr(struct etr_aib *aib)
int rc = -ENOSYS; int rc = -ENOSYS;
asm volatile( asm volatile(
" .insn s,0xb2170000,0(%2)\n" " .insn s,0xb2170000,%1\n"
"0: la %0,0\n" "0: la %0,0\n"
"1:\n" "1:\n"
EX_TABLE(0b,1b) EX_TABLE(0b,1b)
: "+d" (rc) : "m" (*aib), "a" (aib)); : "+d" (rc) : "Q" (*aib));
return rc; return rc;
} }
@ -174,11 +174,11 @@ static inline int etr_steai(struct etr_aib *aib, unsigned int func)
int rc = -ENOSYS; int rc = -ENOSYS;
asm volatile( asm volatile(
" .insn s,0xb2b30000,0(%2)\n" " .insn s,0xb2b30000,%1\n"
"0: la %0,0\n" "0: la %0,0\n"
"1:\n" "1:\n"
EX_TABLE(0b,1b) EX_TABLE(0b,1b)
: "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0)); : "+d" (rc) : "Q" (*aib), "d" (reg0));
return rc; return rc;
} }

@ -8,8 +8,6 @@
#include <linux/types.h> #include <linux/types.h>
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
/* store then or system mask. */ /* store then or system mask. */
#define __raw_local_irq_stosm(__or) \ #define __raw_local_irq_stosm(__or) \
({ \ ({ \
@ -36,40 +34,6 @@
asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
}) })
#else /* __GNUC__ */
/* store then or system mask. */
#define __raw_local_irq_stosm(__or) \
({ \
unsigned long __mask; \
asm volatile( \
" stosm 0(%1),%2" \
: "=m" (__mask) \
: "a" (&__mask), "i" (__or) : "memory"); \
__mask; \
})
/* store then and system mask. */
#define __raw_local_irq_stnsm(__and) \
({ \
unsigned long __mask; \
asm volatile( \
" stnsm 0(%1),%2" \
: "=m" (__mask) \
: "a" (&__mask), "i" (__and) : "memory"); \
__mask; \
})
/* set system mask. */
#define __raw_local_irq_ssm(__mask) \
({ \
asm volatile( \
" ssm 0(%0)" \
: : "a" (&__mask), "m" (__mask) : "memory"); \
})
#endif /* __GNUC__ */
/* interrupt control.. */ /* interrupt control.. */
static inline unsigned long raw_local_irq_enable(void) static inline unsigned long raw_local_irq_enable(void)
{ {

@ -1,141 +1,16 @@
/* /*
* include/asm-s390/lowcore.h * Copyright IBM Corp. 1999,2010
* * Author(s): Hartmut Penner <hp@de.ibm.com>,
* S390 version * Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation * Denis Joseph Barrow,
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*/ */
#ifndef _ASM_S390_LOWCORE_H #ifndef _ASM_S390_LOWCORE_H
#define _ASM_S390_LOWCORE_H #define _ASM_S390_LOWCORE_H
#define __LC_IPL_PARMBLOCK_PTR 0x0014
#define __LC_EXT_PARAMS 0x0080
#define __LC_CPU_ADDRESS 0x0084
#define __LC_EXT_INT_CODE 0x0086
#define __LC_SVC_ILC 0x0088
#define __LC_SVC_INT_CODE 0x008a
#define __LC_PGM_ILC 0x008c
#define __LC_PGM_INT_CODE 0x008e
#define __LC_PER_ATMID 0x0096
#define __LC_PER_ADDRESS 0x0098
#define __LC_PER_ACCESS_ID 0x00a1
#define __LC_AR_MODE_ID 0x00a3
#define __LC_SUBCHANNEL_ID 0x00b8
#define __LC_SUBCHANNEL_NR 0x00ba
#define __LC_IO_INT_PARM 0x00bc
#define __LC_IO_INT_WORD 0x00c0
#define __LC_STFL_FAC_LIST 0x00c8
#define __LC_MCCK_CODE 0x00e8
#define __LC_DUMP_REIPL 0x0e00
#ifndef __s390x__
#define __LC_EXT_OLD_PSW 0x0018
#define __LC_SVC_OLD_PSW 0x0020
#define __LC_PGM_OLD_PSW 0x0028
#define __LC_MCK_OLD_PSW 0x0030
#define __LC_IO_OLD_PSW 0x0038
#define __LC_EXT_NEW_PSW 0x0058
#define __LC_SVC_NEW_PSW 0x0060
#define __LC_PGM_NEW_PSW 0x0068
#define __LC_MCK_NEW_PSW 0x0070
#define __LC_IO_NEW_PSW 0x0078
#define __LC_SAVE_AREA 0x0200
#define __LC_RETURN_PSW 0x0240
#define __LC_RETURN_MCCK_PSW 0x0248
#define __LC_SYNC_ENTER_TIMER 0x0250
#define __LC_ASYNC_ENTER_TIMER 0x0258
#define __LC_EXIT_TIMER 0x0260
#define __LC_USER_TIMER 0x0268
#define __LC_SYSTEM_TIMER 0x0270
#define __LC_STEAL_TIMER 0x0278
#define __LC_LAST_UPDATE_TIMER 0x0280
#define __LC_LAST_UPDATE_CLOCK 0x0288
#define __LC_CURRENT 0x0290
#define __LC_THREAD_INFO 0x0294
#define __LC_KERNEL_STACK 0x0298
#define __LC_ASYNC_STACK 0x029c
#define __LC_PANIC_STACK 0x02a0
#define __LC_KERNEL_ASCE 0x02a4
#define __LC_USER_ASCE 0x02a8
#define __LC_USER_EXEC_ASCE 0x02ac
#define __LC_CPUID 0x02b0
#define __LC_INT_CLOCK 0x02c8
#define __LC_MACHINE_FLAGS 0x02d8
#define __LC_FTRACE_FUNC 0x02dc
#define __LC_IRB 0x0300
#define __LC_PFAULT_INTPARM 0x0080
#define __LC_CPU_TIMER_SAVE_AREA 0x00d8
#define __LC_CLOCK_COMP_SAVE_AREA 0x00e0
#define __LC_PSW_SAVE_AREA 0x0100
#define __LC_PREFIX_SAVE_AREA 0x0108
#define __LC_AREGS_SAVE_AREA 0x0120
#define __LC_FPREGS_SAVE_AREA 0x0160
#define __LC_GPREGS_SAVE_AREA 0x0180
#define __LC_CREGS_SAVE_AREA 0x01c0
#else /* __s390x__ */
#define __LC_LAST_BREAK 0x0110
#define __LC_EXT_OLD_PSW 0x0130
#define __LC_SVC_OLD_PSW 0x0140
#define __LC_PGM_OLD_PSW 0x0150
#define __LC_MCK_OLD_PSW 0x0160
#define __LC_IO_OLD_PSW 0x0170
#define __LC_RESTART_PSW 0x01a0
#define __LC_EXT_NEW_PSW 0x01b0
#define __LC_SVC_NEW_PSW 0x01c0
#define __LC_PGM_NEW_PSW 0x01d0
#define __LC_MCK_NEW_PSW 0x01e0
#define __LC_IO_NEW_PSW 0x01f0
#define __LC_SAVE_AREA 0x0200
#define __LC_RETURN_PSW 0x0280
#define __LC_RETURN_MCCK_PSW 0x0290
#define __LC_SYNC_ENTER_TIMER 0x02a0
#define __LC_ASYNC_ENTER_TIMER 0x02a8
#define __LC_EXIT_TIMER 0x02b0
#define __LC_USER_TIMER 0x02b8
#define __LC_SYSTEM_TIMER 0x02c0
#define __LC_STEAL_TIMER 0x02c8
#define __LC_LAST_UPDATE_TIMER 0x02d0
#define __LC_LAST_UPDATE_CLOCK 0x02d8
#define __LC_CURRENT 0x02e0
#define __LC_THREAD_INFO 0x02e8
#define __LC_KERNEL_STACK 0x02f0
#define __LC_ASYNC_STACK 0x02f8
#define __LC_PANIC_STACK 0x0300
#define __LC_KERNEL_ASCE 0x0308
#define __LC_USER_ASCE 0x0310
#define __LC_USER_EXEC_ASCE 0x0318
#define __LC_CPUID 0x0320
#define __LC_INT_CLOCK 0x0340
#define __LC_VDSO_PER_CPU 0x0350
#define __LC_MACHINE_FLAGS 0x0358
#define __LC_FTRACE_FUNC 0x0360
#define __LC_IRB 0x0380
#define __LC_PASTE 0x03c0
#define __LC_PFAULT_INTPARM 0x11b8
#define __LC_FPREGS_SAVE_AREA 0x1200
#define __LC_GPREGS_SAVE_AREA 0x1280
#define __LC_PSW_SAVE_AREA 0x1300
#define __LC_PREFIX_SAVE_AREA 0x1318
#define __LC_FP_CREG_SAVE_AREA 0x131c
#define __LC_TODREG_SAVE_AREA 0x1324
#define __LC_CPU_TIMER_SAVE_AREA 0x1328
#define __LC_CLOCK_COMP_SAVE_AREA 0x1331
#define __LC_AREGS_SAVE_AREA 0x1340
#define __LC_CREGS_SAVE_AREA 0x1380
#endif /* __s390x__ */
#ifndef __ASSEMBLY__
#include <asm/cpu.h>
#include <asm/ptrace.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
void restart_int_handler(void); void restart_int_handler(void);
void ext_int_handler(void); void ext_int_handler(void);
@ -144,7 +19,12 @@ void pgm_check_handler(void);
void mcck_int_handler(void); void mcck_int_handler(void);
void io_int_handler(void); void io_int_handler(void);
struct save_area_s390 { #ifdef CONFIG_32BIT
#define LC_ORDER 0
#define LC_PAGES 1
struct save_area {
u32 ext_save; u32 ext_save;
u64 timer; u64 timer;
u64 clk_cmp; u64 clk_cmp;
@ -156,54 +36,13 @@ struct save_area_s390 {
u64 fp_regs[4]; u64 fp_regs[4];
u32 gp_regs[16]; u32 gp_regs[16];
u32 ctrl_regs[16]; u32 ctrl_regs[16];
} __attribute__((packed)); } __packed;
struct save_area_s390x { struct _lowcore {
u64 fp_regs[16];
u64 gp_regs[16];
u8 psw[16];
u8 pad1[8];
u32 pref_reg;
u32 fp_ctrl_reg;
u8 pad2[4];
u32 tod_reg;
u64 timer;
u64 clk_cmp;
u8 pad3[8];
u32 acc_regs[16];
u64 ctrl_regs[16];
} __attribute__((packed));
union save_area {
struct save_area_s390 s390;
struct save_area_s390x s390x;
};
#define SAVE_AREA_BASE_S390 0xd4
#define SAVE_AREA_BASE_S390X 0x1200
#ifndef __s390x__
#define SAVE_AREA_SIZE sizeof(struct save_area_s390)
#define SAVE_AREA_BASE SAVE_AREA_BASE_S390
#else
#define SAVE_AREA_SIZE sizeof(struct save_area_s390x)
#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
#endif
#ifndef __s390x__
#define LC_ORDER 0
#else
#define LC_ORDER 1
#endif
#define LC_PAGES (1UL << LC_ORDER)
struct _lowcore
{
#ifndef __s390x__
/* 0x0000 - 0x01ff: defined by architecture */
psw_t restart_psw; /* 0x0000 */ psw_t restart_psw; /* 0x0000 */
__u32 ccw2[4]; /* 0x0008 */ psw_t restart_old_psw; /* 0x0008 */
__u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
psw_t external_old_psw; /* 0x0018 */ psw_t external_old_psw; /* 0x0018 */
psw_t svc_old_psw; /* 0x0020 */ psw_t svc_old_psw; /* 0x0020 */
psw_t program_old_psw; /* 0x0028 */ psw_t program_old_psw; /* 0x0028 */
@ -229,7 +68,9 @@ struct _lowcore
__u32 monitor_code; /* 0x009c */ __u32 monitor_code; /* 0x009c */
__u8 exc_access_id; /* 0x00a0 */ __u8 exc_access_id; /* 0x00a0 */
__u8 per_access_id; /* 0x00a1 */ __u8 per_access_id; /* 0x00a1 */
__u8 pad_0x00a2[0x00b8-0x00a2]; /* 0x00a2 */ __u8 op_access_id; /* 0x00a2 */
__u8 ar_access_id; /* 0x00a3 */
__u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
__u16 subchannel_id; /* 0x00b8 */ __u16 subchannel_id; /* 0x00b8 */
__u16 subchannel_nr; /* 0x00ba */ __u16 subchannel_nr; /* 0x00ba */
__u32 io_int_parm; /* 0x00bc */ __u32 io_int_parm; /* 0x00bc */
@ -245,8 +86,9 @@ struct _lowcore
__u32 external_damage_code; /* 0x00f4 */ __u32 external_damage_code; /* 0x00f4 */
__u32 failing_storage_address; /* 0x00f8 */ __u32 failing_storage_address; /* 0x00f8 */
__u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
__u32 st_status_fixed_logout[4]; /* 0x0100 */ psw_t psw_save_area; /* 0x0100 */
__u8 pad_0x0110[0x0120-0x0110]; /* 0x0110 */ __u32 prefixreg_save_area; /* 0x0108 */
__u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
/* CPU register save area: defined by architecture */ /* CPU register save area: defined by architecture */
__u32 access_regs_save_area[16]; /* 0x0120 */ __u32 access_regs_save_area[16]; /* 0x0120 */
@ -310,10 +152,32 @@ struct _lowcore
/* Align to the top 1k of prefix area */ /* Align to the top 1k of prefix area */
__u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */ __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */
#else /* !__s390x__ */ } __packed;
/* 0x0000 - 0x01ff: defined by architecture */
__u32 ccw1[2]; /* 0x0000 */ #else /* CONFIG_32BIT */
__u32 ccw2[4]; /* 0x0008 */
#define LC_ORDER 1
#define LC_PAGES 2
struct save_area {
u64 fp_regs[16];
u64 gp_regs[16];
u8 psw[16];
u8 pad1[8];
u32 pref_reg;
u32 fp_ctrl_reg;
u8 pad2[4];
u32 tod_reg;
u64 timer;
u64 clk_cmp;
u8 pad3[8];
u32 acc_regs[16];
u64 ctrl_regs[16];
} __packed;
struct _lowcore {
__u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
__u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
__u32 ext_params; /* 0x0080 */ __u32 ext_params; /* 0x0080 */
__u16 cpu_addr; /* 0x0084 */ __u16 cpu_addr; /* 0x0084 */
@ -344,7 +208,9 @@ struct _lowcore
__u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
__u32 external_damage_code; /* 0x00f4 */ __u32 external_damage_code; /* 0x00f4 */
addr_t failing_storage_address; /* 0x00f8 */ addr_t failing_storage_address; /* 0x00f8 */
__u8 pad_0x0100[0x0120-0x0100]; /* 0x0100 */ __u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
__u64 breaking_event_addr; /* 0x0110 */
__u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
psw_t restart_old_psw; /* 0x0120 */ psw_t restart_old_psw; /* 0x0120 */
psw_t external_old_psw; /* 0x0130 */ psw_t external_old_psw; /* 0x0130 */
psw_t svc_old_psw; /* 0x0140 */ psw_t svc_old_psw; /* 0x0140 */
@ -425,7 +291,7 @@ struct _lowcore
/* CPU register save area: defined by architecture */ /* CPU register save area: defined by architecture */
__u64 floating_pt_save_area[16]; /* 0x1200 */ __u64 floating_pt_save_area[16]; /* 0x1200 */
__u64 gpregs_save_area[16]; /* 0x1280 */ __u64 gpregs_save_area[16]; /* 0x1280 */
__u32 st_status_fixed_logout[4]; /* 0x1300 */ psw_t psw_save_area; /* 0x1300 */
__u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */ __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */
__u32 prefixreg_save_area; /* 0x1318 */ __u32 prefixreg_save_area; /* 0x1318 */
__u32 fpt_creg_save_area; /* 0x131c */ __u32 fpt_creg_save_area; /* 0x131c */
@ -439,10 +305,12 @@ struct _lowcore
/* align to the top of the prefix area */ /* align to the top of the prefix area */
__u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */ __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */
#endif /* !__s390x__ */ } __packed;
} __attribute__((packed)); /* End structure*/
#endif /* CONFIG_32BIT */
#define S390_lowcore (*((struct _lowcore *) 0)) #define S390_lowcore (*((struct _lowcore *) 0))
extern struct _lowcore *lowcore_ptr[]; extern struct _lowcore *lowcore_ptr[];
static inline void set_prefix(__u32 address) static inline void set_prefix(__u32 address)
@ -458,6 +326,4 @@ static inline __u32 store_prefix(void)
return address; return address;
} }
#endif #endif /* _ASM_S390_LOWCORE_H */
#endif

@ -107,9 +107,6 @@ typedef pte_t *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } )
/* default storage key used for all pages */
extern unsigned int default_storage_key;
static inline void static inline void
page_set_storage_key(unsigned long addr, unsigned int skey) page_set_storage_key(unsigned long addr, unsigned int skey)
{ {

@ -28,7 +28,7 @@
static inline void get_cpu_id(struct cpuid *ptr) static inline void get_cpu_id(struct cpuid *ptr)
{ {
asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); asm volatile("stidp %0" : "=Q" (*ptr));
} }
extern void s390_adjust_jiffies(void); extern void s390_adjust_jiffies(void);
@ -184,9 +184,9 @@ static inline void psw_set_key(unsigned int key)
static inline void __load_psw(psw_t psw) static inline void __load_psw(psw_t psw)
{ {
#ifndef __s390x__ #ifndef __s390x__
asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); asm volatile("lpsw %0" : : "Q" (psw) : "cc");
#else #else
asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); asm volatile("lpswe %0" : : "Q" (psw) : "cc");
#endif #endif
} }
@ -206,17 +206,17 @@ static inline void __load_psw_mask (unsigned long mask)
asm volatile( asm volatile(
" basr %0,0\n" " basr %0,0\n"
"0: ahi %0,1f-0b\n" "0: ahi %0,1f-0b\n"
" st %0,4(%1)\n" " st %0,%O1+4(%R1)\n"
" lpsw 0(%1)\n" " lpsw %1\n"
"1:" "1:"
: "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
#else /* __s390x__ */ #else /* __s390x__ */
asm volatile( asm volatile(
" larl %0,1f\n" " larl %0,1f\n"
" stg %0,8(%1)\n" " stg %0,%O1+8(%R1)\n"
" lpswe 0(%1)\n" " lpswe %1\n"
"1:" "1:"
: "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
#endif /* __s390x__ */ #endif /* __s390x__ */
} }

@ -13,7 +13,8 @@
#include <asm/cio.h> #include <asm/cio.h>
#include <asm/ccwdev.h> #include <asm/ccwdev.h>
#define QDIO_MAX_QUEUES_PER_IRQ 32 /* only use 4 queues to save some cachelines */
#define QDIO_MAX_QUEUES_PER_IRQ 4
#define QDIO_MAX_BUFFERS_PER_Q 128 #define QDIO_MAX_BUFFERS_PER_Q 128
#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1) #define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
#define QDIO_MAX_ELEMENTS_PER_BUFFER 16 #define QDIO_MAX_ELEMENTS_PER_BUFFER 16

@ -124,21 +124,21 @@ static inline void __down_read(struct rw_semaphore *sem)
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,%2\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
" ahi %1,%5\n" " ahi %1,%4\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,%2\n"
" jl 0b" " jl 0b"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%3)\n" " lg %0,%2\n"
"0: lgr %1,%0\n" "0: lgr %1,%0\n"
" aghi %1,%5\n" " aghi %1,%4\n"
" csg %0,%1,0(%3)\n" " csg %0,%1,%2\n"
" jl 0b" " jl 0b"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
"i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); : "cc", "memory");
if (old < 0) if (old < 0)
rwsem_down_read_failed(sem); rwsem_down_read_failed(sem);
} }
@ -152,25 +152,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,%2\n"
"0: ltr %1,%0\n" "0: ltr %1,%0\n"
" jm 1f\n" " jm 1f\n"
" ahi %1,%5\n" " ahi %1,%4\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,%2\n"
" jl 0b\n" " jl 0b\n"
"1:" "1:"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%3)\n" " lg %0,%2\n"
"0: ltgr %1,%0\n" "0: ltgr %1,%0\n"
" jm 1f\n" " jm 1f\n"
" aghi %1,%5\n" " aghi %1,%4\n"
" csg %0,%1,0(%3)\n" " csg %0,%1,%2\n"
" jl 0b\n" " jl 0b\n"
"1:" "1:"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
"i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); : "cc", "memory");
return old >= 0 ? 1 : 0; return old >= 0 ? 1 : 0;
} }
@ -184,20 +184,20 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
tmp = RWSEM_ACTIVE_WRITE_BIAS; tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,%2\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
" a %1,%5\n" " a %1,%4\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,%2\n"
" jl 0b" " jl 0b"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%3)\n" " lg %0,%2\n"
"0: lgr %1,%0\n" "0: lgr %1,%0\n"
" ag %1,%5\n" " ag %1,%4\n"
" csg %0,%1,0(%3)\n" " csg %0,%1,%2\n"
" jl 0b" " jl 0b"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), "m" (tmp) : "Q" (sem->count), "m" (tmp)
: "cc", "memory"); : "cc", "memory");
if (old != 0) if (old != 0)
rwsem_down_write_failed(sem); rwsem_down_write_failed(sem);
@ -217,22 +217,22 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%2)\n" " l %0,%1\n"
"0: ltr %0,%0\n" "0: ltr %0,%0\n"
" jnz 1f\n" " jnz 1f\n"
" cs %0,%4,0(%2)\n" " cs %0,%3,%1\n"
" jl 0b\n" " jl 0b\n"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%2)\n" " lg %0,%1\n"
"0: ltgr %0,%0\n" "0: ltgr %0,%0\n"
" jnz 1f\n" " jnz 1f\n"
" csg %0,%4,0(%2)\n" " csg %0,%3,%1\n"
" jl 0b\n" " jl 0b\n"
#endif /* __s390x__ */ #endif /* __s390x__ */
"1:" "1:"
: "=&d" (old), "=m" (sem->count) : "=&d" (old), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
"d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory"); : "cc", "memory");
return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
} }
@ -245,21 +245,20 @@ static inline void __up_read(struct rw_semaphore *sem)
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,%2\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
" ahi %1,%5\n" " ahi %1,%4\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,%2\n"
" jl 0b" " jl 0b"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%3)\n" " lg %0,%2\n"
"0: lgr %1,%0\n" "0: lgr %1,%0\n"
" aghi %1,%5\n" " aghi %1,%4\n"
" csg %0,%1,0(%3)\n" " csg %0,%1,%2\n"
" jl 0b" " jl 0b"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
"i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory"); : "cc", "memory");
if (new < 0) if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0) if ((new & RWSEM_ACTIVE_MASK) == 0)
@ -276,20 +275,20 @@ static inline void __up_write(struct rw_semaphore *sem)
tmp = -RWSEM_ACTIVE_WRITE_BIAS; tmp = -RWSEM_ACTIVE_WRITE_BIAS;
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,%2\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
" a %1,%5\n" " a %1,%4\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,%2\n"
" jl 0b" " jl 0b"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%3)\n" " lg %0,%2\n"
"0: lgr %1,%0\n" "0: lgr %1,%0\n"
" ag %1,%5\n" " ag %1,%4\n"
" csg %0,%1,0(%3)\n" " csg %0,%1,%2\n"
" jl 0b" " jl 0b"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), "m" (tmp) : "Q" (sem->count), "m" (tmp)
: "cc", "memory"); : "cc", "memory");
if (new < 0) if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0) if ((new & RWSEM_ACTIVE_MASK) == 0)
@ -306,20 +305,20 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
tmp = -RWSEM_WAITING_BIAS; tmp = -RWSEM_WAITING_BIAS;
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,%2\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
" a %1,%5\n" " a %1,%4\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,%2\n"
" jl 0b" " jl 0b"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%3)\n" " lg %0,%2\n"
"0: lgr %1,%0\n" "0: lgr %1,%0\n"
" ag %1,%5\n" " ag %1,%4\n"
" csg %0,%1,0(%3)\n" " csg %0,%1,%2\n"
" jl 0b" " jl 0b"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), "m" (tmp) : "Q" (sem->count), "m" (tmp)
: "cc", "memory"); : "cc", "memory");
if (new > 1) if (new > 1)
rwsem_downgrade_wake(sem); rwsem_downgrade_wake(sem);
@ -334,20 +333,20 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,%2\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
" ar %1,%5\n" " ar %1,%4\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,%2\n"
" jl 0b" " jl 0b"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%3)\n" " lg %0,%2\n"
"0: lgr %1,%0\n" "0: lgr %1,%0\n"
" agr %1,%5\n" " agr %1,%4\n"
" csg %0,%1,0(%3)\n" " csg %0,%1,%2\n"
" jl 0b" " jl 0b"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), "d" (delta) : "Q" (sem->count), "d" (delta)
: "cc", "memory"); : "cc", "memory");
} }
@ -360,20 +359,20 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,%2\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
" ar %1,%5\n" " ar %1,%4\n"
" cs %0,%1,0(%3)\n" " cs %0,%1,%2\n"
" jl 0b" " jl 0b"
#else /* __s390x__ */ #else /* __s390x__ */
" lg %0,0(%3)\n" " lg %0,%2\n"
"0: lgr %1,%0\n" "0: lgr %1,%0\n"
" agr %1,%5\n" " agr %1,%4\n"
" csg %0,%1,0(%3)\n" " csg %0,%1,%2\n"
" jl 0b" " jl 0b"
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "a" (&sem->count), "m" (sem->count), "d" (delta) : "Q" (sem->count), "d" (delta)
: "cc", "memory"); : "cc", "memory");
return new; return new;
} }

@ -14,14 +14,14 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/lowcore.h>
#include <asm/types.h>
#define PARMAREA 0x10400 #define PARMAREA 0x10400
#define MEMORY_CHUNKS 256 #define MEMORY_CHUNKS 256
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/lowcore.h>
#include <asm/types.h>
#ifndef __s390x__ #ifndef __s390x__
#define IPL_DEVICE (*(unsigned long *) (0x10404)) #define IPL_DEVICE (*(unsigned long *) (0x10404))
#define INITRD_START (*(unsigned long *) (0x1040C)) #define INITRD_START (*(unsigned long *) (0x1040C))
@ -71,9 +71,12 @@ extern unsigned int user_mode;
#define MACHINE_FLAG_KVM (1UL << 9) #define MACHINE_FLAG_KVM (1UL << 9)
#define MACHINE_FLAG_HPAGE (1UL << 10) #define MACHINE_FLAG_HPAGE (1UL << 10)
#define MACHINE_FLAG_PFMF (1UL << 11) #define MACHINE_FLAG_PFMF (1UL << 11)
#define MACHINE_FLAG_LPAR (1UL << 12)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
#ifndef __s390x__ #ifndef __s390x__

@ -1,24 +1,19 @@
/* /*
* include/asm-s390/sigp.h * Routines and structures for signalling other processors.
* *
* S390 version * Copyright IBM Corp. 1999,2010
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Denis Joseph Barrow,
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), * Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Martin Schwidefsky (schwidefsky@de.ibm.com) * Heiko Carstens <heiko.carstens@de.ibm.com>,
* Heiko Carstens (heiko.carstens@de.ibm.com)
*
* sigp.h by D.J. Barrow (c) IBM 1999
* contains routines / structures for signalling other S/390 processors in an
* SMP configuration.
*/ */
#ifndef __SIGP__ #ifndef __ASM_SIGP_H
#define __SIGP__ #define __ASM_SIGP_H
#include <asm/system.h> #include <asm/system.h>
/* get real cpu address from logical cpu number */ /* Get real cpu address from logical cpu number. */
extern int __cpu_logical_map[]; extern unsigned short __cpu_logical_map[];
static inline int cpu_logical_map(int cpu) static inline int cpu_logical_map(int cpu)
{ {
@ -29,107 +24,108 @@ static inline int cpu_logical_map(int cpu)
#endif #endif
} }
typedef enum enum {
{ sigp_sense = 1,
sigp_unassigned=0x0, sigp_external_call = 2,
sigp_sense, sigp_emergency_signal = 3,
sigp_external_call, sigp_start = 4,
sigp_emergency_signal, sigp_stop = 5,
sigp_start, sigp_restart = 6,
sigp_stop, sigp_stop_and_store_status = 9,
sigp_restart, sigp_initial_cpu_reset = 11,
sigp_unassigned1, sigp_cpu_reset = 12,
sigp_unassigned2, sigp_set_prefix = 13,
sigp_stop_and_store_status, sigp_store_status_at_address = 14,
sigp_unassigned3, sigp_store_extended_status_at_address = 15,
sigp_initial_cpu_reset, sigp_set_architecture = 18,
sigp_cpu_reset, sigp_conditional_emergency_signal = 19,
sigp_set_prefix, sigp_sense_running = 21,
sigp_store_status_at_address, };
sigp_store_extended_status_at_address
} sigp_order_code;
typedef __u32 sigp_status_word;
typedef enum
{
sigp_order_code_accepted=0,
sigp_status_stored,
sigp_busy,
sigp_not_operational
} sigp_ccode;
enum {
sigp_order_code_accepted = 0,
sigp_status_stored = 1,
sigp_busy = 2,
sigp_not_operational = 3,
};
/* /*
* Definitions for the external call * Definitions for external call.
*/ */
enum {
/* 'Bit' signals, asynchronous */ ec_schedule = 0,
typedef enum
{
ec_schedule=0,
ec_call_function, ec_call_function,
ec_call_function_single, ec_call_function_single,
ec_bit_last };
} ec_bit_sig;
/* /*
* Signal processor * Signal processor.
*/ */
static inline sigp_ccode static inline int raw_sigp(u16 cpu, int order)
signal_processor(__u16 cpu_addr, sigp_order_code order_code)
{ {
register unsigned long reg1 asm ("1") = 0; register unsigned long reg1 asm ("1") = 0;
sigp_ccode ccode; int ccode;
asm volatile( asm volatile(
" sigp %1,%2,0(%3)\n" " sigp %1,%2,0(%3)\n"
" ipm %0\n" " ipm %0\n"
" srl %0,28\n" " srl %0,28\n"
: "=d" (ccode) : "=d" (ccode)
: "d" (reg1), "d" (cpu_logical_map(cpu_addr)), : "d" (reg1), "d" (cpu),
"a" (order_code) : "cc" , "memory"); "a" (order) : "cc" , "memory");
return ccode; return ccode;
} }
/* /*
* Signal processor with parameter * Signal processor with parameter.
*/ */
static inline sigp_ccode static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code)
{ {
register unsigned int reg1 asm ("1") = parameter; register unsigned int reg1 asm ("1") = parameter;
sigp_ccode ccode; int ccode;
asm volatile( asm volatile(
" sigp %1,%2,0(%3)\n" " sigp %1,%2,0(%3)\n"
" ipm %0\n" " ipm %0\n"
" srl %0,28\n" " srl %0,28\n"
: "=d" (ccode) : "=d" (ccode)
: "d" (reg1), "d" (cpu_logical_map(cpu_addr)), : "d" (reg1), "d" (cpu),
"a" (order_code) : "cc" , "memory"); "a" (order) : "cc" , "memory");
return ccode; return ccode;
} }
/* /*
* Signal processor with parameter and return status * Signal processor with parameter and return status.
*/ */
static inline sigp_ccode static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr,
sigp_order_code order_code)
{ {
register unsigned int reg1 asm ("1") = parameter; register unsigned int reg1 asm ("1") = parm;
sigp_ccode ccode; int ccode;
asm volatile( asm volatile(
" sigp %1,%2,0(%3)\n" " sigp %1,%2,0(%3)\n"
" ipm %0\n" " ipm %0\n"
" srl %0,28\n" " srl %0,28\n"
: "=d" (ccode), "+d" (reg1) : "=d" (ccode), "+d" (reg1)
: "d" (cpu_logical_map(cpu_addr)), "a" (order_code) : "d" (cpu), "a" (order)
: "cc" , "memory"); : "cc" , "memory");
*statusptr = reg1; *status = reg1;
return ccode; return ccode;
} }
#endif /* __SIGP__ */ static inline int sigp(int cpu, int order)
{
return raw_sigp(cpu_logical_map(cpu), order);
}
static inline int sigp_p(u32 parameter, int cpu, int order)
{
return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
}
static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
{
return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
}
#endif /* __ASM_SIGP_H */
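The rewritten header above splits every helper into a raw_sigp*() variant that takes a physical CPU address and a thin sigp*() wrapper that applies cpu_logical_map() first, with explicit numeric order and condition codes instead of the old typedef'd enums. A minimal host-side sketch of that layering, with a made-up logical-to-physical map and a stub standing in for the real SIGP instruction (raw_sigp_mock() and cpu_logical_map_demo[] are hypothetical, illustration only):

```c
#include <stdio.h>

enum { sigp_restart = 6 };              /* order code, as in the enum above */
enum { sigp_order_code_accepted = 0 };  /* condition code 0 */

/* Fake logical->physical CPU map; the kernel's __cpu_logical_map[] is
 * filled in during SMP bring-up. */
static unsigned short cpu_logical_map_demo[] = { 0, 2, 5, 7 };

/* Stand-in for the SIGP instruction: pretend every order is accepted. */
static int raw_sigp_mock(unsigned short cpu, int order)
{
	printf("SIGP order %d -> physical cpu %u\n", order, cpu);
	return sigp_order_code_accepted;
}

/* Mirrors sigp(): resolve the logical cpu, then issue the raw order. */
static int sigp_demo(int cpu, int order)
{
	return raw_sigp_mock(cpu_logical_map_demo[cpu], order);
}

int main(void)
{
	return sigp_demo(1, sigp_restart) == sigp_order_code_accepted ? 0 : 1;
}
```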


@ -29,7 +29,43 @@ extern int smp_cpu_polarization[];
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
int from, int to);
extern void smp_restart_cpu(void);
/*
* returns 1 if (virtual) cpu is scheduled
* returns 0 otherwise
*/
static inline int smp_vcpu_scheduled(int cpu)
{
u32 status;
switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
case sigp_status_stored:
/* Check for running status */
if (status & 0x400)
return 0;
break;
case sigp_not_operational:
return 0;
default:
break;
}
return 1;
}
#else /* CONFIG_SMP */
static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
{
func(data);
}
#define smp_vcpu_scheduled (1)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
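smp_vcpu_scheduled() above asks the addressed CPU for its running state with the new sigp_sense_running order and treats bit 0x400 of the stored status as "not currently backed by a physical CPU". The same decision logic, detached from the sigp_ps() call so it compiles anywhere (condition-code values taken from the sigp.h enum earlier in this diff):

```c
#include <stdbool.h>
#include <stdio.h>

enum { sigp_status_stored = 1, sigp_not_operational = 3 };

/* Same decisions as smp_vcpu_scheduled() above, with the condition code
 * and status word passed in instead of obtained via sigp_ps(). */
static bool vcpu_running(int ccode, unsigned int status)
{
	if (ccode == sigp_not_operational)
		return false;
	if (ccode == sigp_status_stored && (status & 0x400))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", vcpu_running(sigp_status_stored, 0x400)); /* 0: vcpu preempted */
	printf("%d\n", vcpu_running(0, 0));                      /* 1: vcpu running   */
	return 0;
}
```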


@ -13,8 +13,6 @@
#include <linux/smp.h> #include <linux/smp.h>
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
static inline int static inline int
_raw_compare_and_swap(volatile unsigned int *lock, _raw_compare_and_swap(volatile unsigned int *lock,
unsigned int old, unsigned int new) unsigned int old, unsigned int new)
@ -27,22 +25,6 @@ _raw_compare_and_swap(volatile unsigned int *lock,
return old; return old;
} }
#else /* __GNUC__ */
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
unsigned int old, unsigned int new)
{
asm volatile(
" cs %0,%3,0(%4)"
: "=d" (old), "=m" (*lock)
: "0" (old), "d" (new), "a" (lock), "m" (*lock)
: "cc", "memory" );
return old;
}
#endif /* __GNUC__ */
/* /*
* Simple spin lock operations. There are two variants, one clears IRQ's * Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not. * on the local processor, one does not.
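With the compiler minimum raised (see the version check added to asm-offsets.c later in this diff), only the "Q"-constraint version of _raw_compare_and_swap() remains. The primitive itself is a compare-and-swap that returns the value found in memory; a hedged, portable sketch of how such a primitive can back a simple spinlock, using C11 atomics instead of the s390 cs instruction (the demo_* names are illustrative, not the kernel's arch_spin_* code):

```c
#include <stdatomic.h>

/* Portable stand-in for _raw_compare_and_swap(): atomically replace
 * *lock with new if it still holds old, returning the value found. */
static unsigned int cas_return_old(atomic_uint *lock, unsigned int old, unsigned int new)
{
	unsigned int expected = old;

	atomic_compare_exchange_strong(lock, &expected, new);
	return expected;            /* == old on success */
}

static void demo_spin_lock(atomic_uint *lock, unsigned int owner)
{
	/* 0 means free; spin until we install our owner tag, roughly what
	 * the s390 lock does with a cpu-derived value. */
	while (cas_return_old(lock, 0, owner) != 0)
		;                   /* busy wait */
}

static void demo_spin_unlock(atomic_uint *lock)
{
	atomic_store(lock, 0);
}

int main(void)
{
	atomic_uint lock = 0;

	demo_spin_lock(&lock, 1);
	demo_spin_unlock(&lock);
	return 0;
}
```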


@ -47,11 +47,11 @@ static inline __u32 __arch_swab32p(const __u32 *x)
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" icm %0,8,3(%1)\n" " icm %0,8,%O1+3(%R1)\n"
" icm %0,4,2(%1)\n" " icm %0,4,%O1+2(%R1)\n"
" icm %0,2,1(%1)\n" " icm %0,2,%O1+1(%R1)\n"
" ic %0,0(%1)" " ic %0,%1"
: "=&d" (result) : "a" (x), "m" (*x) : "cc"); : "=&d" (result) : "Q" (*x) : "cc");
#else /* __s390x__ */ #else /* __s390x__ */
" lrv %0,%1" " lrv %0,%1"
: "=d" (result) : "m" (*x)); : "=d" (result) : "m" (*x));
@ -77,9 +77,9 @@ static inline __u16 __arch_swab16p(const __u16 *x)
asm volatile( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" icm %0,2,1(%1)\n" " icm %0,2,%O+1(%R1)\n"
" ic %0,0(%1)\n" " ic %0,%1\n"
: "=&d" (result) : "a" (x), "m" (*x) : "cc"); : "=&d" (result) : "Q" (*x) : "cc");
#else /* __s390x__ */ #else /* __s390x__ */
" lrvh %0,%1" " lrvh %0,%1"
: "=d" (result) : "m" (*x)); : "=d" (result) : "m" (*x));


@ -87,7 +87,8 @@ struct sysinfo_2_2_2 {
struct sysinfo_3_2_2 { struct sysinfo_3_2_2 {
char reserved_0[31]; char reserved_0[31];
unsigned char count; unsigned char :4;
unsigned char count:4;
struct { struct {
char reserved_0[4]; char reserved_0[4];
unsigned short cpus_total; unsigned short cpus_total;
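The SYSIB 3,2,2 fix above narrows the descriptor count to the low four bits of byte 31, with the high nibble reserved; on a big-endian target like s390 the first bit-field occupies the most significant bits. A quick raw-byte sketch of the same access, with a hypothetical buffer rather than the real struct:

```c
#include <stdio.h>

/* The descriptor count is the low nibble of byte 31 of the SYSIB 3.2.2
 * block, which is what the reserved:4/count:4 pair above expresses on a
 * big-endian machine. */
static unsigned int sysib322_count(const unsigned char *sysib)
{
	return sysib[31] & 0x0f;
}

int main(void)
{
	unsigned char sysib[4096] = { 0 };

	sysib[31] = 0x23;                      /* reserved nibble 2, count 3 */
	printf("%u\n", sysib322_count(sysib)); /* prints 3 */
	return 0;
}
```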


@ -24,65 +24,65 @@ extern struct task_struct *__switch_to(void *, void *);
static inline void save_fp_regs(s390_fp_regs *fpregs) static inline void save_fp_regs(s390_fp_regs *fpregs)
{ {
asm volatile( asm volatile(
" std 0,8(%1)\n" " std 0,%O0+8(%R0)\n"
" std 2,24(%1)\n" " std 2,%O0+24(%R0)\n"
" std 4,40(%1)\n" " std 4,%O0+40(%R0)\n"
" std 6,56(%1)" " std 6,%O0+56(%R0)"
: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); : "=Q" (*fpregs) : "Q" (*fpregs));
if (!MACHINE_HAS_IEEE) if (!MACHINE_HAS_IEEE)
return; return;
asm volatile( asm volatile(
" stfpc 0(%1)\n" " stfpc %0\n"
" std 1,16(%1)\n" " std 1,%O0+16(%R0)\n"
" std 3,32(%1)\n" " std 3,%O0+32(%R0)\n"
" std 5,48(%1)\n" " std 5,%O0+48(%R0)\n"
" std 7,64(%1)\n" " std 7,%O0+64(%R0)\n"
" std 8,72(%1)\n" " std 8,%O0+72(%R0)\n"
" std 9,80(%1)\n" " std 9,%O0+80(%R0)\n"
" std 10,88(%1)\n" " std 10,%O0+88(%R0)\n"
" std 11,96(%1)\n" " std 11,%O0+96(%R0)\n"
" std 12,104(%1)\n" " std 12,%O0+104(%R0)\n"
" std 13,112(%1)\n" " std 13,%O0+112(%R0)\n"
" std 14,120(%1)\n" " std 14,%O0+120(%R0)\n"
" std 15,128(%1)\n" " std 15,%O0+128(%R0)\n"
: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); : "=Q" (*fpregs) : "Q" (*fpregs));
} }
static inline void restore_fp_regs(s390_fp_regs *fpregs) static inline void restore_fp_regs(s390_fp_regs *fpregs)
{ {
asm volatile( asm volatile(
" ld 0,8(%0)\n" " ld 0,%O0+8(%R0)\n"
" ld 2,24(%0)\n" " ld 2,%O0+24(%R0)\n"
" ld 4,40(%0)\n" " ld 4,%O0+40(%R0)\n"
" ld 6,56(%0)" " ld 6,%O0+56(%R0)"
: : "a" (fpregs), "m" (*fpregs)); : : "Q" (*fpregs));
if (!MACHINE_HAS_IEEE) if (!MACHINE_HAS_IEEE)
return; return;
asm volatile( asm volatile(
" lfpc 0(%0)\n" " lfpc %0\n"
" ld 1,16(%0)\n" " ld 1,%O0+16(%R0)\n"
" ld 3,32(%0)\n" " ld 3,%O0+32(%R0)\n"
" ld 5,48(%0)\n" " ld 5,%O0+48(%R0)\n"
" ld 7,64(%0)\n" " ld 7,%O0+64(%R0)\n"
" ld 8,72(%0)\n" " ld 8,%O0+72(%R0)\n"
" ld 9,80(%0)\n" " ld 9,%O0+80(%R0)\n"
" ld 10,88(%0)\n" " ld 10,%O0+88(%R0)\n"
" ld 11,96(%0)\n" " ld 11,%O0+96(%R0)\n"
" ld 12,104(%0)\n" " ld 12,%O0+104(%R0)\n"
" ld 13,112(%0)\n" " ld 13,%O0+112(%R0)\n"
" ld 14,120(%0)\n" " ld 14,%O0+120(%R0)\n"
" ld 15,128(%0)\n" " ld 15,%O0+128(%R0)\n"
: : "a" (fpregs), "m" (*fpregs)); : : "Q" (*fpregs));
} }
static inline void save_access_regs(unsigned int *acrs) static inline void save_access_regs(unsigned int *acrs)
{ {
asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); asm volatile("stam 0,15,%0" : "=Q" (*acrs));
} }
static inline void restore_access_regs(unsigned int *acrs) static inline void restore_access_regs(unsigned int *acrs)
{ {
asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); asm volatile("lam 0,15,%0" : : "Q" (*acrs));
} }
#define switch_to(prev,next,last) do { \ #define switch_to(prev,next,last) do { \
@ -139,48 +139,48 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
shift = (3 ^ (addr & 3)) << 3; shift = (3 ^ (addr & 3)) << 3;
addr ^= addr & 3; addr ^= addr & 3;
asm volatile( asm volatile(
" l %0,0(%4)\n" " l %0,%4\n"
"0: lr 0,%0\n" "0: lr 0,%0\n"
" nr 0,%3\n" " nr 0,%3\n"
" or 0,%2\n" " or 0,%2\n"
" cs %0,0,0(%4)\n" " cs %0,0,%4\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (old), "=m" (*(int *) addr) : "=&d" (old), "=Q" (*(int *) addr)
: "d" (x << shift), "d" (~(255 << shift)), "a" (addr), : "d" (x << shift), "d" (~(255 << shift)),
"m" (*(int *) addr) : "memory", "cc", "0"); "Q" (*(int *) addr) : "memory", "cc", "0");
return old >> shift; return old >> shift;
case 2: case 2:
addr = (unsigned long) ptr; addr = (unsigned long) ptr;
shift = (2 ^ (addr & 2)) << 3; shift = (2 ^ (addr & 2)) << 3;
addr ^= addr & 2; addr ^= addr & 2;
asm volatile( asm volatile(
" l %0,0(%4)\n" " l %0,%4\n"
"0: lr 0,%0\n" "0: lr 0,%0\n"
" nr 0,%3\n" " nr 0,%3\n"
" or 0,%2\n" " or 0,%2\n"
" cs %0,0,0(%4)\n" " cs %0,0,%4\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (old), "=m" (*(int *) addr) : "=&d" (old), "=Q" (*(int *) addr)
: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), : "d" (x << shift), "d" (~(65535 << shift)),
"m" (*(int *) addr) : "memory", "cc", "0"); "Q" (*(int *) addr) : "memory", "cc", "0");
return old >> shift; return old >> shift;
case 4: case 4:
asm volatile( asm volatile(
" l %0,0(%3)\n" " l %0,%3\n"
"0: cs %0,%2,0(%3)\n" "0: cs %0,%2,%3\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (old), "=m" (*(int *) ptr) : "=&d" (old), "=Q" (*(int *) ptr)
: "d" (x), "a" (ptr), "m" (*(int *) ptr) : "d" (x), "Q" (*(int *) ptr)
: "memory", "cc"); : "memory", "cc");
return old; return old;
#ifdef __s390x__ #ifdef __s390x__
case 8: case 8:
asm volatile( asm volatile(
" lg %0,0(%3)\n" " lg %0,%3\n"
"0: csg %0,%2,0(%3)\n" "0: csg %0,%2,%3\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (old), "=m" (*(long *) ptr) : "=&d" (old), "=m" (*(long *) ptr)
: "d" (x), "a" (ptr), "m" (*(long *) ptr) : "d" (x), "Q" (*(long *) ptr)
: "memory", "cc"); : "memory", "cc");
return old; return old;
#endif /* __s390x__ */ #endif /* __s390x__ */
@ -215,20 +215,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
shift = (3 ^ (addr & 3)) << 3; shift = (3 ^ (addr & 3)) << 3;
addr ^= addr & 3; addr ^= addr & 3;
asm volatile( asm volatile(
" l %0,0(%4)\n" " l %0,%2\n"
"0: nr %0,%5\n" "0: nr %0,%5\n"
" lr %1,%0\n" " lr %1,%0\n"
" or %0,%2\n" " or %0,%2\n"
" or %1,%3\n" " or %1,%3\n"
" cs %0,%1,0(%4)\n" " cs %0,%1,%2\n"
" jnl 1f\n" " jnl 1f\n"
" xr %1,%0\n" " xr %1,%0\n"
" nr %1,%5\n" " nr %1,%5\n"
" jnz 0b\n" " jnz 0b\n"
"1:" "1:"
: "=&d" (prev), "=&d" (tmp) : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
: "d" (old << shift), "d" (new << shift), "a" (ptr), : "d" (old << shift), "d" (new << shift),
"d" (~(255 << shift)) "d" (~(255 << shift)), "Q" (*(int *) ptr)
: "memory", "cc"); : "memory", "cc");
return prev >> shift; return prev >> shift;
case 2: case 2:
@ -236,33 +236,35 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
shift = (2 ^ (addr & 2)) << 3; shift = (2 ^ (addr & 2)) << 3;
addr ^= addr & 2; addr ^= addr & 2;
asm volatile( asm volatile(
" l %0,0(%4)\n" " l %0,%2\n"
"0: nr %0,%5\n" "0: nr %0,%5\n"
" lr %1,%0\n" " lr %1,%0\n"
" or %0,%2\n" " or %0,%2\n"
" or %1,%3\n" " or %1,%3\n"
" cs %0,%1,0(%4)\n" " cs %0,%1,%2\n"
" jnl 1f\n" " jnl 1f\n"
" xr %1,%0\n" " xr %1,%0\n"
" nr %1,%5\n" " nr %1,%5\n"
" jnz 0b\n" " jnz 0b\n"
"1:" "1:"
: "=&d" (prev), "=&d" (tmp) : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
: "d" (old << shift), "d" (new << shift), "a" (ptr), : "d" (old << shift), "d" (new << shift),
"d" (~(65535 << shift)) "d" (~(65535 << shift)), "Q" (*(int *) ptr)
: "memory", "cc"); : "memory", "cc");
return prev >> shift; return prev >> shift;
case 4: case 4:
asm volatile( asm volatile(
" cs %0,%2,0(%3)\n" " cs %0,%3,%1\n"
: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) : "=&d" (prev), "=Q" (*(int *) ptr)
: "0" (old), "d" (new), "Q" (*(int *) ptr)
: "memory", "cc"); : "memory", "cc");
return prev; return prev;
#ifdef __s390x__ #ifdef __s390x__
case 8: case 8:
asm volatile( asm volatile(
" csg %0,%2,0(%3)\n" " csg %0,%3,%1\n"
: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) : "=&d" (prev), "=Q" (*(long *) ptr)
: "0" (old), "d" (new), "Q" (*(long *) ptr)
: "memory", "cc"); : "memory", "cc");
return prev; return prev;
#endif /* __s390x__ */ #endif /* __s390x__ */
@ -302,17 +304,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#define __ctl_load(array, low, high) ({ \ #define __ctl_load(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \ typedef struct { char _[sizeof(array)]; } addrtype; \
asm volatile( \ asm volatile( \
" lctlg %1,%2,0(%0)\n" \ " lctlg %1,%2,%0\n" \
: : "a" (&array), "i" (low), "i" (high), \ : : "Q" (*(addrtype *)(&array)), \
"m" (*(addrtype *)(&array))); \ "i" (low), "i" (high)); \
}) })
#define __ctl_store(array, low, high) ({ \ #define __ctl_store(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \ typedef struct { char _[sizeof(array)]; } addrtype; \
asm volatile( \ asm volatile( \
" stctg %2,%3,0(%1)\n" \ " stctg %1,%2,%0\n" \
: "=m" (*(addrtype *)(&array)) \ : "=Q" (*(addrtype *)(&array)) \
: "a" (&array), "i" (low), "i" (high)); \ : "i" (low), "i" (high)); \
}) })
#else /* __s390x__ */ #else /* __s390x__ */
@ -320,17 +322,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#define __ctl_load(array, low, high) ({ \ #define __ctl_load(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \ typedef struct { char _[sizeof(array)]; } addrtype; \
asm volatile( \ asm volatile( \
" lctl %1,%2,0(%0)\n" \ " lctl %1,%2,%0\n" \
: : "a" (&array), "i" (low), "i" (high), \ : : "Q" (*(addrtype *)(&array)), \
"m" (*(addrtype *)(&array))); \ "i" (low), "i" (high)); \
}) })
#define __ctl_store(array, low, high) ({ \ #define __ctl_store(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \ typedef struct { char _[sizeof(array)]; } addrtype; \
asm volatile( \ asm volatile( \
" stctl %2,%3,0(%1)\n" \ " stctl %1,%2,%0\n" \
: "=m" (*(addrtype *)(&array)) \ : "=Q" (*(addrtype *)(&array)) \
: "a" (&array), "i" (low), "i" (high)); \ : "i" (low), "i" (high)); \
}) })
#endif /* __s390x__ */ #endif /* __s390x__ */
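The 1- and 2-byte cases of __cmpxchg() above emulate a narrow compare-and-swap with a word-wide cs loop: align down to the containing word, shift the old and new values into position, and retry only if some other byte of the word changed underneath. A hedged C sketch of that bookkeeping for the single-byte case, with a C11 CAS standing in for cs and the byte index passed explicitly instead of being derived from the address with (3 ^ (addr & 3)) << 3:

```c
#include <stdatomic.h>

/* Word-wide CAS stand-in for the s390 cs instruction; on failure,
 * *expected is refreshed with the current memory contents. */
static int cas_word(atomic_uint *p, unsigned int *expected, unsigned int desired)
{
	return atomic_compare_exchange_strong(p, expected, desired);
}

/* Compare-and-swap one byte inside an aligned 32-bit word, returning the
 * byte value previously seen -- the same contract as __cmpxchg(). */
static unsigned char cmpxchg_u8(atomic_uint *word, int byte_index,
				unsigned char old, unsigned char new)
{
	int shift = byte_index * 8;
	unsigned int mask = 0xffu << shift;
	unsigned int cur = atomic_load(word);

	for (;;) {
		unsigned char seen = (cur & mask) >> shift;

		if (seen != old)
			return seen;                    /* comparison failed */
		if (cas_word(word, &cur,
			     (cur & ~mask) | ((unsigned int)new << shift)))
			return old;                     /* swap succeeded */
		/* The word changed underneath; cur is fresh, re-evaluate. */
	}
}

int main(void)
{
	atomic_uint word = 0x11223344u;

	/* Swap the byte at index 0 (0x44) for 0x99. */
	return cmpxchg_u8(&word, 0, 0x44, 0x99) == 0x44 ? 0 : 1;
}
```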


@ -73,7 +73,7 @@ struct thread_info {
/* how to get the thread information struct from C */ /* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void) static inline struct thread_info *current_thread_info(void)
{ {
return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE); return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE);
} }
#define THREAD_SIZE_ORDER THREAD_ORDER #define THREAD_SIZE_ORDER THREAD_ORDER


@ -20,10 +20,10 @@ static inline int set_clock(__u64 time)
int cc; int cc;
asm volatile( asm volatile(
" sck 0(%2)\n" " sck %1\n"
" ipm %0\n" " ipm %0\n"
" srl %0,28\n" " srl %0,28\n"
: "=d" (cc) : "m" (time), "a" (&time) : "cc"); : "=d" (cc) : "Q" (time) : "cc");
return cc; return cc;
} }
@ -32,21 +32,21 @@ static inline int store_clock(__u64 *time)
int cc; int cc;
asm volatile( asm volatile(
" stck 0(%2)\n" " stck %1\n"
" ipm %0\n" " ipm %0\n"
" srl %0,28\n" " srl %0,28\n"
: "=d" (cc), "=m" (*time) : "a" (time) : "cc"); : "=d" (cc), "=Q" (*time) : : "cc");
return cc; return cc;
} }
static inline void set_clock_comparator(__u64 time) static inline void set_clock_comparator(__u64 time)
{ {
asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); asm volatile("sckc %0" : : "Q" (time));
} }
static inline void store_clock_comparator(__u64 *time) static inline void store_clock_comparator(__u64 *time)
{ {
asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); asm volatile("stckc %0" : "=Q" (*time));
} }
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
@ -57,11 +57,7 @@ static inline unsigned long long get_clock (void)
{ {
unsigned long long clk; unsigned long long clk;
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile("stck %0" : "=Q" (clk) : : "cc"); asm volatile("stck %0" : "=Q" (clk) : : "cc");
#else /* __GNUC__ */
asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
#endif /* __GNUC__ */
return clk; return clk;
} }
@ -69,13 +65,7 @@ static inline unsigned long long get_clock_xt(void)
{ {
unsigned char clk[16]; unsigned char clk[16];
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile("stcke %0" : "=Q" (clk) : : "cc"); asm volatile("stcke %0" : "=Q" (clk) : : "cc");
#else /* __GNUC__ */
asm volatile("stcke 0(%1)" : "=m" (clk)
: "a" (clk) : "cc");
#endif /* __GNUC__ */
return *((unsigned long long *)&clk[1]); return *((unsigned long long *)&clk[1]);
} }
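get_clock() now uses the "Q"-constraint stck form unconditionally. As a reminder of what the returned value means: bit 51 of the TOD clock ticks once per microsecond, so the unit is 2^-12 µs and a simplified nanosecond conversion is value * 1000 / 4096 (the kernel's real helper splits the multiplication to avoid 64-bit overflow; this sketch deliberately does not):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified TOD-to-nanoseconds conversion: one TOD unit is 2^-12 us,
 * so ns = tod * 1000 / 4096 = (tod * 125) >> 9.  Overflow handling for
 * very large values is ignored here. */
static uint64_t tod_to_ns_demo(uint64_t tod)
{
	return (tod * 125) >> 9;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)tod_to_ns_demo(4096)); /* 1000 ns */
	return 0;
}
```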


@ -265,6 +265,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
return uaccess.copy_from_user(n, from, to); return uaccess.copy_from_user(n, from, to);
} }
extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;
/** /**
* copy_from_user: - Copy a block of data from user space. * copy_from_user: - Copy a block of data from user space.
* @to: Destination address, in kernel space. * @to: Destination address, in kernel space.
@ -284,7 +290,13 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long __must_check static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n) copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
unsigned int sz = __compiletime_object_size(to);
might_fault(); might_fault();
if (unlikely(sz != -1 && sz < n)) {
copy_from_user_overflow();
return n;
}
if (access_ok(VERIFY_READ, from, n)) if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n); n = __copy_from_user(to, from, n);
else else
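The added check relies on __compiletime_object_size(), which resolves to GCC's __builtin_object_size(): when the destination is an object whose size the compiler can prove and n is larger, the build emits a warning via copy_from_user_overflow() and the copy degrades to "nothing copied, n returned". A user-space sketch of the same decision (not the kernel macros; __builtin_object_size() only yields a useful value where the compiler can see the object, typically with optimization enabled):

```c
#include <stddef.h>
#include <stdio.h>

/* Reproduce just the size decision from the copy_from_user() change
 * above; the copy itself is left out on purpose. */
static const char *verdict(size_t provable_size, size_t n)
{
	if (provable_size != (size_t)-1 && provable_size < n)
		return "rejected: destination provably too small";
	return "allowed";
}

int main(void)
{
	char buf[8];

	printf("%s\n", verdict(__builtin_object_size(buf, 0), 8));
	/* With optimization enabled, the next line reports the rejection case. */
	printf("%s\n", verdict(__builtin_object_size(buf, 0), 16));
	return 0;
}
```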


@ -7,7 +7,7 @@
#define VDSO32_LBASE 0 #define VDSO32_LBASE 0
#define VDSO64_LBASE 0 #define VDSO64_LBASE 0
#define VDSO_VERSION_STRING LINUX_2.6.26 #define VDSO_VERSION_STRING LINUX_2.6.29
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__


@ -29,9 +29,12 @@ obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
extra-y += head.o init_task.o vmlinux.lds extra-y += head.o init_task.o vmlinux.lds
extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o topology.o obj-$(CONFIG_SMP) += smp.o topology.o
obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \
switch_cpu.o)
obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
obj-$(CONFIG_AUDIT) += audit.o obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o


@ -4,18 +4,27 @@
* and format the required data. * and format the required data.
*/ */
#include <linux/sched.h> #define ASM_OFFSETS_C
#include <linux/kbuild.h> #include <linux/kbuild.h>
#include <linux/sched.h>
#include <asm/vdso.h> #include <asm/vdso.h>
#include <asm/sigp.h> #include <asm/sigp.h>
/*
* Make sure that the compiler is new enough. We want a compiler that
* is known to work with the "Q" assembler constraint.
*/
#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
#error Your compiler is too old; please use version 3.3.3 or newer
#endif
int main(void) int main(void)
{ {
DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info)); DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
DEFINE(__THREAD_mm_segment, DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
offsetof(struct task_struct, thread.mm_segment));
BLANK(); BLANK();
DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK(); BLANK();
@ -52,18 +61,94 @@ int main(void)
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
DEFINE(__VDSO_ECTG_BASE, DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
offsetof(struct vdso_per_cpu_data, ectg_timer_base)); DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
DEFINE(__VDSO_ECTG_USER,
offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */ /* constants used by the vdso */
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK();
/* constants for SIGP */ /* constants for SIGP */
DEFINE(__SIGP_STOP, sigp_stop); DEFINE(__SIGP_STOP, sigp_stop);
DEFINE(__SIGP_RESTART, sigp_restart); DEFINE(__SIGP_RESTART, sigp_restart);
DEFINE(__SIGP_SENSE, sigp_sense); DEFINE(__SIGP_SENSE, sigp_sense);
DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset); DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
BLANK();
/* lowcore offsets */
DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr));
DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
BLANK();
DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer));
DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area));
DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area));
DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
#ifdef CONFIG_32BIT
DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
#else /* CONFIG_32BIT */
DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2));
DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
#endif /* CONFIG_32BIT */
return 0; return 0;
} }
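asm-offsets.c is compiled but never linked: kbuild scans the generated assembly for the DEFINE() markers and turns them into a generated header that assembly files (entry.S, head.S and the new switch_cpu code) include, which is why the lowcore offsets move here and <asm/lowcore.h> can be dropped from the .S files below. A toy stand-alone illustration of the idea, printing #defines from offsetof() at run time instead of via the kbuild trick (demo_lowcore is a made-up struct, not struct _lowcore):

```c
#include <stddef.h>
#include <stdio.h>

/* Made-up miniature of struct _lowcore, for illustration only. */
struct demo_lowcore {
	unsigned int   ext_params;
	unsigned short cpu_addr;
	unsigned short ext_int_code;
	unsigned long  kernel_stack;
};

#define DEFINE(sym, val) printf("#define %-28s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(__LC_DEMO_EXT_PARAMS,   offsetof(struct demo_lowcore, ext_params));
	DEFINE(__LC_DEMO_CPU_ADDRESS,  offsetof(struct demo_lowcore, cpu_addr));
	DEFINE(__LC_DEMO_KERNEL_STACK, offsetof(struct demo_lowcore, kernel_stack));
	return 0;
}
```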


@ -6,8 +6,8 @@
* Michael Holzheu <holzheu@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com>
*/ */
#include <asm/asm-offsets.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/lowcore.h>
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT


@ -86,10 +86,17 @@ enum {
U4_12, /* 4 bit unsigned value starting at 12 */ U4_12, /* 4 bit unsigned value starting at 12 */
U4_16, /* 4 bit unsigned value starting at 16 */ U4_16, /* 4 bit unsigned value starting at 16 */
U4_20, /* 4 bit unsigned value starting at 20 */ U4_20, /* 4 bit unsigned value starting at 20 */
U4_32, /* 4 bit unsigned value starting at 32 */
U8_8, /* 8 bit unsigned value starting at 8 */ U8_8, /* 8 bit unsigned value starting at 8 */
U8_16, /* 8 bit unsigned value starting at 16 */ U8_16, /* 8 bit unsigned value starting at 16 */
U8_24, /* 8 bit unsigned value starting at 24 */
U8_32, /* 8 bit unsigned value starting at 32 */
I8_8, /* 8 bit signed value starting at 8 */
I8_32, /* 8 bit signed value starting at 32 */
I16_16, /* 16 bit signed value starting at 16 */ I16_16, /* 16 bit signed value starting at 16 */
I16_32, /* 16 bit signed value starting at 32 */
U16_16, /* 16 bit unsigned value starting at 16 */ U16_16, /* 16 bit unsigned value starting at 16 */
U16_32, /* 16 bit unsigned value starting at 32 */
J16_16, /* PC relative jump offset at 16 */ J16_16, /* PC relative jump offset at 16 */
J32_16, /* PC relative long offset at 16 */ J32_16, /* PC relative long offset at 16 */
I32_16, /* 32 bit signed value starting at 16 */ I32_16, /* 32 bit signed value starting at 16 */
@ -104,21 +111,37 @@ enum {
*/ */
enum { enum {
INSTR_INVALID, INSTR_INVALID,
INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_E,
INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU,
INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU,
INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRE_RR, INSTR_RRE_RR_OPT,
INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR,
INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSI_RRP,
INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RSL_R0RD,
INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, INSTR_RS_RURD,
INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_RXE_FRRD, INSTR_RXE_RRRD,
INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, INSTR_RXF_FRRDF,
INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
INSTR_SIL_RDI, INSTR_SIL_RDU,
INSTR_SIY_IRD, INSTR_SIY_URD,
INSTR_SI_URD,
INSTR_SSE_RDRD,
INSTR_SSF_RRDRD,
INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
INSTR_S_00, INSTR_S_RD, INSTR_S_00, INSTR_S_RD,
}; };
@ -129,7 +152,7 @@ struct operand {
}; };
struct insn { struct insn {
const char name[5]; const char name[6];
unsigned char opfrag; unsigned char opfrag;
unsigned char format; unsigned char format;
}; };
@ -170,11 +193,16 @@ static const struct operand operands[] =
[U4_12] = { 4, 12, 0 }, [U4_12] = { 4, 12, 0 },
[U4_16] = { 4, 16, 0 }, [U4_16] = { 4, 16, 0 },
[U4_20] = { 4, 20, 0 }, [U4_20] = { 4, 20, 0 },
[U4_32] = { 4, 32, 0 },
[U8_8] = { 8, 8, 0 }, [U8_8] = { 8, 8, 0 },
[U8_16] = { 8, 16, 0 }, [U8_16] = { 8, 16, 0 },
[U8_24] = { 8, 24, 0 },
[U8_32] = { 8, 32, 0 },
[I16_16] = { 16, 16, OPERAND_SIGNED }, [I16_16] = { 16, 16, OPERAND_SIGNED },
[U16_16] = { 16, 16, 0 }, [U16_16] = { 16, 16, 0 },
[U16_32] = { 16, 32, 0 },
[J16_16] = { 16, 16, OPERAND_PCREL }, [J16_16] = { 16, 16, OPERAND_PCREL },
[I16_32] = { 16, 32, OPERAND_SIGNED },
[J32_16] = { 32, 16, OPERAND_PCREL }, [J32_16] = { 32, 16, OPERAND_PCREL },
[I32_16] = { 32, 16, OPERAND_SIGNED }, [I32_16] = { 32, 16, OPERAND_SIGNED },
[U32_16] = { 32, 16, 0 }, [U32_16] = { 32, 16, 0 },
@ -183,82 +211,93 @@ static const struct operand operands[] =
}; };
static const unsigned char formats[][7] = { static const unsigned char formats[][7] = {
[INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ [INSTR_E] = { 0xff, 0,0,0,0,0,0 },
[INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
[INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
[INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
[INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
[INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
[INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
[INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
[INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */ [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
[INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 },
[INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 },
[INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 },
[INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 },
[INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 },
[INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 },
[INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 },
[INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 },
[INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 },
[INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 },
[INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 },
[INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 },
[INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 },
[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 },
[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 },
[INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, /* e.g. idte */ [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 },
[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 },
[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 },
[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */ [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 },
[INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 },
[INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 },
[INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 },
[INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 },
[INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
[INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
[INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
[INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
[INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
[INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */ [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
[INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
[INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
[INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
[INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
[INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
[INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 },
[INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 },
[INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 },
[INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 },
[INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
[INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
[INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
[INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
[INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
/* e.g. icmh */ [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
[INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
[INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
[INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
[INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */
[INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */
[INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */
[INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
/* e.g. madb */ [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
[INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
[INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 },
[INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
[INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
[INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 },
[INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 },
[INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 },
[INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
[INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
/* e.g. mvc */ [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
[INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
/* e.g. srp */
[INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
/* e.g. pack */
[INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
/* e.g. mvck */
[INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
/* e.g. plo */
[INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
/* e.g. lmd */
[INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */
[INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */
[INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
/* e.g. mvcos */ [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
[INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
[INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
[INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
[INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
[INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
[INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
[INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
}; };
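Each formats[] entry lists a format's operands as indices into operands[], and each operands[] entry is a {bits, shift, flags} triple: field width and bit position counted from the leftmost bit of the instruction. The sketch below shows the plain unsigned extraction those tables drive (the real dis.c additionally handles PC-relative, signed and length operands):

```c
#include <stdio.h>

/* Extract an unsigned field of 'bits' bits starting at big-endian bit
 * position 'shift', where bit 0 is the MSB of code[0]. */
static unsigned int extract_field(const unsigned char *code, int bits, int shift)
{
	unsigned int val = 0;
	int i;

	for (i = 0; i < bits; i++) {
		int bit = shift + i;

		val = (val << 1) | ((code[bit >> 3] >> (7 - (bit & 7))) & 1);
	}
	return val;
}

int main(void)
{
	/* "lr %r13,%r15" encodes as 0x18df: opcode 0x18, R_8 = 13, R_12 = 15. */
	unsigned char insn[2] = { 0x18, 0xdf };

	printf("R_8=%u R_12=%u\n",
	       extract_field(insn, 4, 8),    /* 13 */
	       extract_field(insn, 4, 12));  /* 15 */
	return 0;
}
```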
static struct insn opcode[] = { static struct insn opcode[] = {
@ -454,6 +493,8 @@ static struct insn opcode[] = {
static struct insn opcode_01[] = { static struct insn opcode_01[] = {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
{ "sam64", 0x0e, INSTR_E }, { "sam64", 0x0e, INSTR_E },
{ "pfpo", 0x0a, INSTR_E },
{ "ptff", 0x04, INSTR_E },
#endif #endif
{ "pr", 0x01, INSTR_E }, { "pr", 0x01, INSTR_E },
{ "upt", 0x02, INSTR_E }, { "upt", 0x02, INSTR_E },
@ -519,6 +560,8 @@ static struct insn opcode_b2[] = {
{ "cutfu", 0xa7, INSTR_RRF_M0RR }, { "cutfu", 0xa7, INSTR_RRF_M0RR },
{ "stfle", 0xb0, INSTR_S_RD }, { "stfle", 0xb0, INSTR_S_RD },
{ "lpswe", 0xb2, INSTR_S_RD }, { "lpswe", 0xb2, INSTR_S_RD },
{ "srnmt", 0xb9, INSTR_S_RD },
{ "lfas", 0xbd, INSTR_S_RD },
#endif #endif
{ "stidp", 0x02, INSTR_S_RD }, { "stidp", 0x02, INSTR_S_RD },
{ "sck", 0x04, INSTR_S_RD }, { "sck", 0x04, INSTR_S_RD },
@ -589,7 +632,6 @@ static struct insn opcode_b2[] = {
{ "clst", 0x5d, INSTR_RRE_RR }, { "clst", 0x5d, INSTR_RRE_RR },
{ "srst", 0x5e, INSTR_RRE_RR }, { "srst", 0x5e, INSTR_RRE_RR },
{ "cmpsc", 0x63, INSTR_RRE_RR }, { "cmpsc", 0x63, INSTR_RRE_RR },
{ "cmpsc", 0x63, INSTR_RRE_RR },
{ "siga", 0x74, INSTR_S_RD }, { "siga", 0x74, INSTR_S_RD },
{ "xsch", 0x76, INSTR_S_00 }, { "xsch", 0x76, INSTR_S_00 },
{ "rp", 0x77, INSTR_S_RD }, { "rp", 0x77, INSTR_S_RD },
@ -630,6 +672,57 @@ static struct insn opcode_b3[] = {
{ "cger", 0xc8, INSTR_RRF_U0RF }, { "cger", 0xc8, INSTR_RRF_U0RF },
{ "cgdr", 0xc9, INSTR_RRF_U0RF }, { "cgdr", 0xc9, INSTR_RRF_U0RF },
{ "cgxr", 0xca, INSTR_RRF_U0RF }, { "cgxr", 0xca, INSTR_RRF_U0RF },
{ "lpdfr", 0x70, INSTR_RRE_FF },
{ "lndfr", 0x71, INSTR_RRE_FF },
{ "cpsdr", 0x72, INSTR_RRF_F0FF2 },
{ "lcdfr", 0x73, INSTR_RRE_FF },
{ "ldgr", 0xc1, INSTR_RRE_FR },
{ "lgdr", 0xcd, INSTR_RRE_RF },
{ "adtr", 0xd2, INSTR_RRR_F0FF },
{ "axtr", 0xda, INSTR_RRR_F0FF },
{ "cdtr", 0xe4, INSTR_RRE_FF },
{ "cxtr", 0xec, INSTR_RRE_FF },
{ "kdtr", 0xe0, INSTR_RRE_FF },
{ "kxtr", 0xe8, INSTR_RRE_FF },
{ "cedtr", 0xf4, INSTR_RRE_FF },
{ "cextr", 0xfc, INSTR_RRE_FF },
{ "cdgtr", 0xf1, INSTR_RRE_FR },
{ "cxgtr", 0xf9, INSTR_RRE_FR },
{ "cdstr", 0xf3, INSTR_RRE_FR },
{ "cxstr", 0xfb, INSTR_RRE_FR },
{ "cdutr", 0xf2, INSTR_RRE_FR },
{ "cxutr", 0xfa, INSTR_RRE_FR },
{ "cgdtr", 0xe1, INSTR_RRF_U0RF },
{ "cgxtr", 0xe9, INSTR_RRF_U0RF },
{ "csdtr", 0xe3, INSTR_RRE_RF },
{ "csxtr", 0xeb, INSTR_RRE_RF },
{ "cudtr", 0xe2, INSTR_RRE_RF },
{ "cuxtr", 0xea, INSTR_RRE_RF },
{ "ddtr", 0xd1, INSTR_RRR_F0FF },
{ "dxtr", 0xd9, INSTR_RRR_F0FF },
{ "eedtr", 0xe5, INSTR_RRE_RF },
{ "eextr", 0xed, INSTR_RRE_RF },
{ "esdtr", 0xe7, INSTR_RRE_RF },
{ "esxtr", 0xef, INSTR_RRE_RF },
{ "iedtr", 0xf6, INSTR_RRF_F0FR },
{ "iextr", 0xfe, INSTR_RRF_F0FR },
{ "ltdtr", 0xd6, INSTR_RRE_FF },
{ "ltxtr", 0xde, INSTR_RRE_FF },
{ "fidtr", 0xd7, INSTR_RRF_UUFF },
{ "fixtr", 0xdf, INSTR_RRF_UUFF },
{ "ldetr", 0xd4, INSTR_RRF_0UFF },
{ "lxdtr", 0xdc, INSTR_RRF_0UFF },
{ "ledtr", 0xd5, INSTR_RRF_UUFF },
{ "ldxtr", 0xdd, INSTR_RRF_UUFF },
{ "mdtr", 0xd0, INSTR_RRR_F0FF },
{ "mxtr", 0xd8, INSTR_RRR_F0FF },
{ "qadtr", 0xf5, INSTR_RRF_FUFF },
{ "qaxtr", 0xfd, INSTR_RRF_FUFF },
{ "rrdtr", 0xf7, INSTR_RRF_FFRU },
{ "rrxtr", 0xff, INSTR_RRF_FFRU },
{ "sfasr", 0x85, INSTR_RRE_R0 },
{ "sdtr", 0xd3, INSTR_RRR_F0FF },
{ "sxtr", 0xdb, INSTR_RRR_F0FF },
#endif #endif
{ "lpebr", 0x00, INSTR_RRE_FF }, { "lpebr", 0x00, INSTR_RRE_FF },
{ "lnebr", 0x01, INSTR_RRE_FF }, { "lnebr", 0x01, INSTR_RRE_FF },
@ -780,6 +873,14 @@ static struct insn opcode_b9[] = {
{ "cu24", 0xb1, INSTR_RRF_M0RR }, { "cu24", 0xb1, INSTR_RRF_M0RR },
{ "cu41", 0xb2, INSTR_RRF_M0RR }, { "cu41", 0xb2, INSTR_RRF_M0RR },
{ "cu42", 0xb3, INSTR_RRF_M0RR }, { "cu42", 0xb3, INSTR_RRF_M0RR },
{ "crt", 0x72, INSTR_RRF_U0RR },
{ "cgrt", 0x60, INSTR_RRF_U0RR },
{ "clrt", 0x73, INSTR_RRF_U0RR },
{ "clgrt", 0x61, INSTR_RRF_U0RR },
{ "ptf", 0xa2, INSTR_RRE_R0 },
{ "pfmf", 0xaf, INSTR_RRE_RR },
{ "trte", 0xbf, INSTR_RRF_M0RR },
{ "trtre", 0xbd, INSTR_RRF_M0RR },
#endif #endif
{ "kmac", 0x1e, INSTR_RRE_RR }, { "kmac", 0x1e, INSTR_RRE_RR },
{ "lrvr", 0x1f, INSTR_RRE_RR }, { "lrvr", 0x1f, INSTR_RRE_RR },
@ -835,6 +936,43 @@ static struct insn opcode_c2[] = {
{ "cfi", 0x0d, INSTR_RIL_RI }, { "cfi", 0x0d, INSTR_RIL_RI },
{ "clgfi", 0x0e, INSTR_RIL_RU }, { "clgfi", 0x0e, INSTR_RIL_RU },
{ "clfi", 0x0f, INSTR_RIL_RU }, { "clfi", 0x0f, INSTR_RIL_RU },
{ "msfi", 0x01, INSTR_RIL_RI },
{ "msgfi", 0x00, INSTR_RIL_RI },
#endif
{ "", 0, INSTR_INVALID }
};
static struct insn opcode_c4[] = {
#ifdef CONFIG_64BIT
{ "lrl", 0x0d, INSTR_RIL_RP },
{ "lgrl", 0x08, INSTR_RIL_RP },
{ "lgfrl", 0x0c, INSTR_RIL_RP },
{ "lhrl", 0x05, INSTR_RIL_RP },
{ "lghrl", 0x04, INSTR_RIL_RP },
{ "llgfrl", 0x0e, INSTR_RIL_RP },
{ "llhrl", 0x02, INSTR_RIL_RP },
{ "llghrl", 0x06, INSTR_RIL_RP },
{ "strl", 0x0f, INSTR_RIL_RP },
{ "stgrl", 0x0b, INSTR_RIL_RP },
{ "sthrl", 0x07, INSTR_RIL_RP },
#endif
{ "", 0, INSTR_INVALID }
};
static struct insn opcode_c6[] = {
#ifdef CONFIG_64BIT
{ "crl", 0x0d, INSTR_RIL_RP },
{ "cgrl", 0x08, INSTR_RIL_RP },
{ "cgfrl", 0x0c, INSTR_RIL_RP },
{ "chrl", 0x05, INSTR_RIL_RP },
{ "cghrl", 0x04, INSTR_RIL_RP },
{ "clrl", 0x0f, INSTR_RIL_RP },
{ "clgrl", 0x0a, INSTR_RIL_RP },
{ "clgfrl", 0x0e, INSTR_RIL_RP },
{ "clhrl", 0x07, INSTR_RIL_RP },
{ "clghrl", 0x06, INSTR_RIL_RP },
{ "pfdrl", 0x02, INSTR_RIL_UP },
{ "exrl", 0x00, INSTR_RIL_RP },
#endif #endif
{ "", 0, INSTR_INVALID } { "", 0, INSTR_INVALID }
}; };
@ -842,6 +980,8 @@ static struct insn opcode_c2[] = {
static struct insn opcode_c8[] = { static struct insn opcode_c8[] = {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
{ "mvcos", 0x00, INSTR_SSF_RRDRD }, { "mvcos", 0x00, INSTR_SSF_RRDRD },
{ "ectg", 0x01, INSTR_SSF_RRDRD },
{ "csst", 0x02, INSTR_SSF_RRDRD },
#endif #endif
{ "", 0, INSTR_INVALID } { "", 0, INSTR_INVALID }
}; };
@ -917,6 +1057,12 @@ static struct insn opcode_e3[] = {
{ "llgh", 0x91, INSTR_RXY_RRRD }, { "llgh", 0x91, INSTR_RXY_RRRD },
{ "llc", 0x94, INSTR_RXY_RRRD }, { "llc", 0x94, INSTR_RXY_RRRD },
{ "llh", 0x95, INSTR_RXY_RRRD }, { "llh", 0x95, INSTR_RXY_RRRD },
{ "cgh", 0x34, INSTR_RXY_RRRD },
{ "laey", 0x75, INSTR_RXY_RRRD },
{ "ltgf", 0x32, INSTR_RXY_RRRD },
{ "mfy", 0x5c, INSTR_RXY_RRRD },
{ "mhy", 0x7c, INSTR_RXY_RRRD },
{ "pfd", 0x36, INSTR_RXY_URRD },
#endif #endif
{ "lrv", 0x1e, INSTR_RXY_RRRD }, { "lrv", 0x1e, INSTR_RXY_RRRD },
{ "lrvh", 0x1f, INSTR_RXY_RRRD }, { "lrvh", 0x1f, INSTR_RXY_RRRD },
@ -931,6 +1077,15 @@ static struct insn opcode_e3[] = {
static struct insn opcode_e5[] = { static struct insn opcode_e5[] = {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
{ "strag", 0x02, INSTR_SSE_RDRD }, { "strag", 0x02, INSTR_SSE_RDRD },
{ "chhsi", 0x54, INSTR_SIL_RDI },
{ "chsi", 0x5c, INSTR_SIL_RDI },
{ "cghsi", 0x58, INSTR_SIL_RDI },
{ "clhhsi", 0x55, INSTR_SIL_RDU },
{ "clfhsi", 0x5d, INSTR_SIL_RDU },
{ "clghsi", 0x59, INSTR_SIL_RDU },
{ "mvhhi", 0x44, INSTR_SIL_RDI },
{ "mvhi", 0x4c, INSTR_SIL_RDI },
{ "mvghi", 0x48, INSTR_SIL_RDI },
#endif #endif
{ "lasp", 0x00, INSTR_SSE_RDRD }, { "lasp", 0x00, INSTR_SSE_RDRD },
{ "tprot", 0x01, INSTR_SSE_RDRD }, { "tprot", 0x01, INSTR_SSE_RDRD },
@ -977,6 +1132,11 @@ static struct insn opcode_eb[] = {
{ "lmy", 0x98, INSTR_RSY_RRRD }, { "lmy", 0x98, INSTR_RSY_RRRD },
{ "lamy", 0x9a, INSTR_RSY_AARD }, { "lamy", 0x9a, INSTR_RSY_AARD },
{ "stamy", 0x9b, INSTR_RSY_AARD }, { "stamy", 0x9b, INSTR_RSY_AARD },
{ "asi", 0x6a, INSTR_SIY_IRD },
{ "agsi", 0x7a, INSTR_SIY_IRD },
{ "alsi", 0x6e, INSTR_SIY_IRD },
{ "algsi", 0x7e, INSTR_SIY_IRD },
{ "ecag", 0x4c, INSTR_RSY_RRRD },
#endif #endif
{ "rll", 0x1d, INSTR_RSY_RRRD }, { "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD }, { "mvclu", 0x8e, INSTR_RSY_RRRD },
@ -988,6 +1148,30 @@ static struct insn opcode_ec[] = {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
{ "brxhg", 0x44, INSTR_RIE_RRP }, { "brxhg", 0x44, INSTR_RIE_RRP },
{ "brxlg", 0x45, INSTR_RIE_RRP }, { "brxlg", 0x45, INSTR_RIE_RRP },
{ "crb", 0xf6, INSTR_RRS_RRRDU },
{ "cgrb", 0xe4, INSTR_RRS_RRRDU },
{ "crj", 0x76, INSTR_RIE_RRPU },
{ "cgrj", 0x64, INSTR_RIE_RRPU },
{ "cib", 0xfe, INSTR_RIS_RURDI },
{ "cgib", 0xfc, INSTR_RIS_RURDI },
{ "cij", 0x7e, INSTR_RIE_RUPI },
{ "cgij", 0x7c, INSTR_RIE_RUPI },
{ "cit", 0x72, INSTR_RIE_R0IU },
{ "cgit", 0x70, INSTR_RIE_R0IU },
{ "clrb", 0xf7, INSTR_RRS_RRRDU },
{ "clgrb", 0xe5, INSTR_RRS_RRRDU },
{ "clrj", 0x77, INSTR_RIE_RRPU },
{ "clgrj", 0x65, INSTR_RIE_RRPU },
{ "clib", 0xff, INSTR_RIS_RURDU },
{ "clgib", 0xfd, INSTR_RIS_RURDU },
{ "clij", 0x7f, INSTR_RIE_RUPU },
{ "clgij", 0x7d, INSTR_RIE_RUPU },
{ "clfit", 0x73, INSTR_RIE_R0UU },
{ "clgit", 0x71, INSTR_RIE_R0UU },
{ "rnsbg", 0x54, INSTR_RIE_RRUUU },
{ "rxsbg", 0x57, INSTR_RIE_RRUUU },
{ "rosbg", 0x56, INSTR_RIE_RRUUU },
{ "risbg", 0x55, INSTR_RIE_RRUUU },
#endif #endif
{ "", 0, INSTR_INVALID } { "", 0, INSTR_INVALID }
}; };
@ -1004,6 +1188,16 @@ static struct insn opcode_ed[] = {
{ "ldy", 0x65, INSTR_RXY_FRRD }, { "ldy", 0x65, INSTR_RXY_FRRD },
{ "stey", 0x66, INSTR_RXY_FRRD }, { "stey", 0x66, INSTR_RXY_FRRD },
{ "stdy", 0x67, INSTR_RXY_FRRD }, { "stdy", 0x67, INSTR_RXY_FRRD },
{ "sldt", 0x40, INSTR_RXF_FRRDF },
{ "slxt", 0x48, INSTR_RXF_FRRDF },
{ "srdt", 0x41, INSTR_RXF_FRRDF },
{ "srxt", 0x49, INSTR_RXF_FRRDF },
{ "tdcet", 0x50, INSTR_RXE_FRRD },
{ "tdcdt", 0x54, INSTR_RXE_FRRD },
{ "tdcxt", 0x58, INSTR_RXE_FRRD },
{ "tdget", 0x51, INSTR_RXE_FRRD },
{ "tdgdt", 0x55, INSTR_RXE_FRRD },
{ "tdgxt", 0x59, INSTR_RXE_FRRD },
#endif #endif
{ "ldeb", 0x04, INSTR_RXE_FRRD }, { "ldeb", 0x04, INSTR_RXE_FRRD },
{ "lxdb", 0x05, INSTR_RXE_FRRD }, { "lxdb", 0x05, INSTR_RXE_FRRD },
@ -1037,6 +1231,7 @@ static struct insn opcode_ed[] = {
{ "mae", 0x2e, INSTR_RXF_FRRDF }, { "mae", 0x2e, INSTR_RXF_FRRDF },
{ "mse", 0x2f, INSTR_RXF_FRRDF }, { "mse", 0x2f, INSTR_RXF_FRRDF },
{ "sqe", 0x34, INSTR_RXE_FRRD }, { "sqe", 0x34, INSTR_RXE_FRRD },
{ "sqd", 0x35, INSTR_RXE_FRRD },
{ "mee", 0x37, INSTR_RXE_FRRD }, { "mee", 0x37, INSTR_RXE_FRRD },
{ "mad", 0x3e, INSTR_RXF_FRRDF }, { "mad", 0x3e, INSTR_RXF_FRRDF },
{ "msd", 0x3f, INSTR_RXF_FRRDF }, { "msd", 0x3f, INSTR_RXF_FRRDF },
@ -1117,6 +1312,12 @@ static struct insn *find_insn(unsigned char *code)
case 0xc2: case 0xc2:
table = opcode_c2; table = opcode_c2;
break; break;
case 0xc4:
table = opcode_c4;
break;
case 0xc6:
table = opcode_c6;
break;
case 0xc8: case 0xc8:
table = opcode_c8; table = opcode_c8;
break; break;


@ -214,10 +214,13 @@ static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
static noinline __init void detect_machine_type(void) static noinline __init void detect_machine_type(void)
{ {
/* No VM information? Looks like LPAR */ /* Check current-configuration-level */
if (stsi(&vmms, 3, 2, 2) == -ENOSYS) if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) {
S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
return; return;
if (!vmms.count) }
/* Get virtual-machine cpu information. */
if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count)
return; return;
/* Running under KVM? If not we assume z/VM */ /* Running under KVM? If not we assume z/VM */
@ -402,8 +405,19 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
static void __init setup_boot_command_line(void) static void __init setup_boot_command_line(void)
{ {
int i;
/* convert arch command line to ascii */
for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++)
if (COMMAND_LINE[i] & 0x80)
break;
if (i < ARCH_COMMAND_LINE_SIZE)
EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0;
/* copy arch command line */ /* copy arch command line */
strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); strlcpy(boot_command_line, strstrip(COMMAND_LINE),
ARCH_COMMAND_LINE_SIZE);
/* append IPL PARM data to the boot command line */ /* append IPL PARM data to the boot command line */
if (MACHINE_IS_VM) if (MACHINE_IS_VM)
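With the translation dropped from head.S (later in this diff), the parameter line may reach the kernel in either ASCII or EBCDIC, so setup_boot_command_line() scans for a byte with the top bit set and only then converts the whole buffer with EBCASC(). The detection heuristic in isolation (the translate table itself is omitted):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Printable ASCII never sets bit 7, while EBCDIC letters and digits do
 * (EBCDIC 'a' is 0x81, '0' is 0xF0), so one high bit anywhere in the
 * buffer is enough to decide that EBCASC() translation is still needed. */
static bool needs_ebcdic_conversion(const unsigned char *cmdline, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (cmdline[i] & 0x80)
			return true;
	return false;
}

int main(void)
{
	const unsigned char ascii[]  = "dasd=0150 root=/dev/dasda1";
	const unsigned char ebcdic[] = { 0x84, 0x81, 0xa2, 0x84, 0x7e, 0x00 }; /* "dasd=" */

	printf("%d\n", needs_ebcdic_conversion(ascii, sizeof(ascii)));   /* 0 */
	printf("%d\n", needs_ebcdic_conversion(ebcdic, sizeof(ebcdic))); /* 1 */
	return 0;
}
```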


@ -13,7 +13,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>


@ -9,11 +9,9 @@
* Heiko Carstens <heiko.carstens@de.ibm.com> * Heiko Carstens <heiko.carstens@de.ibm.com>
*/ */
#include <linux/sys.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>


@ -13,7 +13,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/types.h> #include <linux/types.h>
#include <trace/syscall.h> #include <trace/syscall.h>
#include <asm/lowcore.h> #include <asm/asm-offsets.h>
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE

View File

@ -1,5 +1,5 @@
/* /*
* Copyright IBM Corp. 1999,2009 * Copyright IBM Corp. 1999,2010
* *
* Author(s): Hartmut Penner <hp@de.ibm.com> * Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com>
@ -22,12 +22,9 @@
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <asm/setup.h>
#include <asm/lowcore.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cpu.h>
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define ARCH_OFFSET 4 #define ARCH_OFFSET 4
@ -288,19 +285,7 @@ iplstart:
bz .Lagain1 # skip dateset trailer bz .Lagain1 # skip dateset trailer
la %r5,0(%r4,%r2) la %r5,0(%r4,%r2)
lr %r3,%r2 lr %r3,%r2
.Lidebc: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
tm 0(%r5),0x80 # high order bit set ?
bo .Ldocv # yes -> convert from EBCDIC
ahi %r5,-1
bct %r3,.Lidebc
b .Lnocv
.Ldocv:
l %r3,.Lcvtab
tr 0(256,%r4),0(%r3) # convert parameters to ascii
tr 256(256,%r4),0(%r3)
tr 512(256,%r4),0(%r3)
tr 768(122,%r4),0(%r3)
.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
mvc 0(256,%r3),0(%r4) mvc 0(256,%r3),0(%r4)
mvc 256(256,%r3),256(%r4) mvc 256(256,%r3),256(%r4)
mvc 512(256,%r3),512(%r4) mvc 512(256,%r3),512(%r4)
@ -384,7 +369,6 @@ iplstart:
.Linitrd:.long _end + 0x400000 # default address of initrd .Linitrd:.long _end + 0x400000 # default address of initrd
.Lparm: .long PARMAREA .Lparm: .long PARMAREA
.Lstartup: .long startup .Lstartup: .long startup
.Lcvtab:.long _ebcasc # ebcdic to ascii table
.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
.byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
.byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
@ -417,13 +401,10 @@ start:
.sk8x8: .sk8x8:
mvc 0(240,%r8),0(%r9) # copy iplparms into buffer mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
.gotr: .gotr:
l %r10,.tbl # EBCDIC to ASCII table
tr 0(240,%r8),0(%r10)
slr %r0,%r0 slr %r0,%r0
st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
j startup # continue with startup j startup # continue with startup
.tbl: .long _ebcasc # translate table
.cmd: .long COMMAND_LINE # address of command line buffer .cmd: .long COMMAND_LINE # address of command line buffer
.parm: .long PARMAREA .parm: .long PARMAREA
.lowcase: .lowcase:
@ -467,16 +448,15 @@ start:
# or linload or SALIPL # or linload or SALIPL
# #
.org 0x10000 .org 0x10000
startup:basr %r13,0 # get base .globl startup
startup:
basr %r13,0 # get base
.LPG0: .LPG0:
xc 0x200(256),0x200 # partially clear lowcore xc 0x200(256),0x200 # partially clear lowcore
xc 0x300(256),0x300 xc 0x300(256),0x300
l %r1,5f-.LPG0(%r13) stck __LC_LAST_UPDATE_CLOCK
stck 0(%r1) spt 5f-.LPG0(%r13)
spt 6f-.LPG0(%r13) mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_CLOCK(8),0(%r1)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13)
#ifndef CONFIG_MARCH_G5 #ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
@ -494,7 +474,6 @@ startup:basr %r13,0 # get base
cl %r0,2f+12-.LPG0(%r13) cl %r0,2f+12-.LPG0(%r13)
je 3f je 3f
1: l %r15,.Lstack-.LPG0(%r13) 1: l %r15,.Lstack-.LPG0(%r13)
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
ahi %r15,-96 ahi %r15,-96
la %r2,.Lals_string-.LPG0(%r13) la %r2,.Lals_string-.LPG0(%r13)
l %r3,.Lsclp_print-.LPG0(%r13) l %r3,.Lsclp_print-.LPG0(%r13)
@ -505,7 +484,7 @@ startup:basr %r13,0 # get base
.Lsclp_print: .Lsclp_print:
.long _sclp_print_early .long _sclp_print_early
.Lstack: .Lstack:
.long init_thread_union .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.align 16 .align 16
2: .long 0x000a0000,0x8badcccc 2: .long 0x000a0000,0x8badcccc
#if defined(CONFIG_64BIT) #if defined(CONFIG_64BIT)
@ -532,13 +511,22 @@ startup:basr %r13,0 # get base
3: 3:
#endif #endif
#ifdef CONFIG_64BIT
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
lhi %r1,2 # mode 2 = esame (dump)
sigp %r1,%r0,0x12 # switch to esame mode
sam64 # switch to 64 bit mode
jg startup_continue
#else
mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
l %r13,4f-.LPG0(%r13) l %r13,4f-.LPG0(%r13)
b 0(%r13) b 0(%r13)
.align 4
4: .long startup_continue
5: .long sched_clock_base_cc
.align 8 .align 8
6: .long 0x7fffffff,0xffffffff 4: .long startup_continue
#endif
.align 8
5: .long 0x7fffffff,0xffffffff
# #
# params at 10400 (setup.h) # params at 10400 (setup.h)
@ -552,8 +540,4 @@ startup:basr %r13,0 # get base
.byte "root=/dev/ram0 ro" .byte "root=/dev/ram0 ro"
.byte 0 .byte 0
#ifdef CONFIG_64BIT .org 0x11000
#include "head64.S"
#else
#include "head31.S"
#endif
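
For readers tracing the deleted lines above: the removed .Lidebc/.Ldocv/.tbl code translated the kernel parameter area from EBCDIC to ASCII in place, 256 bytes per TR instruction, using the _ebcasc table; that boot-assembly conversion is dropped here. A minimal C sketch of the same byte-wise table lookup (ebcasc_table and convert_parm_area are illustrative names, not symbols from this patch):

	#include <stddef.h>

	/* Stand-in for the kernel's 256-byte _ebcasc translation table. */
	extern const unsigned char ebcasc_table[256];

	/* Translate a buffer from EBCDIC to ASCII in place, as the removed
	 * TR instructions did for the parameter area. */
	static void convert_parm_area(unsigned char *buf, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			buf[i] = ebcasc_table[buf[i]];
	}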

View File

@ -1,7 +1,7 @@
/* /*
* arch/s390/kernel/head31.S * arch/s390/kernel/head31.S
* *
* Copyright (C) IBM Corp. 2005,2006 * Copyright (C) IBM Corp. 2005,2010
* *
* Author(s): Hartmut Penner <hp@de.ibm.com> * Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com>
@ -10,13 +10,19 @@
* *
*/ */
.org 0x11000 #include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
__HEAD
.globl startup_continue
startup_continue: startup_continue:
basr %r13,0 # get base basr %r13,0 # get base
.LPG1: .LPG1:
mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) l %r1,.Lbase_cc-.LPG1(%r13)
mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore # move IPL device to lowcore
@ -69,10 +75,12 @@ startup_continue:
.Lduald:.rept 8 .Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries .long 0x80000000,0,0,0 # invalid access-list entries
.endr .endr
.Lbase_cc:
.long sched_clock_base_cc
.org 0x12000
.globl _ehead .globl _ehead
_ehead: _ehead:
#ifdef CONFIG_SHARED_KERNEL #ifdef CONFIG_SHARED_KERNEL
.org 0x100000 .org 0x100000
#endif #endif

View File

@ -1,7 +1,7 @@
/* /*
* arch/s390/kernel/head64.S * arch/s390/kernel/head64.S
* *
* Copyright (C) IBM Corp. 1999,2006 * Copyright (C) IBM Corp. 1999,2010
* *
* Author(s): Hartmut Penner <hp@de.ibm.com> * Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com>
@ -10,80 +10,17 @@
* *
*/ */
.org 0x11000 #include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
__HEAD
.globl startup_continue
startup_continue: startup_continue:
basr %r13,0 # get base larl %r1,sched_clock_base_cc
.LPG1: sll %r13,1 # remove high order bit mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
srl %r13,1 larl %r13,.LPG1 # get base
#ifdef CONFIG_ZFCPDUMP
# check if we have been ipled using zfcp dump:
tm 0xb9,0x01 # test if subchannel is enabled
jno .nodump # subchannel disabled
l %r1,0xb8
la %r5,.Lipl_schib-.LPG1(%r13)
stsch 0(%r5) # get schib of subchannel
jne .nodump # schib not available
tm 5(%r5),0x01 # devno valid?
jno .nodump
tm 4(%r5),0x80 # qdio capable device?
jno .nodump
l %r2,20(%r0) # address of ipl parameter block
lhi %r3,0
ic %r3,0x148(%r2) # get opt field
chi %r3,0x20 # load with dump?
jne .nodump
# store all prefix registers in case of load with dump:
la %r7,0 # base register for 0 page
la %r8,0 # first cpu
l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array
ahi %r11,4 # skip boot cpu
lr %r12,%r11
ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array
stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr
1:
cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ?
je 4f # if yes get next cpu
2:
lr %r9,%r7
sigp %r9,%r8,0x9 # stop & store status of cpu
brc 8,3f # accepted
brc 4,4f # status stored: next cpu
brc 2,2b # busy: try again
brc 1,4f # not op: next cpu
3:
mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array
ahi %r11,4 # next element in prefix array
clr %r11,%r12
je 5f # no more space in prefix array
4:
ahi %r8,1 # next cpu (r8 += 1)
chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
jle 1b # jump if not last cpu
5:
lhi %r1,2 # mode 2 = esame (dump)
j 6f
.align 4
.Lipl_schib:
.rept 13
.long 0
.endr
.nodump:
lhi %r1,1 # mode 1 = esame (normal ipl)
6:
#else
lhi %r1,1 # mode 1 = esame (normal ipl)
#endif /* CONFIG_ZFCPDUMP */
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
sigp %r1,%r0,0x12 # switch to esame mode
sam64 # switch to 64 bit mode
llgfr %r13,%r13 # clear high-order half of base reg
lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
@ -108,6 +45,7 @@ startup_continue:
lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
# virtual and never return ... # virtual and never return ...
.align 16 .align 16
.LPG1:
.Lentry:.quad 0x0000000180000000,_stext .Lentry:.quad 0x0000000180000000,_stext
.Lctl: .quad 0x04350002 # cr0: various things .Lctl: .quad 0x04350002 # cr0: various things
.quad 0 # cr1: primary space segment table .quad 0 # cr1: primary space segment table
@ -130,12 +68,6 @@ startup_continue:
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
.Lnop: .long 0x07000700 .Lnop: .long 0x07000700
.Lzero64:.fill 16,4,0x0 .Lzero64:.fill 16,4,0x0
#ifdef CONFIG_ZFCPDUMP
.Lcurrent_cpu:
.long 0x0
.Lpref_arr_ptr:
.long zfcpdump_prefix_array
#endif /* CONFIG_ZFCPDUMP */
.Lparmaddr: .Lparmaddr:
.quad PARMAREA .quad PARMAREA
.align 64 .align 64
@ -146,9 +78,9 @@ startup_continue:
.long 0x80000000,0,0,0 # invalid access-list entries .long 0x80000000,0,0,0 # invalid access-list entries
.endr .endr
.org 0x12000
.globl _ehead .globl _ehead
_ehead: _ehead:
#ifdef CONFIG_SHARED_KERNEL #ifdef CONFIG_SHARED_KERNEL
.org 0x100000 .org 0x100000
#endif #endif

View File

@ -553,7 +553,7 @@ out:
return rc; return rc;
} }
static void ipl_run(struct shutdown_trigger *trigger) static void __ipl_run(void *unused)
{ {
diag308(DIAG308_IPL, NULL); diag308(DIAG308_IPL, NULL);
if (MACHINE_IS_VM) if (MACHINE_IS_VM)
@ -562,6 +562,11 @@ static void ipl_run(struct shutdown_trigger *trigger)
reipl_ccw_dev(&ipl_info.data.ccw.dev_id); reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
} }
static void ipl_run(struct shutdown_trigger *trigger)
{
smp_switch_to_ipl_cpu(__ipl_run, NULL);
}
static int __init ipl_init(void) static int __init ipl_init(void)
{ {
int rc; int rc;
@ -1039,7 +1044,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
sprintf(dst + pos, " PARM %s", vmparm); sprintf(dst + pos, " PARM %s", vmparm);
} }
static void reipl_run(struct shutdown_trigger *trigger) static void __reipl_run(void *unused)
{ {
struct ccw_dev_id devid; struct ccw_dev_id devid;
static char buf[128]; static char buf[128];
@ -1087,6 +1092,11 @@ static void reipl_run(struct shutdown_trigger *trigger)
disabled_wait((unsigned long) __builtin_return_address(0)); disabled_wait((unsigned long) __builtin_return_address(0));
} }
static void reipl_run(struct shutdown_trigger *trigger)
{
smp_switch_to_ipl_cpu(__reipl_run, NULL);
}
static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
{ {
ipb->hdr.len = IPL_PARM_BLK_CCW_LEN; ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
@ -1369,20 +1379,18 @@ static struct kobj_attribute dump_type_attr =
static struct kset *dump_kset; static struct kset *dump_kset;
static void dump_run(struct shutdown_trigger *trigger) static void __dump_run(void *unused)
{ {
struct ccw_dev_id devid; struct ccw_dev_id devid;
static char buf[100]; static char buf[100];
switch (dump_method) { switch (dump_method) {
case DUMP_METHOD_CCW_CIO: case DUMP_METHOD_CCW_CIO:
smp_send_stop();
devid.devno = dump_block_ccw->ipl_info.ccw.devno; devid.devno = dump_block_ccw->ipl_info.ccw.devno;
devid.ssid = 0; devid.ssid = 0;
reipl_ccw_dev(&devid); reipl_ccw_dev(&devid);
break; break;
case DUMP_METHOD_CCW_VM: case DUMP_METHOD_CCW_VM:
smp_send_stop();
sprintf(buf, "STORE STATUS"); sprintf(buf, "STORE STATUS");
__cpcmd(buf, NULL, 0, NULL); __cpcmd(buf, NULL, 0, NULL);
sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
@ -1396,10 +1404,17 @@ static void dump_run(struct shutdown_trigger *trigger)
diag308(DIAG308_SET, dump_block_fcp); diag308(DIAG308_SET, dump_block_fcp);
diag308(DIAG308_DUMP, NULL); diag308(DIAG308_DUMP, NULL);
break; break;
case DUMP_METHOD_NONE: default:
return; break;
} }
printk(KERN_EMERG "Dump failed!\n"); }
static void dump_run(struct shutdown_trigger *trigger)
{
if (dump_method == DUMP_METHOD_NONE)
return;
smp_send_stop();
smp_switch_to_ipl_cpu(__dump_run, NULL);
} }
static int __init dump_ccw_init(void) static int __init dump_ccw_init(void)
@ -1577,7 +1592,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
static int vmcmd_init(void) static int vmcmd_init(void)
{ {
if (!MACHINE_IS_VM) if (!MACHINE_IS_VM)
return -ENOTSUPP; return -EOPNOTSUPP;
vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
if (!vmcmd_kset) if (!vmcmd_kset)
return -ENOMEM; return -ENOMEM;
@ -1595,7 +1610,7 @@ static void stop_run(struct shutdown_trigger *trigger)
{ {
if (strcmp(trigger->name, ON_PANIC_STR) == 0) if (strcmp(trigger->name, ON_PANIC_STR) == 0)
disabled_wait((unsigned long) __builtin_return_address(0)); disabled_wait((unsigned long) __builtin_return_address(0));
while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
cpu_relax(); cpu_relax();
for (;;); for (;;);
} }
@ -1902,7 +1917,6 @@ void __init ipl_update_parameters(void)
void __init ipl_save_parameters(void) void __init ipl_save_parameters(void)
{ {
struct cio_iplinfo iplinfo; struct cio_iplinfo iplinfo;
unsigned int *ipl_ptr;
void *src, *dst; void *src, *dst;
if (cio_get_iplinfo(&iplinfo)) if (cio_get_iplinfo(&iplinfo))
@ -1913,11 +1927,10 @@ void __init ipl_save_parameters(void)
if (!iplinfo.is_qdio) if (!iplinfo.is_qdio)
return; return;
ipl_flags |= IPL_PARMBLOCK_VALID; ipl_flags |= IPL_PARMBLOCK_VALID;
ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
src = (void *)(unsigned long)*ipl_ptr;
dst = (void *)IPL_PARMBLOCK_ORIGIN; dst = (void *)IPL_PARMBLOCK_ORIGIN;
memmove(dst, src, PAGE_SIZE); memmove(dst, src, PAGE_SIZE);
*ipl_ptr = IPL_PARMBLOCK_ORIGIN; S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
} }
static LIST_HEAD(rcall); static LIST_HEAD(rcall);

View File

@ -54,11 +54,11 @@ void machine_shutdown(void)
{ {
} }
void machine_kexec(struct kimage *image) static void __machine_kexec(void *data)
{ {
relocate_kernel_t data_mover; relocate_kernel_t data_mover;
struct kimage *image = data;
smp_send_stop();
pfault_fini(); pfault_fini();
s390_reset_system(); s390_reset_system();
@ -68,3 +68,9 @@ void machine_kexec(struct kimage *image)
(*data_mover)(&image->head, image->start); (*data_mover)(&image->head, image->start);
for (;;); for (;;);
} }
void machine_kexec(struct kimage *image)
{
smp_send_stop();
smp_switch_to_ipl_cpu(__machine_kexec, image);
}

View File

@ -6,7 +6,7 @@
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
*/ */
#include <asm/lowcore.h> #include <asm/asm-offsets.h>
# #
# do_reipl_asm # do_reipl_asm

View File

@ -4,7 +4,7 @@
* Denis Joseph Barrow, * Denis Joseph Barrow,
*/ */
#include <asm/lowcore.h> #include <asm/asm-offsets.h>
# #
# do_reipl_asm # do_reipl_asm

View File

@ -9,8 +9,10 @@
*/ */
LC_EXT_NEW_PSW = 0x58 # addr of ext int handler LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter
LC_EXT_INT_CODE = 0x86 # addr of ext int code LC_EXT_INT_CODE = 0x86 # addr of ext int code
LC_AR_MODE_ID = 0xa3
# #
# Subroutine which waits synchronously until either an external interruption # Subroutine which waits synchronously until either an external interruption
@ -30,8 +32,16 @@ _sclp_wait_int:
.LbaseS1: .LbaseS1:
ahi %r15,-96 # create stack frame ahi %r15,-96 # create stack frame
la %r8,LC_EXT_NEW_PSW # register int handler la %r8,LC_EXT_NEW_PSW # register int handler
mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8) la %r9,.LextpswS1-.LbaseS1(%r13)
mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13) #ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa1
la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
la %r9,.LextpswS1_64-.LbaseS1(%r13)
.Lesa1:
#endif
mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
mvc 0(16,%r8),0(%r9)
lhi %r6,0x0200 # cr mask for ext int (cr0.54) lhi %r6,0x0200 # cr mask for ext int (cr0.54)
ltr %r2,%r2 ltr %r2,%r2
jz .LsetctS1 jz .LsetctS1
@ -64,15 +74,19 @@ _sclp_wait_int:
.LtimeoutS1: .LtimeoutS1:
lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting
# restore old handler # restore old handler
mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13) mvc 0(16,%r8),.LoldpswS1-.LbaseS1(%r13)
lm %r6,%r15,120(%r15) # restore registers lm %r6,%r15,120(%r15) # restore registers
br %r14 # return to caller br %r14 # return to caller
.align 8 .align 8
.LoldpswS1: .LoldpswS1:
.long 0, 0 # old ext int PSW .long 0, 0, 0, 0 # old ext int PSW
.LextpswS1: .LextpswS1:
.long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
#ifdef CONFIG_64BIT
.LextpswS1_64:
.quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit
#endif
.LwaitpswS1: .LwaitpswS1:
.long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
.LtimeS1: .LtimeS1:
@ -250,6 +264,13 @@ _sclp_print:
_sclp_print_early: _sclp_print_early:
stm %r6,%r15,24(%r15) # save registers stm %r6,%r15,24(%r15) # save registers
ahi %r15,-96 # create stack frame ahi %r15,-96 # create stack frame
#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa2
ahi %r15,-80
stmh %r6,%r15,96(%r15) # store upper register halves
.Lesa2:
#endif
lr %r10,%r2 # save string pointer lr %r10,%r2 # save string pointer
lhi %r2,0 lhi %r2,0
bras %r14,_sclp_setup # enable console bras %r14,_sclp_setup # enable console
@ -262,6 +283,13 @@ _sclp_print_early:
lhi %r2,1 lhi %r2,1
bras %r14,_sclp_setup # disable console bras %r14,_sclp_setup # disable console
.LendS5: .LendS5:
#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa3
lmh %r6,%r15,96(%r15) # store upper register halves
ahi %r15,80
.Lesa3:
#endif
lm %r6,%r15,120(%r15) # restore registers lm %r6,%r15,120(%r15) # restore registers
br %r14 br %r14

View File

@ -396,15 +396,12 @@ static void __init
setup_lowcore(void) setup_lowcore(void)
{ {
struct _lowcore *lc; struct _lowcore *lc;
int lc_pages;
/* /*
* Setup lowcore for boot cpu * Setup lowcore for boot cpu
*/ */
lc_pages = sizeof(void *) == 8 ? 2 : 1; BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
lc = (struct _lowcore *) lc = __alloc_bootmem(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
memset(lc, 0, lc_pages * PAGE_SIZE);
lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
lc->restart_psw.addr = lc->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) restart_int_handler; PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
@ -804,7 +801,7 @@ setup_arch(char **cmdline_p)
if (MACHINE_IS_VM) if (MACHINE_IS_VM)
pr_info("Linux is running as a z/VM " pr_info("Linux is running as a z/VM "
"guest operating system in 31-bit mode\n"); "guest operating system in 31-bit mode\n");
else else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 31-bit mode\n"); pr_info("Linux is running natively in 31-bit mode\n");
if (MACHINE_HAS_IEEE) if (MACHINE_HAS_IEEE)
pr_info("The hardware system has IEEE compatible " pr_info("The hardware system has IEEE compatible "
@ -818,7 +815,7 @@ setup_arch(char **cmdline_p)
"guest operating system in 64-bit mode\n"); "guest operating system in 64-bit mode\n");
else if (MACHINE_IS_KVM) else if (MACHINE_IS_KVM)
pr_info("Linux is running under KVM in 64-bit mode\n"); pr_info("Linux is running under KVM in 64-bit mode\n");
else else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n"); pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
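
setup_lowcore() now asserts at build time, via BUILD_BUG_ON(), that struct _lowcore occupies exactly LC_PAGES pages. As a reminder of how that class of check behaves, here is a self-contained sketch; COMPILE_ASSERT, example_lowcore and check_layout are invented names, the kernel itself provides BUILD_BUG_ON() in <linux/kernel.h>:

	/* Invented stand-in for BUILD_BUG_ON(): the array size turns negative,
	 * and compilation fails, exactly when cond is true. */
	#define COMPILE_ASSERT(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

	struct example_lowcore {
		unsigned long words[512];	/* hypothetical layout, 4096 bytes on 64 bit */
	};

	static void check_layout(void)
	{
		/* Breaks the build if the structure ever stops fitting one page. */
		COMPILE_ASSERT(sizeof(struct example_lowcore) != 4096);
	}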

View File

@ -36,6 +36,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/timex.h> #include <linux/timex.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h> #include <asm/ipl.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/sigp.h> #include <asm/sigp.h>
@ -53,7 +54,7 @@
#include "entry.h" #include "entry.h"
/* logical cpu to cpu address */ /* logical cpu to cpu address */
int __cpu_logical_map[NR_CPUS]; unsigned short __cpu_logical_map[NR_CPUS];
static struct task_struct *current_set[NR_CPUS]; static struct task_struct *current_set[NR_CPUS];
@ -72,13 +73,13 @@ static int cpu_management;
static DEFINE_PER_CPU(struct cpu, cpu_devices); static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void smp_ext_bitcall(int, ec_bit_sig); static void smp_ext_bitcall(int, int);
static int cpu_stopped(int cpu) static int raw_cpu_stopped(int cpu)
{ {
__u32 status; u32 status;
switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
case sigp_status_stored: case sigp_status_stored:
/* Check for stopped and check stop state */ /* Check for stopped and check stop state */
if (status & 0x50) if (status & 0x50)
@ -90,6 +91,44 @@ static int cpu_stopped(int cpu)
return 0; return 0;
} }
static inline int cpu_stopped(int cpu)
{
return raw_cpu_stopped(cpu_logical_map(cpu));
}
void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
{
struct _lowcore *lc, *current_lc;
struct stack_frame *sf;
struct pt_regs *regs;
unsigned long sp;
if (smp_processor_id() == 0)
func(data);
__load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
/* Disable lowcore protection */
__ctl_clear_bit(0, 28);
current_lc = lowcore_ptr[smp_processor_id()];
lc = lowcore_ptr[0];
if (!lc)
lc = current_lc;
lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
if (!cpu_online(0))
smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
cpu_relax();
sp = lc->panic_stack;
sp -= sizeof(struct pt_regs);
regs = (struct pt_regs *) sp;
memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
regs->psw = lc->psw_save_area;
sp -= STACK_FRAME_OVERHEAD;
sf = (struct stack_frame *) sp;
sf->back_chain = regs->gprs[15];
smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
}
void smp_send_stop(void) void smp_send_stop(void)
{ {
int cpu, rc; int cpu, rc;
@ -103,7 +142,7 @@ void smp_send_stop(void)
if (cpu == smp_processor_id()) if (cpu == smp_processor_id())
continue; continue;
do { do {
rc = signal_processor(cpu, sigp_stop); rc = sigp(cpu, sigp_stop);
} while (rc == sigp_busy); } while (rc == sigp_busy);
while (!cpu_stopped(cpu)) while (!cpu_stopped(cpu))
@ -139,13 +178,13 @@ static void do_ext_call_interrupt(__u16 code)
* Send an external call sigp to another cpu and return without waiting * Send an external call sigp to another cpu and return without waiting
* for its completion. * for its completion.
*/ */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig) static void smp_ext_bitcall(int cpu, int sig)
{ {
/* /*
* Set signaling bit in lowcore of target cpu and kick it * Set signaling bit in lowcore of target cpu and kick it
*/ */
set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
udelay(10); udelay(10);
} }
@ -239,24 +278,8 @@ void smp_ctl_clear_bit(int cr, int bit)
} }
EXPORT_SYMBOL(smp_ctl_clear_bit); EXPORT_SYMBOL(smp_ctl_clear_bit);
/*
* In early ipl state a temp. logically cpu number is needed, so the sigp
* functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
* CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
*/
#define CPU_INIT_NO 1
#ifdef CONFIG_ZFCPDUMP #ifdef CONFIG_ZFCPDUMP
/*
* zfcpdump_prefix_array holds prefix registers for the following scenario:
* 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
* save its prefix registers, since they get lost, when switching from 31 bit
* to 64 bit.
*/
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
__attribute__((__section__(".data")));
static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{ {
if (ipl_info.type != IPL_TYPE_FCP_DUMP) if (ipl_info.type != IPL_TYPE_FCP_DUMP)
@ -266,21 +289,15 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
"the dump\n", cpu, NR_CPUS - 1); "the dump\n", cpu, NR_CPUS - 1);
return; return;
} }
zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
sigp_busy)
cpu_relax(); cpu_relax();
memcpy(zfcpdump_save_areas[cpu], memcpy(zfcpdump_save_areas[cpu],
(void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
SAVE_AREA_SIZE); sizeof(struct save_area));
#ifdef CONFIG_64BIT
/* copy original prefix register */
zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
} }
union save_area *zfcpdump_save_areas[NR_CPUS + 1]; struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas); EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
#else #else
@ -389,8 +406,7 @@ static void __init smp_detect_cpus(void)
for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
if (cpu == boot_cpu_addr) if (cpu == boot_cpu_addr)
continue; continue;
__cpu_logical_map[CPU_INIT_NO] = cpu; if (!raw_cpu_stopped(cpu))
if (!cpu_stopped(CPU_INIT_NO))
continue; continue;
smp_get_save_area(c_cpus, cpu); smp_get_save_area(c_cpus, cpu);
c_cpus++; c_cpus++;
@ -413,8 +429,7 @@ static void __init smp_detect_cpus(void)
cpu_addr = info->cpu[cpu].address; cpu_addr = info->cpu[cpu].address;
if (cpu_addr == boot_cpu_addr) if (cpu_addr == boot_cpu_addr)
continue; continue;
__cpu_logical_map[CPU_INIT_NO] = cpu_addr; if (!raw_cpu_stopped(cpu_addr)) {
if (!cpu_stopped(CPU_INIT_NO)) {
s_cpus++; s_cpus++;
continue; continue;
} }
@ -533,18 +548,18 @@ static void smp_free_lowcore(int cpu)
/* Upping and downing of CPUs */ /* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu) int __cpuinit __cpu_up(unsigned int cpu)
{ {
struct task_struct *idle;
struct _lowcore *cpu_lowcore; struct _lowcore *cpu_lowcore;
struct task_struct *idle;
struct stack_frame *sf; struct stack_frame *sf;
sigp_ccode ccode;
u32 lowcore; u32 lowcore;
int ccode;
if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
return -EIO; return -EIO;
if (smp_alloc_lowcore(cpu)) if (smp_alloc_lowcore(cpu))
return -ENOMEM; return -ENOMEM;
do { do {
ccode = signal_processor(cpu, sigp_initial_cpu_reset); ccode = sigp(cpu, sigp_initial_cpu_reset);
if (ccode == sigp_busy) if (ccode == sigp_busy)
udelay(10); udelay(10);
if (ccode == sigp_not_operational) if (ccode == sigp_not_operational)
@ -552,7 +567,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
} while (ccode == sigp_busy); } while (ccode == sigp_busy);
lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
udelay(10); udelay(10);
idle = current_set[cpu]; idle = current_set[cpu];
@ -578,7 +593,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
eieio(); eieio();
while (signal_processor(cpu, sigp_restart) == sigp_busy) while (sigp(cpu, sigp_restart) == sigp_busy)
udelay(10); udelay(10);
while (!cpu_online(cpu)) while (!cpu_online(cpu))
@ -640,7 +655,7 @@ void __cpu_die(unsigned int cpu)
/* Wait until target cpu is down */ /* Wait until target cpu is down */
while (!cpu_stopped(cpu)) while (!cpu_stopped(cpu))
cpu_relax(); cpu_relax();
while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
udelay(10); udelay(10);
smp_free_lowcore(cpu); smp_free_lowcore(cpu);
pr_info("Processor %d stopped\n", cpu); pr_info("Processor %d stopped\n", cpu);
@ -649,7 +664,7 @@ void __cpu_die(unsigned int cpu)
void cpu_die(void) void cpu_die(void)
{ {
idle_task_exit(); idle_task_exit();
while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
cpu_relax(); cpu_relax();
for (;;); for (;;);
} }
@ -765,7 +780,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
get_online_cpus(); get_online_cpus();
mutex_lock(&smp_cpu_state_mutex); mutex_lock(&smp_cpu_state_mutex);
rc = -EBUSY; rc = -EBUSY;
if (cpu_online(cpu)) /* disallow configuration changes of online cpus and cpu 0 */
if (cpu_online(cpu) || cpu == 0)
goto out; goto out;
rc = 0; rc = 0;
switch (val) { switch (val) {
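
smp_switch_to_ipl_cpu() above lets the re-IPL, dump and kexec paths in this series finish their work on the IPL CPU: each caller becomes a worker taking a void * plus a thin wrapper that performs the hand-off. A schematic of that caller-side split (my_shutdown_worker and my_shutdown are illustrative names, not functions added by this patch):

	/* Illustrative worker/wrapper split, mirroring __ipl_run()/ipl_run(). */
	static void my_shutdown_worker(void *data)
	{
		/* Runs on the IPL CPU (CPU 0): perform the actual reset/dump work. */
	}

	static void my_shutdown(void)
	{
		smp_send_stop();	/* the dump and kexec paths quiesce other CPUs first */
		smp_switch_to_ipl_cpu(my_shutdown_worker, NULL);
	}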

View File

@ -0,0 +1,58 @@
/*
* 31-bit switch cpu code
*
* Copyright IBM Corp. 2009
*
*/
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
# smp_switch_to_cpu switches to destination cpu and executes the passed function
# Parameter: %r2 - function to call
# %r3 - function parameter
# %r4 - stack pointer

# %r5 - current cpu
# %r6 - destination cpu
.section .text
.align 4
.globl smp_switch_to_cpu
smp_switch_to_cpu:
stm %r6,%r15,__SF_GPRS(%r15)
lr %r1,%r15
ahi %r15,-STACK_FRAME_OVERHEAD
st %r1,__SF_BACKCHAIN(%r15)
basr %r13,0
0: la %r1,.gprregs_addr-0b(%r13)
l %r1,0(%r1)
stm %r0,%r15,0(%r1)
1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
brc 2,1b /* busy, try again */
2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
brc 2,2b /* busy, try again */
3: j 3b
.globl smp_restart_cpu
smp_restart_cpu:
basr %r13,0
0: la %r1,.gprregs_addr-0b(%r13)
l %r1,0(%r1)
lm %r0,%r15,0(%r1)
1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
brc 10,1b /* busy, accepted (status 0), running */
tmll %r0,0x40 /* Test if calling CPU is stopped */
jz 1b
ltr %r4,%r4 /* New stack ? */
jz 1f
lr %r15,%r4
1: basr %r14,%r2
.gprregs_addr:
.long .gprregs
.section .data,"aw",@progbits
.gprregs:
.rept 16
.long 0
.endr

View File

@ -0,0 +1,51 @@
/*
* 64-bit switch cpu code
*
* Copyright IBM Corp. 2009
*
*/
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
# smp_switch_to_cpu switches to destination cpu and executes the passed function
# Parameter: %r2 - function to call
# %r3 - function parameter
# %r4 - stack pointer
# %r5 - current cpu
# %r6 - destination cpu
.section .text
.align 4
.globl smp_switch_to_cpu
smp_switch_to_cpu:
stmg %r6,%r15,__SF_GPRS(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
larl %r1,.gprregs
stmg %r0,%r15,0(%r1)
1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
brc 2,1b /* busy, try again */
2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
brc 2,2b /* busy, try again */
3: j 3b
.globl smp_restart_cpu
smp_restart_cpu:
larl %r1,.gprregs
lmg %r0,%r15,0(%r1)
1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
brc 10,1b /* busy, accepted (status 0), running */
tmll %r0,0x40 /* Test if calling CPU is stopped */
jz 1b
ltgr %r4,%r4 /* New stack ? */
jz 1f
lgr %r15,%r4
1: basr %r14,%r2
.section .data,"aw",@progbits
.gprregs:
.rept 16
.quad 0
.endr

View File

@ -176,7 +176,7 @@ pgm_check_entry:
cgr %r1,%r2 cgr %r1,%r2
je restore_registers /* r1 = r2 -> nothing to do */ je restore_registers /* r1 = r2 -> nothing to do */
larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
mvc __LC_RESTART_PSW(16,%r0),0(%r4) mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
3: 3:
sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET
brc 8,4f /* accepted */ brc 8,4f /* accepted */

View File

@ -51,14 +51,6 @@
#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
/*
* Create a small time difference between the timer interrupts
* on the different cpus to avoid lock contention.
*/
#define CPU_DEVIATION (smp_processor_id() << 12)
#define TICK_SIZE tick
u64 sched_clock_base_cc = -1; /* Force to data section. */ u64 sched_clock_base_cc = -1; /* Force to data section. */
EXPORT_SYMBOL_GPL(sched_clock_base_cc); EXPORT_SYMBOL_GPL(sched_clock_base_cc);

View File

@ -23,6 +23,7 @@
#include <linux/security.h> #include <linux/security.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/processor.h> #include <asm/processor.h>

View File

@ -39,7 +39,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
vcpu->run->s390_reset_flags = 0; vcpu->run->s390_reset_flags = 0;
break; break;
default: default:
return -ENOTSUPP; return -EOPNOTSUPP;
} }
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@ -62,6 +62,6 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
case 0x308: case 0x308:
return __diag_ipl_functions(vcpu); return __diag_ipl_functions(vcpu);
default: default:
return -ENOTSUPP; return -EOPNOTSUPP;
} }
} }
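
The ENOTSUPP to EOPNOTSUPP switch here (and in the KVM intercept, priv and sigp handlers and the DCSS code further down) matters because ENOTSUPP is a kernel-internal value with no userspace errno definition, whereas EOPNOTSUPP is a standard errno that libc reports as "Operation not supported". A minimal illustration of the convention (handle_subcode is an invented name):

	#include <linux/errno.h>

	/* Invented example: reject unknown subcodes with a userspace-visible errno. */
	static int handle_subcode(unsigned int subcode)
	{
		switch (subcode) {
		case 0x308:
			return 0;		/* supported */
		default:
			return -EOPNOTSUPP;	/* not -ENOTSUPP: userspace can decode this */
		}
	}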

View File

@ -32,7 +32,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_lctlg++; vcpu->stat.instruction_lctlg++;
if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
return -ENOTSUPP; return -EOPNOTSUPP;
useraddr = disp2; useraddr = disp2;
if (base2) if (base2)
@ -138,7 +138,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
rc = __kvm_s390_vcpu_store_status(vcpu, rc = __kvm_s390_vcpu_store_status(vcpu,
KVM_S390_STORE_STATUS_NOADDR); KVM_S390_STORE_STATUS_NOADDR);
if (rc >= 0) if (rc >= 0)
rc = -ENOTSUPP; rc = -EOPNOTSUPP;
} }
if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) { if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
@ -150,7 +150,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
rc = -ENOTSUPP; rc = -EOPNOTSUPP;
} }
spin_unlock_bh(&vcpu->arch.local_int.lock); spin_unlock_bh(&vcpu->arch.local_int.lock);
@ -171,9 +171,9 @@ static int handle_validity(struct kvm_vcpu *vcpu)
2*PAGE_SIZE); 2*PAGE_SIZE);
if (rc) if (rc)
/* user will receive sigsegv, exit to user */ /* user will receive sigsegv, exit to user */
rc = -ENOTSUPP; rc = -EOPNOTSUPP;
} else } else
rc = -ENOTSUPP; rc = -EOPNOTSUPP;
if (rc) if (rc)
VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
@ -189,7 +189,7 @@ static int handle_instruction(struct kvm_vcpu *vcpu)
handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
if (handler) if (handler)
return handler(vcpu); return handler(vcpu);
return -ENOTSUPP; return -EOPNOTSUPP;
} }
static int handle_prog(struct kvm_vcpu *vcpu) static int handle_prog(struct kvm_vcpu *vcpu)
@ -206,7 +206,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
rc = handle_instruction(vcpu); rc = handle_instruction(vcpu);
rc2 = handle_prog(vcpu); rc2 = handle_prog(vcpu);
if (rc == -ENOTSUPP) if (rc == -EOPNOTSUPP)
vcpu->arch.sie_block->icptcode = 0x04; vcpu->arch.sie_block->icptcode = 0x04;
if (rc) if (rc)
return rc; return rc;
@ -231,9 +231,9 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
u8 code = vcpu->arch.sie_block->icptcode; u8 code = vcpu->arch.sie_block->icptcode;
if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
return -ENOTSUPP; return -EOPNOTSUPP;
func = intercept_funcs[code >> 2]; func = intercept_funcs[code >> 2];
if (func) if (func)
return func(vcpu); return func(vcpu);
return -ENOTSUPP; return -EOPNOTSUPP;
} }

View File

@ -10,12 +10,12 @@
* Author(s): Carsten Otte <cotte@de.ibm.com> * Author(s): Carsten Otte <cotte@de.ibm.com>
*/ */
#include <asm/lowcore.h>
#include <asm/uaccess.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h" #include "kvm-s390.h"
#include "gaccess.h" #include "gaccess.h"
@ -187,8 +187,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
if (rc == -EFAULT) if (rc == -EFAULT)
exception = 1; exception = 1;
rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM, rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
inti->ext.ext_params2); inti->ext.ext_params2);
if (rc == -EFAULT) if (rc == -EFAULT)
exception = 1; exception = 1;
break; break;
@ -342,7 +342,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
if (psw_interrupts_disabled(vcpu)) { if (psw_interrupts_disabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
__unset_cpu_idle(vcpu); __unset_cpu_idle(vcpu);
return -ENOTSUPP; /* disabled wait */ return -EOPNOTSUPP; /* disabled wait */
} }
if (psw_extint_disabled(vcpu) || if (psw_extint_disabled(vcpu) ||

View File

@ -23,6 +23,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/nmi.h> #include <asm/nmi.h>
@ -543,7 +544,7 @@ rerun_vcpu:
rc = -EINTR; rc = -EINTR;
} }
if (rc == -ENOTSUPP) { if (rc == -EOPNOTSUPP) {
/* intercept cannot be handled in-kernel, prepare kvm-run */ /* intercept cannot be handled in-kernel, prepare kvm-run */
kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
@ -603,45 +604,45 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
} else } else
prefix = 0; prefix = 0;
if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs), if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
vcpu->arch.guest_fpregs.fprs, 128, prefix)) vcpu->arch.guest_fpregs.fprs, 128, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs), if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
vcpu->arch.guest_gprs, 128, prefix)) vcpu->arch.guest_gprs, 128, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw), if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
&vcpu->arch.sie_block->gpsw, 16, prefix)) &vcpu->arch.sie_block->gpsw, 16, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg), if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
&vcpu->arch.sie_block->prefix, 4, prefix)) &vcpu->arch.sie_block->prefix, 4, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, if (__guestcopy(vcpu,
addr + offsetof(struct save_area_s390x, fp_ctrl_reg), addr + offsetof(struct save_area, fp_ctrl_reg),
&vcpu->arch.guest_fpregs.fpc, 4, prefix)) &vcpu->arch.guest_fpregs.fpc, 4, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg), if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
&vcpu->arch.sie_block->todpr, 4, prefix)) &vcpu->arch.sie_block->todpr, 4, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer), if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
&vcpu->arch.sie_block->cputm, 8, prefix)) &vcpu->arch.sie_block->cputm, 8, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp), if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
&vcpu->arch.sie_block->ckc, 8, prefix)) &vcpu->arch.sie_block->ckc, 8, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs), if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
&vcpu->arch.guest_acrs, 64, prefix)) &vcpu->arch.guest_acrs, 64, prefix))
return -EFAULT; return -EFAULT;
if (__guestcopy(vcpu, if (__guestcopy(vcpu,
addr + offsetof(struct save_area_s390x, ctrl_regs), addr + offsetof(struct save_area, ctrl_regs),
&vcpu->arch.sie_block->gcr, 128, prefix)) &vcpu->arch.sie_block->gcr, 128, prefix))
return -EFAULT; return -EFAULT;
return 0; return 0;

View File

@ -323,5 +323,5 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
else else
return handler(vcpu); return handler(vcpu);
} }
return -ENOTSUPP; return -EOPNOTSUPP;
} }

View File

@ -172,7 +172,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
rc = 0; /* order accepted */ rc = 0; /* order accepted */
break; break;
default: default:
rc = -ENOTSUPP; rc = -EOPNOTSUPP;
} }
return rc; return rc;
} }
@ -293,7 +293,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_sigp_restart++; vcpu->stat.instruction_sigp_restart++;
/* user space must know about restart */ /* user space must know about restart */
default: default:
return -ENOTSUPP; return -EOPNOTSUPP;
} }
if (rc < 0) if (rc < 0)

View File

@ -2,7 +2,7 @@
# Makefile for s390-specific library files.. # Makefile for s390-specific library files..
# #
lib-y += delay.o string.o uaccess_std.o uaccess_pt.o lib-y += delay.o string.o uaccess_std.o uaccess_pt.o usercopy.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o lib-$(CONFIG_SMP) += spinlock.o

View File

@ -43,16 +43,24 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
{ {
int count = spin_retry; int count = spin_retry;
unsigned int cpu = ~smp_processor_id(); unsigned int cpu = ~smp_processor_id();
unsigned int owner;
while (1) { while (1) {
if (count-- <= 0) { owner = lp->owner_cpu;
unsigned int owner = lp->owner_cpu; if (!owner || smp_vcpu_scheduled(~owner)) {
if (owner != 0) for (count = spin_retry; count > 0; count--) {
_raw_yield_cpu(~owner); if (arch_spin_is_locked(lp))
count = spin_retry; continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0,
cpu) == 0)
return;
}
if (MACHINE_IS_LPAR)
continue;
} }
if (arch_spin_is_locked(lp)) owner = lp->owner_cpu;
continue; if (owner)
_raw_yield_cpu(~owner);
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return; return;
} }
@ -63,17 +71,27 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{ {
int count = spin_retry; int count = spin_retry;
unsigned int cpu = ~smp_processor_id(); unsigned int cpu = ~smp_processor_id();
unsigned int owner;
local_irq_restore(flags); local_irq_restore(flags);
while (1) { while (1) {
if (count-- <= 0) { owner = lp->owner_cpu;
unsigned int owner = lp->owner_cpu; if (!owner || smp_vcpu_scheduled(~owner)) {
if (owner != 0) for (count = spin_retry; count > 0; count--) {
_raw_yield_cpu(~owner); if (arch_spin_is_locked(lp))
count = spin_retry; continue;
local_irq_disable();
if (_raw_compare_and_swap(&lp->owner_cpu, 0,
cpu) == 0)
return;
local_irq_restore(flags);
}
if (MACHINE_IS_LPAR)
continue;
} }
if (arch_spin_is_locked(lp)) owner = lp->owner_cpu;
continue; if (owner)
_raw_yield_cpu(~owner);
local_irq_disable(); local_irq_disable();
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return; return;
@ -100,8 +118,11 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
void arch_spin_relax(arch_spinlock_t *lock) void arch_spin_relax(arch_spinlock_t *lock)
{ {
unsigned int cpu = lock->owner_cpu; unsigned int cpu = lock->owner_cpu;
if (cpu != 0) if (cpu != 0) {
_raw_yield_cpu(~cpu); if (MACHINE_IS_VM || MACHINE_IS_KVM ||
!smp_vcpu_scheduled(~cpu))
_raw_yield_cpu(~cpu);
}
} }
EXPORT_SYMBOL(arch_spin_relax); EXPORT_SYMBOL(arch_spin_relax);
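
The reworked lock-wait loops above only busy-wait while the lock holder's virtual CPU appears to be scheduled, otherwise they donate the time slice to the holder; on LPAR they keep spinning instead of yielding. A simplified sketch of that decision, with placeholders for the real primitives (vcpu_is_running, try_acquire and yield_to stand in for smp_vcpu_scheduled(), the compare-and-swap on owner_cpu and _raw_yield_cpu()):

	/* Schematic only; the real code lives in arch_spin_lock_wait() above. */
	static void spin_wait_sketch(arch_spinlock_t *lp, unsigned int self)
	{
		unsigned int owner;
		int count;

		for (;;) {
			owner = lp->owner_cpu;
			if (!owner || vcpu_is_running(~owner)) {
				/* Holder should make progress: spin a bounded number of times. */
				for (count = spin_retry; count > 0; count--)
					if (try_acquire(lp, self))
						return;
				if (MACHINE_IS_LPAR)
					continue;	/* on LPAR, keep spinning rather than yield */
			}
			/* Holder is not running: hand it our time slice, then retry. */
			owner = lp->owner_cpu;
			if (owner)
				yield_to(~owner);
			if (try_acquire(lp, self))
				return;
		}
	}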

View File

@ -0,0 +1,8 @@
#include <linux/module.h>
#include <linux/bug.h>
void copy_from_user_overflow(void)
{
WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);
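
The new usercopy.c above (added to the s390 library Makefile earlier in this series) provides the handler that the user-copy size checks call when a copy_from_user() destination buffer is provably smaller than the requested length. A deliberately broken sketch of the kind of caller this is meant to catch; do_bad_copy and the sizes are invented:

	#include <linux/uaccess.h>

	/* Invented example: a 16-byte stack buffer cannot hold 64 bytes. With the
	 * strict user-copy checks this is caught by the size check and reported
	 * through the WARN in copy_from_user_overflow() (and, depending on
	 * configuration, may already be flagged at build time) instead of
	 * silently overflowing. */
	static int do_bad_copy(const void __user *from)
	{
		char buf[16];

		if (copy_from_user(buf, from, 64))
			return -EFAULT;
		return 0;
	}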

View File

@ -309,7 +309,7 @@ query_segment_type (struct dcss_segment *seg)
} }
#endif #endif
if (qout->segcnt > 6) { if (qout->segcnt > 6) {
rc = -ENOTSUPP; rc = -EOPNOTSUPP;
goto out_free; goto out_free;
} }
@ -324,11 +324,11 @@ query_segment_type (struct dcss_segment *seg)
for (i=0; i<qout->segcnt; i++) { for (i=0; i<qout->segcnt; i++) {
if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
rc = -ENOTSUPP; rc = -EOPNOTSUPP;
goto out_free; goto out_free;
} }
if (start != qout->range[i].start >> PAGE_SHIFT) { if (start != qout->range[i].start >> PAGE_SHIFT) {
rc = -ENOTSUPP; rc = -EOPNOTSUPP;
goto out_free; goto out_free;
} }
start = (qout->range[i].end >> PAGE_SHIFT) + 1; start = (qout->range[i].end >> PAGE_SHIFT) + 1;
@ -357,7 +357,7 @@ query_segment_type (struct dcss_segment *seg)
* -ENOSYS : we are not running on VM * -ENOSYS : we are not running on VM
* -EIO : could not perform query diagnose * -EIO : could not perform query diagnose
* -ENOENT : no such segment * -ENOENT : no such segment
* -ENOTSUPP: multi-part segment cannot be used with linux * -EOPNOTSUPP: multi-part segment cannot be used with linux
* -ENOMEM : out of memory * -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/ */
@ -515,7 +515,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
* -ENOSYS : we are not running on VM * -ENOSYS : we are not running on VM
* -EIO : could not perform query or load diagnose * -EIO : could not perform query or load diagnose
* -ENOENT : no such segment * -ENOENT : no such segment
* -ENOTSUPP: multi-part segment cannot be used with linux * -EOPNOTSUPP: multi-part segment cannot be used with linux
* -ENOSPC : segment cannot be used (overlaps with storage) * -ENOSPC : segment cannot be used (overlaps with storage)
* -EBUSY : segment can temporarily not be used (overlaps with dcss) * -EBUSY : segment can temporarily not be used (overlaps with dcss)
* -ERANGE : segment cannot be used (exceeds kernel mapping range) * -ERANGE : segment cannot be used (exceeds kernel mapping range)
@ -742,7 +742,7 @@ void segment_warning(int rc, char *seg_name)
pr_err("Loading or querying DCSS %s resulted in a " pr_err("Loading or querying DCSS %s resulted in a "
"hardware error\n", seg_name); "hardware error\n", seg_name);
break; break;
case -ENOTSUPP: case -EOPNOTSUPP:
pr_err("DCSS %s has multiple page ranges and cannot be " pr_err("DCSS %s has multiple page ranges and cannot be "
"loaded or queried\n", seg_name); "loaded or queried\n", seg_name);
break; break;

View File

@ -30,6 +30,7 @@
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/hugetlb.h> #include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/s390_ext.h> #include <asm/s390_ext.h>
@ -59,15 +60,13 @@ static inline int notify_page_fault(struct pt_regs *regs)
{ {
int ret = 0; int ret = 0;
#ifdef CONFIG_KPROBES
/* kprobe_running() needs smp_processor_id() */ /* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) { if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable(); preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14)) if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1; ret = 1;
preempt_enable(); preempt_enable();
} }
#endif
return ret; return ret;
} }

View File

@ -143,33 +143,34 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
} }
#endif #endif
void free_initmem(void) void free_init_pages(char *what, unsigned long begin, unsigned long end)
{ {
unsigned long addr; unsigned long addr = begin;
addr = (unsigned long)(&__init_begin); if (begin >= end)
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { return;
for (; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr)); ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr)); init_page_count(virt_to_page(addr));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
PAGE_SIZE);
free_page(addr); free_page(addr);
totalram_pages++; totalram_pages++;
} }
printk ("Freeing unused kernel memory: %ldk freed\n", printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10); }
void free_initmem(void)
{
free_init_pages("unused kernel memory",
(unsigned long)&__init_begin,
(unsigned long)&__init_end);
} }
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end) void free_initrd_mem(unsigned long start, unsigned long end)
{ {
if (start < end) free_init_pages("initrd memory", start, end);
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
} }
#endif #endif

View File

@ -20,6 +20,7 @@
#include <linux/buffer_head.h> #include <linux/buffer_head.h>
#include <linux/hdreg.h> #include <linux/hdreg.h>
#include <linux/async.h> #include <linux/async.h>
#include <linux/mutex.h>
#include <asm/ccwdev.h> #include <asm/ccwdev.h>
#include <asm/ebcdic.h> #include <asm/ebcdic.h>
@ -112,6 +113,7 @@ struct dasd_device *dasd_alloc_device(void)
INIT_WORK(&device->restore_device, do_restore_device); INIT_WORK(&device->restore_device, do_restore_device);
device->state = DASD_STATE_NEW; device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW; device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
return device; return device;
} }
@ -321,8 +323,8 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
device->state = DASD_STATE_READY; device->state = DASD_STATE_READY;
return rc; return rc;
} }
dasd_destroy_partitions(block);
dasd_flush_request_queue(block); dasd_flush_request_queue(block);
dasd_destroy_partitions(block);
block->blocks = 0; block->blocks = 0;
block->bp_block = 0; block->bp_block = 0;
block->s2b_shift = 0; block->s2b_shift = 0;
@ -484,10 +486,8 @@ static void dasd_change_state(struct dasd_device *device)
if (rc) if (rc)
device->target = device->state; device->target = device->state;
if (device->state == device->target) { if (device->state == device->target)
wake_up(&dasd_init_waitq); wake_up(&dasd_init_waitq);
dasd_put_device(device);
}
/* let user-space know that the device status changed */ /* let user-space know that the device status changed */
kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
@ -502,7 +502,9 @@ static void dasd_change_state(struct dasd_device *device)
static void do_kick_device(struct work_struct *work) static void do_kick_device(struct work_struct *work)
{ {
struct dasd_device *device = container_of(work, struct dasd_device, kick_work); struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
mutex_lock(&device->state_mutex);
dasd_change_state(device); dasd_change_state(device);
mutex_unlock(&device->state_mutex);
dasd_schedule_device_bh(device); dasd_schedule_device_bh(device);
dasd_put_device(device); dasd_put_device(device);
} }
@ -539,18 +541,19 @@ void dasd_restore_device(struct dasd_device *device)
void dasd_set_target_state(struct dasd_device *device, int target) void dasd_set_target_state(struct dasd_device *device, int target)
{ {
dasd_get_device(device); dasd_get_device(device);
mutex_lock(&device->state_mutex);
/* If we are in probeonly mode stop at DASD_STATE_READY. */ /* If we are in probeonly mode stop at DASD_STATE_READY. */
if (dasd_probeonly && target > DASD_STATE_READY) if (dasd_probeonly && target > DASD_STATE_READY)
target = DASD_STATE_READY; target = DASD_STATE_READY;
if (device->target != target) { if (device->target != target) {
if (device->state == target) { if (device->state == target)
wake_up(&dasd_init_waitq); wake_up(&dasd_init_waitq);
dasd_put_device(device);
}
device->target = target; device->target = target;
} }
if (device->state != device->target) if (device->state != device->target)
dasd_change_state(device); dasd_change_state(device);
mutex_unlock(&device->state_mutex);
dasd_put_device(device);
} }
/* /*
@ -1000,12 +1003,20 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
return; return;
} }
device = (struct dasd_device *) cqr->startdev; device = dasd_device_from_cdev_locked(cdev);
if (device == NULL || if (IS_ERR(device)) {
device != dasd_device_from_cdev_locked(cdev) || DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { "unable to get device from cdev");
return;
}
if (!cqr->startdev ||
device != cqr->startdev ||
strncmp(cqr->startdev->discipline->ebcname,
(char *) &cqr->magic, 4)) {
DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
"invalid device in request"); "invalid device in request");
dasd_put_device(device);
return; return;
} }
@ -1692,7 +1703,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
cqr, rc); cqr, rc);
} else { } else {
cqr->stopclk = get_clock(); cqr->stopclk = get_clock();
rc = 1;
} }
break; break;
default: /* already finished or clear pending - do nothing */ default: /* already finished or clear pending - do nothing */
@ -2170,9 +2180,13 @@ static void dasd_flush_request_queue(struct dasd_block *block)
static int dasd_open(struct block_device *bdev, fmode_t mode) static int dasd_open(struct block_device *bdev, fmode_t mode)
{ {
struct dasd_block *block = bdev->bd_disk->private_data; struct dasd_block *block = bdev->bd_disk->private_data;
struct dasd_device *base = block->base; struct dasd_device *base;
int rc; int rc;
if (!block)
return -ENODEV;
base = block->base;
atomic_inc(&block->open_count); atomic_inc(&block->open_count);
if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
rc = -ENODEV; rc = -ENODEV;
@ -2285,11 +2299,6 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
if (ret) if (ret)
pr_warning("%s: Setting the DASD online failed with rc=%d\n", pr_warning("%s: Setting the DASD online failed with rc=%d\n",
dev_name(&cdev->dev), ret); dev_name(&cdev->dev), ret);
else {
struct dasd_device *device = dasd_device_from_cdev(cdev);
wait_event(dasd_init_waitq, _wait_for_device(device));
dasd_put_device(device);
}
} }
/* /*
@ -2424,6 +2433,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
} else } else
pr_debug("dasd_generic device %s found\n", pr_debug("dasd_generic device %s found\n",
dev_name(&cdev->dev)); dev_name(&cdev->dev));
wait_event(dasd_init_waitq, _wait_for_device(device));
dasd_put_device(device); dasd_put_device(device);
return rc; return rc;
} }
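
The dasd core changes above add device->state_mutex so that state transitions requested via dasd_set_target_state() and the kick worker no longer race, and they move the wait for the device to become ready into dasd_generic_set_online(). Reduced to a schematic sketch of the locking shape (dev_sm, set_target and advance_state are invented names, advance_state standing in for dasd_change_state()):

	#include <linux/mutex.h>

	struct dev_sm {
		struct mutex state_mutex;
		int state, target;
	};

	/* Invented sketch: every state/target update runs under state_mutex, so
	 * concurrent callers always observe a consistent pair. */
	static void set_target(struct dev_sm *d, int target)
	{
		mutex_lock(&d->state_mutex);
		d->target = target;
		if (d->state != d->target)
			advance_state(d);	/* placeholder for dasd_change_state() */
		mutex_unlock(&d->state_mutex);
	}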

View File

@ -874,12 +874,19 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
ssize_t len; ssize_t len;
device = dasd_device_from_cdev(to_ccwdev(dev)); device = dasd_device_from_cdev(to_ccwdev(dev));
if (!IS_ERR(device) && device->discipline) { if (IS_ERR(device))
goto out;
else if (!device->discipline) {
dasd_put_device(device);
goto out;
} else {
len = snprintf(buf, PAGE_SIZE, "%s\n", len = snprintf(buf, PAGE_SIZE, "%s\n",
device->discipline->name); device->discipline->name);
dasd_put_device(device); dasd_put_device(device);
} else return len;
len = snprintf(buf, PAGE_SIZE, "none\n"); }
out:
len = snprintf(buf, PAGE_SIZE, "none\n");
return len; return len;
} }

View File

@ -88,6 +88,7 @@ void dasd_gendisk_free(struct dasd_block *block)
if (block->gdp) { if (block->gdp) {
del_gendisk(block->gdp); del_gendisk(block->gdp);
block->gdp->queue = NULL; block->gdp->queue = NULL;
block->gdp->private_data = NULL;
put_disk(block->gdp); put_disk(block->gdp);
block->gdp = NULL; block->gdp = NULL;
} }

View File

@ -368,6 +368,7 @@ struct dasd_device {
/* Device state and target state. */ /* Device state and target state. */
int state, target; int state, target;
struct mutex state_mutex;
int stopped; /* device (ccw_device_start) was stopped */ int stopped; /* device (ccw_device_start) was stopped */
/* reference count. */ /* reference count. */

View File

@ -165,51 +165,32 @@ static const struct file_operations dasd_devices_file_ops = {
.release = seq_release, .release = seq_release,
}; };
static int
dasd_calc_metrics(char *page, char **start, off_t off,
int count, int *eof, int len)
{
len = (len > off) ? len - off : 0;
if (len > count)
len = count;
if (len < count)
*eof = 1;
*start = page + off;
return len;
}
#ifdef CONFIG_DASD_PROFILE #ifdef CONFIG_DASD_PROFILE
static char * static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
dasd_statistics_array(char *str, unsigned int *array, int factor)
{ {
int i; int i;
for (i = 0; i < 32; i++) { for (i = 0; i < 32; i++) {
str += sprintf(str, "%7d ", array[i] / factor); seq_printf(m, "%7d ", array[i] / factor);
if (i == 15) if (i == 15)
str += sprintf(str, "\n"); seq_putc(m, '\n');
} }
str += sprintf(str,"\n"); seq_putc(m, '\n');
return str;
} }
#endif /* CONFIG_DASD_PROFILE */ #endif /* CONFIG_DASD_PROFILE */
static int static int dasd_stats_proc_show(struct seq_file *m, void *v)
dasd_statistics_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{ {
unsigned long len;
#ifdef CONFIG_DASD_PROFILE #ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info_t *prof; struct dasd_profile_info_t *prof;
char *str;
int factor; int factor;
/* check for active profiling */ /* check for active profiling */
if (dasd_profile_level == DASD_PROFILE_OFF) { if (dasd_profile_level == DASD_PROFILE_OFF) {
len = sprintf(page, "Statistics are off - they might be " seq_printf(m, "Statistics are off - they might be "
"switched on using 'echo set on > " "switched on using 'echo set on > "
"/proc/dasd/statistics'\n"); "/proc/dasd/statistics'\n");
return dasd_calc_metrics(page, start, off, count, eof, len); return 0;
} }
prof = &dasd_global_profile; prof = &dasd_global_profile;
@ -217,47 +198,49 @@ dasd_statistics_read(char *page, char **start, off_t off,
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
factor *= 10); factor *= 10);
str = page; seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs);
str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); seq_printf(m, "with %u sectors(512B each)\n",
str += sprintf(str, "with %u sectors(512B each)\n",
prof->dasd_io_sects); prof->dasd_io_sects);
str += sprintf(str, "Scale Factor is %d\n", factor); seq_printf(m, "Scale Factor is %d\n", factor);
str += sprintf(str, seq_printf(m,
" __<4 ___8 __16 __32 __64 _128 " " __<4 ___8 __16 __32 __64 _128 "
" _256 _512 __1k __2k __4k __8k " " _256 _512 __1k __2k __4k __8k "
" _16k _32k _64k 128k\n"); " _16k _32k _64k 128k\n");
str += sprintf(str, seq_printf(m,
" _256 _512 __1M __2M __4M __8M " " _256 _512 __1M __2M __4M __8M "
" _16M _32M _64M 128M 256M 512M " " _16M _32M _64M 128M 256M 512M "
" __1G __2G __4G " " _>4G\n"); " __1G __2G __4G " " _>4G\n");
str += sprintf(str, "Histogram of sizes (512B secs)\n"); seq_printf(m, "Histogram of sizes (512B secs)\n");
str = dasd_statistics_array(str, prof->dasd_io_secs, factor); dasd_statistics_array(m, prof->dasd_io_secs, factor);
str += sprintf(str, "Histogram of I/O times (microseconds)\n"); seq_printf(m, "Histogram of I/O times (microseconds)\n");
str = dasd_statistics_array(str, prof->dasd_io_times, factor); dasd_statistics_array(m, prof->dasd_io_times, factor);
str += sprintf(str, "Histogram of I/O times per sector\n"); seq_printf(m, "Histogram of I/O times per sector\n");
str = dasd_statistics_array(str, prof->dasd_io_timps, factor); dasd_statistics_array(m, prof->dasd_io_timps, factor);
str += sprintf(str, "Histogram of I/O time till ssch\n"); seq_printf(m, "Histogram of I/O time till ssch\n");
str = dasd_statistics_array(str, prof->dasd_io_time1, factor); dasd_statistics_array(m, prof->dasd_io_time1, factor);
str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); seq_printf(m, "Histogram of I/O time between ssch and irq\n");
str = dasd_statistics_array(str, prof->dasd_io_time2, factor); dasd_statistics_array(m, prof->dasd_io_time2, factor);
str += sprintf(str, "Histogram of I/O time between ssch " seq_printf(m, "Histogram of I/O time between ssch "
"and irq per sector\n"); "and irq per sector\n");
str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
str += sprintf(str, "Histogram of I/O time between irq and end\n"); seq_printf(m, "Histogram of I/O time between irq and end\n");
str = dasd_statistics_array(str, prof->dasd_io_time3, factor); dasd_statistics_array(m, prof->dasd_io_time3, factor);
str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
len = str - page;
#else #else
len = sprintf(page, "Statistics are not activated in this kernel\n"); seq_printf(m, "Statistics are not activated in this kernel\n");
#endif #endif
return dasd_calc_metrics(page, start, off, count, eof, len); return 0;
} }
static int static int dasd_stats_proc_open(struct inode *inode, struct file *file)
dasd_statistics_write(struct file *file, const char __user *user_buf, {
unsigned long user_len, void *data) return single_open(file, dasd_stats_proc_show, NULL);
}
static ssize_t dasd_stats_proc_write(struct file *file,
const char __user *user_buf, size_t user_len, loff_t *pos)
{ {
#ifdef CONFIG_DASD_PROFILE #ifdef CONFIG_DASD_PROFILE
char *buffer, *str; char *buffer, *str;
@ -308,6 +291,15 @@ out_error:
#endif /* CONFIG_DASD_PROFILE */ #endif /* CONFIG_DASD_PROFILE */
} }
static const struct file_operations dasd_stats_proc_fops = {
.owner = THIS_MODULE,
.open = dasd_stats_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = dasd_stats_proc_write,
};
/* /*
* Create dasd proc-fs entries. * Create dasd proc-fs entries.
* In case creation failed, cleanup and return -ENOENT. * In case creation failed, cleanup and return -ENOENT.
@ -324,13 +316,12 @@ dasd_proc_init(void)
&dasd_devices_file_ops); &dasd_devices_file_ops);
if (!dasd_devices_entry) if (!dasd_devices_entry)
goto out_nodevices; goto out_nodevices;
dasd_statistics_entry = create_proc_entry("statistics", dasd_statistics_entry = proc_create("statistics",
S_IFREG | S_IRUGO | S_IWUSR, S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry); dasd_proc_root_entry,
&dasd_stats_proc_fops);
if (!dasd_statistics_entry) if (!dasd_statistics_entry)
goto out_nostatistics; goto out_nostatistics;
dasd_statistics_entry->read_proc = dasd_statistics_read;
dasd_statistics_entry->write_proc = dasd_statistics_write;
return 0; return 0;
out_nostatistics: out_nostatistics:
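The conversion above follows the standard seq_file recipe: a show function that prints into the seq_file, single_open() wiring, and proc_create() instead of create_proc_entry() plus read_proc/write_proc hooks. A self-contained sketch of the same recipe for a hypothetical "example" proc file (not the dasd code itself):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int example_proc_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "hello from seq_file\n");  /* emit as much output as needed */
        return 0;                                /* seq_file handles buffering and offsets */
    }

    static int example_proc_open(struct inode *inode, struct file *file)
    {
        return single_open(file, example_proc_show, NULL);
    }

    static const struct file_operations example_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = example_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

    static int __init example_init(void)
    {
        if (!proc_create("example", S_IRUGO, NULL, &example_proc_fops))
            return -ENOMEM;
        return 0;
    }

    static void __exit example_exit(void)
    {
        remove_proc_entry("example", NULL);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");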


@ -15,6 +15,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/miscdevice.h> #include <linux/miscdevice.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h> #include <asm/ipl.h>
#include <asm/sclp.h> #include <asm/sclp.h>
#include <asm/setup.h> #include <asm/setup.h>
@ -40,12 +41,12 @@ enum arch_id {
/* dump system info */ /* dump system info */
struct sys_info { struct sys_info {
enum arch_id arch; enum arch_id arch;
unsigned long sa_base; unsigned long sa_base;
u32 sa_size; u32 sa_size;
int cpu_map[NR_CPUS]; int cpu_map[NR_CPUS];
unsigned long mem_size; unsigned long mem_size;
union save_area lc_mask; struct save_area lc_mask;
}; };
struct ipib_info { struct ipib_info {
@ -183,52 +184,9 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
return 0; return 0;
} }
#ifdef __s390x__
/*
* Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
*/
static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
int cpu)
{
int i;
for (i = 0; i < 16; i++) {
out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
out->s390.acc_regs[i] = in->s390x.acc_regs[i];
out->s390.ctrl_regs[i] =
in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
}
/* locore for 31 bit has only space for fpregs 0,2,4,6 */
out->s390.fp_regs[0] = in->s390x.fp_regs[0];
out->s390.fp_regs[1] = in->s390x.fp_regs[2];
out->s390.fp_regs[2] = in->s390x.fp_regs[4];
out->s390.fp_regs[3] = in->s390x.fp_regs[6];
memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
out->s390.psw[1] |= 0x8; /* set bit 12 */
memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
out->s390.pref_reg = in->s390x.pref_reg;
out->s390.timer = in->s390x.timer;
out->s390.clk_cmp = in->s390x.clk_cmp;
}
static void __init s390x_to_s390_save_areas(void)
{
int i = 1;
static union save_area tmp;
while (zfcpdump_save_areas[i]) {
s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
i++;
}
}
#endif /* __s390x__ */
static int __init init_cpu_info(enum arch_id arch) static int __init init_cpu_info(enum arch_id arch)
{ {
union save_area *sa; struct save_area *sa;
/* get info for boot cpu from lowcore, stored in the HSA */ /* get info for boot cpu from lowcore, stored in the HSA */
@ -241,20 +199,12 @@ static int __init init_cpu_info(enum arch_id arch)
return -EIO; return -EIO;
} }
zfcpdump_save_areas[0] = sa; zfcpdump_save_areas[0] = sa;
#ifdef __s390x__
/* convert s390x regs to s390, if we are dumping an s390 Linux */
if (arch == ARCH_S390)
s390x_to_s390_save_areas();
#endif
return 0; return 0;
} }
static DEFINE_MUTEX(zcore_mutex); static DEFINE_MUTEX(zcore_mutex);
#define DUMP_VERSION 0x3 #define DUMP_VERSION 0x5
#define DUMP_MAGIC 0xa8190173618f23fdULL #define DUMP_MAGIC 0xa8190173618f23fdULL
#define DUMP_ARCH_S390X 2 #define DUMP_ARCH_S390X 2
#define DUMP_ARCH_S390 1 #define DUMP_ARCH_S390 1
@ -279,7 +229,14 @@ struct zcore_header {
u32 volnr; u32 volnr;
u32 build_arch; u32 build_arch;
u64 rmem_size; u64 rmem_size;
char pad2[4016]; u8 mvdump;
u16 cpu_cnt;
u16 real_cpu_cnt;
u8 end_pad1[0x200-0x061];
u64 mvdump_sign;
u64 mvdump_zipl_time;
u8 end_pad2[0x800-0x210];
u32 lc_vec[512];
} __attribute__((packed,__aligned__(16))); } __attribute__((packed,__aligned__(16)));
static struct zcore_header zcore_header = { static struct zcore_header zcore_header = {
@ -289,7 +246,7 @@ static struct zcore_header zcore_header = {
.dump_level = 0, .dump_level = 0,
.page_size = PAGE_SIZE, .page_size = PAGE_SIZE,
.mem_start = 0, .mem_start = 0,
#ifdef __s390x__ #ifdef CONFIG_64BIT
.build_arch = DUMP_ARCH_S390X, .build_arch = DUMP_ARCH_S390X,
#else #else
.build_arch = DUMP_ARCH_S390, .build_arch = DUMP_ARCH_S390,
@ -340,11 +297,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
unsigned long prefix; unsigned long prefix;
unsigned long sa_off, len, buf_off; unsigned long sa_off, len, buf_off;
if (sys_info.arch == ARCH_S390) prefix = zfcpdump_save_areas[i]->pref_reg;
prefix = zfcpdump_save_areas[i]->s390.pref_reg;
else
prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
sa_start = prefix + sys_info.sa_base; sa_start = prefix + sys_info.sa_base;
sa_end = prefix + sys_info.sa_base + sys_info.sa_size; sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
@ -561,34 +514,39 @@ static const struct file_operations zcore_reipl_fops = {
.release = zcore_reipl_release, .release = zcore_reipl_release,
}; };
#ifdef CONFIG_32BIT
static void __init set_s390_lc_mask(union save_area *map) static void __init set_lc_mask(struct save_area *map)
{ {
memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); memset(&map->ext_save, 0xff, sizeof(map->ext_save));
memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); memset(&map->timer, 0xff, sizeof(map->timer));
memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); memset(&map->psw, 0xff, sizeof(map->psw));
memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
} }
static void __init set_s390x_lc_mask(union save_area *map) #else /* CONFIG_32BIT */
static void __init set_lc_mask(struct save_area *map)
{ {
memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); memset(&map->psw, 0xff, sizeof(map->psw));
memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); memset(&map->timer, 0xff, sizeof(map->timer));
memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
} }
#endif /* CONFIG_32BIT */
/* /*
* Initialize dump globals for a given architecture * Initialize dump globals for a given architecture
*/ */
@ -599,21 +557,18 @@ static int __init sys_info_init(enum arch_id arch)
switch (arch) { switch (arch) {
case ARCH_S390X: case ARCH_S390X:
pr_alert("DETECTED 'S390X (64 bit) OS'\n"); pr_alert("DETECTED 'S390X (64 bit) OS'\n");
sys_info.sa_base = SAVE_AREA_BASE_S390X;
sys_info.sa_size = sizeof(struct save_area_s390x);
set_s390x_lc_mask(&sys_info.lc_mask);
break; break;
case ARCH_S390: case ARCH_S390:
pr_alert("DETECTED 'S390 (32 bit) OS'\n"); pr_alert("DETECTED 'S390 (32 bit) OS'\n");
sys_info.sa_base = SAVE_AREA_BASE_S390;
sys_info.sa_size = sizeof(struct save_area_s390);
set_s390_lc_mask(&sys_info.lc_mask);
break; break;
default: default:
pr_alert("0x%x is an unknown architecture.\n",arch); pr_alert("0x%x is an unknown architecture.\n",arch);
return -EINVAL; return -EINVAL;
} }
sys_info.sa_base = SAVE_AREA_BASE;
sys_info.sa_size = sizeof(struct save_area);
sys_info.arch = arch; sys_info.arch = arch;
set_lc_mask(&sys_info.lc_mask);
rc = init_cpu_info(arch); rc = init_cpu_info(arch);
if (rc) if (rc)
return rc; return rc;
@ -660,8 +615,9 @@ static int __init get_mem_size(unsigned long *mem)
static int __init zcore_header_init(int arch, struct zcore_header *hdr) static int __init zcore_header_init(int arch, struct zcore_header *hdr)
{ {
int rc; int rc, i;
unsigned long memory = 0; unsigned long memory = 0;
u32 prefix;
if (arch == ARCH_S390X) if (arch == ARCH_S390X)
hdr->arch_id = DUMP_ARCH_S390X; hdr->arch_id = DUMP_ARCH_S390X;
@ -676,6 +632,14 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
hdr->num_pages = memory / PAGE_SIZE; hdr->num_pages = memory / PAGE_SIZE;
hdr->tod = get_clock(); hdr->tod = get_clock();
get_cpu_id(&hdr->cpu_id); get_cpu_id(&hdr->cpu_id);
for (i = 0; zfcpdump_save_areas[i]; i++) {
prefix = zfcpdump_save_areas[i]->pref_reg;
hdr->real_cpu_cnt++;
if (!prefix)
continue;
hdr->lc_vec[hdr->cpu_cnt] = prefix;
hdr->cpu_cnt++;
}
return 0; return 0;
} }
@ -741,14 +705,21 @@ static int __init zcore_init(void)
if (rc) if (rc)
goto fail; goto fail;
#ifndef __s390x__ #ifdef CONFIG_64BIT
if (arch == ARCH_S390) {
pr_alert("The 64-bit dump tool cannot be used for a "
"32-bit system\n");
rc = -EINVAL;
goto fail;
}
#else /* CONFIG_64BIT */
if (arch == ARCH_S390X) { if (arch == ARCH_S390X) {
pr_alert("The 32-bit dump tool cannot be used for a " pr_alert("The 32-bit dump tool cannot be used for a "
"64-bit system\n"); "64-bit system\n");
rc = -EINVAL; rc = -EINVAL;
goto fail; goto fail;
} }
#endif #endif /* CONFIG_64BIT */
rc = sys_info_init(arch); rc = sys_info_init(arch);
if (rc) if (rc)
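The explicit end_pad1[0x200-0x061] and end_pad2[0x800-0x210] arrays in the new zcore_header suggest that mvdump_sign is meant to sit at offset 0x200, lc_vec at 0x800, and the whole header to fill one 4 KiB page. If so, compile-time checks placed next to the struct would document that; the checks below are illustrative only and are not part of the commit:

    #include <linux/kernel.h>   /* BUILD_BUG_ON */
    #include <linux/stddef.h>   /* offsetof */

    static inline void zcore_header_layout_check(void)
    {
        BUILD_BUG_ON(offsetof(struct zcore_header, mvdump_sign) != 0x200);
        BUILD_BUG_ON(offsetof(struct zcore_header, lc_vec) != 0x800);
        BUILD_BUG_ON(sizeof(struct zcore_header) != 0x1000);
    }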


@ -224,8 +224,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
*/ */
void ccw_request_handler(struct ccw_device *cdev) void ccw_request_handler(struct ccw_device *cdev)
{ {
struct irb *irb = (struct irb *)&S390_lowcore.irb;
struct ccw_request *req = &cdev->private->req; struct ccw_request *req = &cdev->private->req;
struct irb *irb = (struct irb *) __LC_IRB;
enum io_status status; enum io_status status;
int rc = -EOPNOTSUPP; int rc = -EOPNOTSUPP;


@ -574,7 +574,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
secm_area->request.length = 0x0050; secm_area->request.length = 0x0050;
secm_area->request.code = 0x0016; secm_area->request.code = 0x0016;
secm_area->key = PAGE_DEFAULT_KEY; secm_area->key = PAGE_DEFAULT_KEY >> 4;
secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
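The >> 4 shifts here (and in the qdio hunks below) read as a storage-key fix: PAGE_DEFAULT_KEY is a full storage-key byte with the access-control bits in its upper nibble, while the ks/kc/key request fields are only four bits wide and expect the access key itself. A tiny userspace illustration of the conversion (a nonzero key and the variable names are purely for demonstration):

    #include <stdio.h>

    int main(void)
    {
        unsigned char acc = 0x9;              /* example 4-bit access key */
        unsigned char key_byte = acc << 4;    /* storage-key byte, access bits in the upper nibble */
        unsigned char field = key_byte >> 4;  /* what a 4-bit key field in a request block wants */

        printf("storage key byte 0x%02x -> 4-bit key field 0x%x\n", key_byte, field);
        return 0;
    }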


@ -51,7 +51,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
{ {
struct chsc_private *private = sch->private; struct chsc_private *private = sch->private;
struct chsc_request *request = private->request; struct chsc_request *request = private->request;
struct irb *irb = (struct irb *)__LC_IRB; struct irb *irb = (struct irb *)&S390_lowcore.irb;
CHSC_LOG(4, "irb"); CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb)); CHSC_LOG_HEX(4, irb, sizeof(*irb));
@ -237,7 +237,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
int ret = -ENODEV; int ret = -ENODEV;
char dbf[10]; char dbf[10];
chsc_area->header.key = PAGE_DEFAULT_KEY; chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
while ((sch = chsc_get_next_subchannel(sch))) { while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock); spin_lock(sch->lock);
private = sch->private; private = sch->private;


@ -625,8 +625,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
/* /*
* Get interrupt information from lowcore * Get interrupt information from lowcore
*/ */
tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
irb = (struct irb *) __LC_IRB; irb = (struct irb *)&S390_lowcore.irb;
do { do {
kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
/* /*
@ -661,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
* We don't do this for VM because a tpi drops the cpu * We don't do this for VM because a tpi drops the cpu
* out of the sie which costs more cycles than it saves. * out of the sie which costs more cycles than it saves.
*/ */
} while (!MACHINE_IS_VM && tpi (NULL) != 0); } while (MACHINE_IS_LPAR && tpi(NULL) != 0);
irq_exit(); irq_exit();
set_irq_regs(old_regs); set_irq_regs(old_regs);
} }
@ -682,10 +682,10 @@ static int cio_tpi(void)
struct irb *irb; struct irb *irb;
int irq_context; int irq_context;
tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
if (tpi(NULL) != 1) if (tpi(NULL) != 1)
return 0; return 0;
irb = (struct irb *) __LC_IRB; irb = (struct irb *)&S390_lowcore.irb;
/* Store interrupt response block to lowcore. */ /* Store interrupt response block to lowcore. */
if (tsch(tpi_info->schid, irb) != 0) if (tsch(tpi_info->schid, irb) != 0)
/* Not status pending or not operational. */ /* Not status pending or not operational. */
@ -885,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
struct tpi_info ti; struct tpi_info ti;
if (tpi(&ti)) { if (tpi(&ti)) {
tsch(ti.schid, (struct irb *)__LC_IRB); tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
if (schid_equal(&ti.schid, &schid)) if (schid_equal(&ti.schid, &schid))
return 0; return 0;
} }
@ -1083,7 +1083,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
struct subchannel_id schid; struct subchannel_id schid;
struct schib schib; struct schib schib;
schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
if (!schid.one) if (!schid.one)
return -ENODEV; return -ENODEV;
if (stsch(schid, &schib)) if (stsch(schid, &schib))
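These hunks replace raw __LC_* byte offsets with typed fields of the lowcore structure (S390_lowcore.irb, S390_lowcore.subchannel_id), so the compiler checks the type and the offset is maintained in exactly one place. A small, self-contained userspace analogue of the difference (lowcore_like is a made-up miniature, not the real layout):

    #include <stdio.h>
    #include <stddef.h>

    struct lowcore_like {               /* made-up miniature of a fixed control block */
        unsigned int subchannel_id;
        unsigned char irb[64];
    };

    #define RAW_IRB_OFFSET offsetof(struct lowcore_like, irb)   /* the __LC_*-style constant */

    int main(void)
    {
        static struct lowcore_like lc;
        void *via_offset = (char *)&lc + RAW_IRB_OFFSET;   /* old style: pointer + constant */
        void *via_field  = &lc.irb;                        /* new style: typed field access */

        printf("same address: %s\n", via_offset == via_field ? "yes" : "no");
        return 0;
    }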


@ -8,15 +8,16 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>, * Heiko Carstens <heiko.carstens@de.ibm.com>,
*/ */
#include <linux/semaphore.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/wait.h>
#include <asm/crw.h> #include <asm/crw.h>
static struct semaphore crw_semaphore;
static DEFINE_MUTEX(crw_handler_mutex); static DEFINE_MUTEX(crw_handler_mutex);
static crw_handler_t crw_handlers[NR_RSCS]; static crw_handler_t crw_handlers[NR_RSCS];
static atomic_t crw_nr_req = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
/** /**
* crw_register_handler() - register a channel report word handler * crw_register_handler() - register a channel report word handler
@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc)
static int crw_collect_info(void *unused) static int crw_collect_info(void *unused)
{ {
struct crw crw[2]; struct crw crw[2];
int ccode; int ccode, signal;
unsigned int chain; unsigned int chain;
int ignore;
repeat: repeat:
ignore = down_interruptible(&crw_semaphore); signal = wait_event_interruptible(crw_handler_wait_q,
atomic_read(&crw_nr_req) > 0);
if (unlikely(signal))
atomic_inc(&crw_nr_req);
chain = 0; chain = 0;
while (1) { while (1) {
crw_handler_t handler; crw_handler_t handler;
@ -122,25 +125,23 @@ repeat:
/* chain is always 0 or 1 here. */ /* chain is always 0 or 1 here. */
chain = crw[chain].chn ? chain + 1 : 0; chain = crw[chain].chn ? chain + 1 : 0;
} }
if (atomic_dec_and_test(&crw_nr_req))
wake_up(&crw_handler_wait_q);
goto repeat; goto repeat;
return 0; return 0;
} }
void crw_handle_channel_report(void) void crw_handle_channel_report(void)
{ {
up(&crw_semaphore); atomic_inc(&crw_nr_req);
wake_up(&crw_handler_wait_q);
} }
/* void crw_wait_for_channel_report(void)
* Separate initcall needed for semaphore initialization since
* crw_handle_channel_report might be called before crw_machine_check_init.
*/
static int __init crw_init_semaphore(void)
{ {
init_MUTEX_LOCKED(&crw_semaphore); crw_handle_channel_report();
return 0; wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
} }
pure_initcall(crw_init_semaphore);
/* /*
* Machine checks for the channel subsystem must be enabled * Machine checks for the channel subsystem must be enabled
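The crw rework swaps a semaphore for an atomic request counter plus a wait queue, which is also what makes crw_wait_for_channel_report() possible: a caller can trigger one collection pass and then sleep until the counter drops back to zero. A hedged sketch of that producer/worker/waiter shape with made-up names (the real code uses wait_event_interruptible() in the worker and compensates the counter when a signal interrupts it):

    #include <linux/wait.h>
    #include <asm/atomic.h>

    static atomic_t pending = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(work_wait_q);

    static void my_post_work(void)      /* producer, like crw_handle_channel_report() */
    {
        atomic_inc(&pending);
        wake_up(&work_wait_q);
    }

    static int my_worker(void *unused)  /* kthread body, like crw_collect_info() */
    {
        for (;;) {
            wait_event(work_wait_q, atomic_read(&pending) > 0);
            /* ... drain the event source here ... */
            if (atomic_dec_and_test(&pending))
                wake_up(&work_wait_q);  /* unblock my_wait_idle() below */
        }
        return 0;
    }

    static void my_wait_idle(void)      /* waiter, like crw_wait_for_channel_report() */
    {
        my_post_work();                 /* force at least one collection pass */
        wait_event(work_wait_q, atomic_read(&pending) == 0);
    }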


@ -18,6 +18,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h> #include <asm/isc.h>
#include <asm/crw.h> #include <asm/crw.h>
@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
if (!get_device(&sch->dev)) if (!get_device(&sch->dev))
return; return;
sch->todo = todo; sch->todo = todo;
if (!queue_work(slow_path_wq, &sch->todo_work)) { if (!queue_work(cio_work_q, &sch->todo_work)) {
/* Already queued, release workqueue ref. */ /* Already queued, release workqueue ref. */
put_device(&sch->dev); put_device(&sch->dev);
} }
@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused)
} }
static DECLARE_WORK(slow_path_work, css_slow_path_func); static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq; struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid) void css_schedule_eval(struct subchannel_id schid)
{ {
@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid)
spin_lock_irqsave(&slow_subchannel_lock, flags); spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid); idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1); atomic_set(&css_eval_scheduled, 1);
queue_work(slow_path_wq, &slow_path_work); queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags); spin_unlock_irqrestore(&slow_subchannel_lock, flags);
} }
@ -563,7 +564,7 @@ void css_schedule_eval_all(void)
spin_lock_irqsave(&slow_subchannel_lock, flags); spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set); idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1); atomic_set(&css_eval_scheduled, 1);
queue_work(slow_path_wq, &slow_path_work); queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags); spin_unlock_irqrestore(&slow_subchannel_lock, flags);
} }
@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void)
spin_lock_irqsave(&slow_subchannel_lock, flags); spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, unreg_set); idset_add_set(slow_subchannel_set, unreg_set);
atomic_set(&css_eval_scheduled, 1); atomic_set(&css_eval_scheduled, 1);
queue_work(slow_path_wq, &slow_path_work); queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags); spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(unreg_set); idset_free(unreg_set);
} }
void css_wait_for_slow_path(void) void css_wait_for_slow_path(void)
{ {
flush_workqueue(slow_path_wq); flush_workqueue(cio_work_q);
} }
/* Schedule reprobing of all unregistered subchannels. */ /* Schedule reprobing of all unregistered subchannels. */
@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void)
ret = css_bus_init(); ret = css_bus_init();
if (ret) if (ret)
return ret; return ret;
cio_work_q = create_singlethread_workqueue("cio");
if (!cio_work_q) {
ret = -ENOMEM;
goto out_bus;
}
ret = io_subchannel_init(); ret = io_subchannel_init();
if (ret) if (ret)
css_bus_cleanup(); goto out_wq;
return ret; return ret;
out_wq:
destroy_workqueue(cio_work_q);
out_bus:
css_bus_cleanup();
return ret;
} }
subsys_initcall(channel_subsystem_init); subsys_initcall(channel_subsystem_init);
@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused)
struct css_driver *cssdrv = to_cssdriver(drv); struct css_driver *cssdrv = to_cssdriver(drv);
if (cssdrv->settle) if (cssdrv->settle)
cssdrv->settle(); return cssdrv->settle();
return 0; return 0;
} }
int css_complete_work(void)
{
int ret;
/* Wait for the evaluation of subchannels to finish. */
ret = wait_event_interruptible(css_eval_wq,
atomic_read(&css_eval_scheduled) == 0);
if (ret)
return -EINTR;
flush_workqueue(cio_work_q);
/* Wait for the subchannel type specific initialization to finish */
return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
/* /*
* Wait for the initialization of devices to finish, to make sure we are * Wait for the initialization of devices to finish, to make sure we are
* done with our setup if the search for the root device starts. * done with our setup if the search for the root device starts.
@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void)
{ {
/* Start initial subchannel evaluation. */ /* Start initial subchannel evaluation. */
css_schedule_eval_all(); css_schedule_eval_all();
/* Wait for the evaluation of subchannels to finish. */ css_complete_work();
wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); return 0;
/* Wait for the subchannel type specific initialization to finish */
return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
} }
subsys_initcall_sync(channel_subsystem_init_sync); subsys_initcall_sync(channel_subsystem_init_sync);
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int ret;
/* Handle pending CRW's. */
crw_wait_for_channel_report();
ret = css_complete_work();
return ret ? ret : count;
}
static const struct file_operations cio_settle_proc_fops = {
.write = cio_settle_write,
};
static int __init cio_settle_init(void)
{
struct proc_dir_entry *entry;
entry = proc_create("cio_settle", S_IWUSR, NULL,
&cio_settle_proc_fops);
if (!entry)
return -ENOMEM;
return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
int sch_is_pseudo_sch(struct subchannel *sch) int sch_is_pseudo_sch(struct subchannel *sch)
{ {
return sch == to_css(sch->dev.parent)->pseudo_subchannel; return sch == to_css(sch->dev.parent)->pseudo_subchannel;
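From userspace, the new /proc/cio_settle entry is used by writing anything to it; as the handler above shows, the write only returns once pending channel reports and subchannel evaluation have been processed, or fails with an errno such as EINTR if the wait is interrupted. A minimal caller:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/proc/cio_settle", O_WRONLY);

        if (fd < 0) {
            perror("open /proc/cio_settle");
            return 1;
        }
        /* the written bytes are not interpreted; the call simply blocks */
        if (write(fd, "1", 1) < 0) {
            perror("write /proc/cio_settle");
            close(fd);
            return 1;
        }
        close(fd);
        puts("cio work settled");
        return 0;
    }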


@ -95,7 +95,7 @@ struct css_driver {
int (*freeze)(struct subchannel *); int (*freeze)(struct subchannel *);
int (*thaw) (struct subchannel *); int (*thaw) (struct subchannel *);
int (*restore)(struct subchannel *); int (*restore)(struct subchannel *);
void (*settle)(void); int (*settle)(void);
const char *name; const char *name;
}; };
@ -146,12 +146,13 @@ extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */ /* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void); void css_schedule_eval_all(void);
int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *); int sch_is_pseudo_sch(struct subchannel *);
struct schib; struct schib;
int css_sch_is_valid(struct schib *); int css_sch_is_valid(struct schib *);
extern struct workqueue_struct *slow_path_wq; extern struct workqueue_struct *cio_work_q;
void css_wait_for_slow_path(void); void css_wait_for_slow_path(void);
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo); void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
#endif #endif


@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int); int);
static void recovery_func(unsigned long data); static void recovery_func(unsigned long data);
struct workqueue_struct *ccw_device_work;
wait_queue_head_t ccw_device_init_wq; wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count; atomic_t ccw_device_init_count;
@ -159,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch)
return 0; return 0;
} }
static void io_subchannel_settle(void) static int io_subchannel_settle(void)
{ {
wait_event(ccw_device_init_wq, int ret;
atomic_read(&ccw_device_init_count) == 0);
flush_workqueue(ccw_device_work); ret = wait_event_interruptible(ccw_device_init_wq,
atomic_read(&ccw_device_init_count) == 0);
if (ret)
return -EINTR;
flush_workqueue(cio_work_q);
return 0;
} }
static struct css_driver io_subchannel_driver = { static struct css_driver io_subchannel_driver = {
@ -188,27 +192,13 @@ int __init io_subchannel_init(void)
atomic_set(&ccw_device_init_count, 0); atomic_set(&ccw_device_init_count, 0);
setup_timer(&recovery_timer, recovery_func, 0); setup_timer(&recovery_timer, recovery_func, 0);
ccw_device_work = create_singlethread_workqueue("cio"); ret = bus_register(&ccw_bus_type);
if (!ccw_device_work) if (ret)
return -ENOMEM; return ret;
slow_path_wq = create_singlethread_workqueue("kslowcrw");
if (!slow_path_wq) {
ret = -ENOMEM;
goto out_err;
}
if ((ret = bus_register (&ccw_bus_type)))
goto out_err;
ret = css_driver_register(&io_subchannel_driver); ret = css_driver_register(&io_subchannel_driver);
if (ret) if (ret)
goto out_err; bus_unregister(&ccw_bus_type);
return 0;
out_err:
if (ccw_device_work)
destroy_workqueue(ccw_device_work);
if (slow_path_wq)
destroy_workqueue(slow_path_wq);
return ret; return ret;
} }
@ -1348,7 +1338,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
/* Not operational. */ /* Not operational. */
if (!cdev) if (!cdev)
return IO_SCH_UNREG; return IO_SCH_UNREG;
if (!ccw_device_notify(cdev, CIO_GONE)) if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
return IO_SCH_UNREG; return IO_SCH_UNREG;
return IO_SCH_ORPH_UNREG; return IO_SCH_ORPH_UNREG;
} }
@ -1356,12 +1346,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
if (!cdev) if (!cdev)
return IO_SCH_ATTACH; return IO_SCH_ATTACH;
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
if (!ccw_device_notify(cdev, CIO_GONE)) if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
return IO_SCH_UNREG_ATTACH; return IO_SCH_UNREG_ATTACH;
return IO_SCH_ORPH_ATTACH; return IO_SCH_ORPH_ATTACH;
} }
if ((sch->schib.pmcw.pam & sch->opm) == 0) { if ((sch->schib.pmcw.pam & sch->opm) == 0) {
if (!ccw_device_notify(cdev, CIO_NO_PATH)) if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
return IO_SCH_UNREG; return IO_SCH_UNREG;
return IO_SCH_DISC; return IO_SCH_DISC;
} }
@ -1410,6 +1400,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
rc = 0; rc = 0;
goto out_unlock; goto out_unlock;
case IO_SCH_VERIFY: case IO_SCH_VERIFY:
if (cdev->private->flags.resuming == 1) {
if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
ccw_device_set_notoper(cdev);
break;
}
}
/* Trigger path verification. */ /* Trigger path verification. */
io_subchannel_verify(sch); io_subchannel_verify(sch);
rc = 0; rc = 0;
@ -1448,7 +1444,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
break; break;
case IO_SCH_UNREG_ATTACH: case IO_SCH_UNREG_ATTACH:
/* Unregister ccw device. */ /* Unregister ccw device. */
ccw_device_unregister(cdev); if (!cdev->private->flags.resuming)
ccw_device_unregister(cdev);
break; break;
default: default:
break; break;
@ -1457,7 +1454,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
switch (action) { switch (action) {
case IO_SCH_ORPH_UNREG: case IO_SCH_ORPH_UNREG:
case IO_SCH_UNREG: case IO_SCH_UNREG:
css_sch_device_unregister(sch); if (!cdev || !cdev->private->flags.resuming)
css_sch_device_unregister(sch);
break; break;
case IO_SCH_ORPH_ATTACH: case IO_SCH_ORPH_ATTACH:
case IO_SCH_UNREG_ATTACH: case IO_SCH_UNREG_ATTACH:
@ -1779,26 +1777,42 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
{ {
struct subchannel *sch = to_subchannel(cdev->dev.parent); struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (cio_is_console(sch->schid)) spin_lock_irq(sch->lock);
goto out; if (cio_is_console(sch->schid)) {
cio_enable_subchannel(sch, (u32)(addr_t)sch);
goto out_unlock;
}
/* /*
* While we were sleeping, devices may have gone or become * While we were sleeping, devices may have gone or become
* available again. Kick re-detection. * available again. Kick re-detection.
*/ */
spin_lock_irq(sch->lock);
cdev->private->flags.resuming = 1; cdev->private->flags.resuming = 1;
css_schedule_eval(sch->schid);
spin_unlock_irq(sch->lock);
css_complete_work();
/* cdev may have been moved to a different subchannel. */
sch = to_subchannel(cdev->dev.parent);
spin_lock_irq(sch->lock);
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_OFFLINE)
goto out_unlock;
ccw_device_recognition(cdev); ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock); spin_unlock_irq(sch->lock);
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED); cdev->private->state == DEV_STATE_DISCONNECTED);
out: spin_lock_irq(sch->lock);
out_unlock:
cdev->private->flags.resuming = 0; cdev->private->flags.resuming = 0;
spin_unlock_irq(sch->lock);
} }
static int resume_handle_boxed(struct ccw_device *cdev) static int resume_handle_boxed(struct ccw_device *cdev)
{ {
cdev->private->state = DEV_STATE_BOXED; cdev->private->state = DEV_STATE_BOXED;
if (ccw_device_notify(cdev, CIO_BOXED)) if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
return 0; return 0;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV; return -ENODEV;
@ -1807,7 +1821,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
static int resume_handle_disc(struct ccw_device *cdev) static int resume_handle_disc(struct ccw_device *cdev)
{ {
cdev->private->state = DEV_STATE_DISCONNECTED; cdev->private->state = DEV_STATE_DISCONNECTED;
if (ccw_device_notify(cdev, CIO_GONE)) if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
return 0; return 0;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
return -ENODEV; return -ENODEV;
@ -1816,40 +1830,31 @@ static int resume_handle_disc(struct ccw_device *cdev)
static int ccw_device_pm_restore(struct device *dev) static int ccw_device_pm_restore(struct device *dev)
{ {
struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device *cdev = to_ccwdev(dev);
struct subchannel *sch = to_subchannel(cdev->dev.parent); struct subchannel *sch;
int ret = 0, cm_enabled; int ret = 0;
__ccw_device_pm_restore(cdev); __ccw_device_pm_restore(cdev);
sch = to_subchannel(cdev->dev.parent);
spin_lock_irq(sch->lock); spin_lock_irq(sch->lock);
if (cio_is_console(sch->schid)) { if (cio_is_console(sch->schid))
cio_enable_subchannel(sch, (u32)(addr_t)sch);
spin_unlock_irq(sch->lock);
goto out_restore; goto out_restore;
}
cdev->private->flags.donotify = 0;
/* check recognition results */ /* check recognition results */
switch (cdev->private->state) { switch (cdev->private->state) {
case DEV_STATE_OFFLINE: case DEV_STATE_OFFLINE:
case DEV_STATE_ONLINE:
cdev->private->flags.donotify = 0;
break; break;
case DEV_STATE_BOXED: case DEV_STATE_BOXED:
ret = resume_handle_boxed(cdev); ret = resume_handle_boxed(cdev);
spin_unlock_irq(sch->lock);
if (ret) if (ret)
goto out; goto out_unlock;
goto out_restore; goto out_restore;
case DEV_STATE_DISCONNECTED:
goto out_disc_unlock;
default: default:
goto out_unreg_unlock; ret = resume_handle_disc(cdev);
} if (ret)
/* check if the device id has changed */ goto out_unlock;
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { goto out_restore;
CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
"changed from %04x to %04x)\n",
sch->schid.ssid, sch->schid.sch_no,
cdev->private->dev_id.devno,
sch->schib.pmcw.dev);
goto out_unreg_unlock;
} }
/* check if the device type has changed */ /* check if the device type has changed */
if (!ccw_device_test_sense_data(cdev)) { if (!ccw_device_test_sense_data(cdev)) {
@ -1858,24 +1863,30 @@ static int ccw_device_pm_restore(struct device *dev)
ret = -ENODEV; ret = -ENODEV;
goto out_unlock; goto out_unlock;
} }
if (!cdev->online) { if (!cdev->online)
ret = 0; goto out_unlock;
if (ccw_device_online(cdev)) {
ret = resume_handle_disc(cdev);
if (ret)
goto out_unlock;
goto out_restore;
}
spin_unlock_irq(sch->lock);
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
spin_lock_irq(sch->lock);
if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
ret = -ENODEV;
goto out_unlock; goto out_unlock;
} }
ret = ccw_device_online(cdev);
if (ret)
goto out_disc_unlock;
cm_enabled = cdev->private->cmb != NULL; /* reenable cmf, if needed */
spin_unlock_irq(sch->lock); if (cdev->private->cmb) {
spin_unlock_irq(sch->lock);
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
if (cdev->private->state != DEV_STATE_ONLINE) {
spin_lock_irq(sch->lock);
goto out_disc_unlock;
}
if (cm_enabled) {
ret = ccw_set_cmf(cdev, 1); ret = ccw_set_cmf(cdev, 1);
spin_lock_irq(sch->lock);
if (ret) { if (ret) {
CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
"(rc=%d)\n", cdev->private->dev_id.ssid, "(rc=%d)\n", cdev->private->dev_id.ssid,
@ -1885,21 +1896,11 @@ static int ccw_device_pm_restore(struct device *dev)
} }
out_restore: out_restore:
spin_unlock_irq(sch->lock);
if (cdev->online && cdev->drv && cdev->drv->restore) if (cdev->online && cdev->drv && cdev->drv->restore)
ret = cdev->drv->restore(cdev); ret = cdev->drv->restore(cdev);
out:
return ret; return ret;
out_disc_unlock:
ret = resume_handle_disc(cdev);
spin_unlock_irq(sch->lock);
if (ret)
return ret;
goto out_restore;
out_unreg_unlock:
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
ret = -ENODEV;
out_unlock: out_unlock:
spin_unlock_irq(sch->lock); spin_unlock_irq(sch->lock);
return ret; return ret;
@ -2028,7 +2029,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
/* Get workqueue ref. */ /* Get workqueue ref. */
if (!get_device(&cdev->dev)) if (!get_device(&cdev->dev))
return; return;
if (!queue_work(slow_path_wq, &cdev->private->todo_work)) { if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
/* Already queued, release workqueue ref. */ /* Already queued, release workqueue ref. */
put_device(&cdev->dev); put_device(&cdev->dev);
} }
@ -2041,5 +2042,4 @@ EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister); EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid); EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type); EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
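io_subchannel_settle() above becomes interruptible so that a signal can break a /proc/cio_settle wait; the css layer then propagates its -EINTR through css_settle() and css_complete_work(). The general shape of such a settle callback, sketched with stand-in bookkeeping names (init_wq, init_count, my_work_q):

    #include <linux/wait.h>
    #include <linux/workqueue.h>
    #include <asm/atomic.h>

    static DECLARE_WAIT_QUEUE_HEAD(init_wq);        /* stand-ins for the driver's own state */
    static atomic_t init_count = ATOMIC_INIT(0);
    static struct workqueue_struct *my_work_q;

    static int my_settle(void)
    {
        if (wait_event_interruptible(init_wq, atomic_read(&init_count) == 0))
            return -EINTR;          /* ends up as the write(2) result on /proc/cio_settle */
        flush_workqueue(my_work_q); /* then drain any queued per-device work */
        return 0;
    }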


@ -4,7 +4,7 @@
#include <asm/ccwdev.h> #include <asm/ccwdev.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/notifier.h>
#include "io_sch.h" #include "io_sch.h"
/* /*
@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_BOXED); cdev->private->state == DEV_STATE_BOXED);
} }
extern struct workqueue_struct *ccw_device_work;
extern wait_queue_head_t ccw_device_init_wq; extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count; extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void); int __init io_subchannel_init(void);


@ -313,21 +313,43 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
} }
} }
/**
* ccw_device_notify() - inform the device's driver about an event
* @cdev: device for which an event occurred
* @event: event that occurred
*
* Returns:
* -%EINVAL if the device is offline or has no driver.
* -%EOPNOTSUPP if the device's driver has no notifier registered.
* %NOTIFY_OK if the driver wants to keep the device.
* %NOTIFY_BAD if the driver doesn't want to keep the device.
*/
int ccw_device_notify(struct ccw_device *cdev, int event) int ccw_device_notify(struct ccw_device *cdev, int event)
{ {
int ret = -EINVAL;
if (!cdev->drv) if (!cdev->drv)
return 0; goto out;
if (!cdev->online) if (!cdev->online)
return 0; goto out;
CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
cdev->private->dev_id.ssid, cdev->private->dev_id.devno, cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
event); event);
return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; if (!cdev->drv->notify) {
ret = -EOPNOTSUPP;
goto out;
}
if (cdev->drv->notify(cdev, event))
ret = NOTIFY_OK;
else
ret = NOTIFY_BAD;
out:
return ret;
} }
static void ccw_device_oper_notify(struct ccw_device *cdev) static void ccw_device_oper_notify(struct ccw_device *cdev)
{ {
if (ccw_device_notify(cdev, CIO_OPER)) { if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
/* Reenable channel measurements, if needed. */ /* Reenable channel measurements, if needed. */
ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
return; return;
@ -361,14 +383,15 @@ ccw_device_done(struct ccw_device *cdev, int state)
case DEV_STATE_BOXED: case DEV_STATE_BOXED:
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no); cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) if (cdev->online &&
ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
cdev->private->flags.donotify = 0; cdev->private->flags.donotify = 0;
break; break;
case DEV_STATE_NOT_OPER: case DEV_STATE_NOT_OPER:
CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no); cdev->private->dev_id.devno, sch->schid.sch_no);
if (!ccw_device_notify(cdev, CIO_GONE)) if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else else
ccw_device_set_disconnected(cdev); ccw_device_set_disconnected(cdev);
@ -378,7 +401,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
"%04x\n", cdev->private->dev_id.devno, "%04x\n", cdev->private->dev_id.devno,
sch->schid.sch_no); sch->schid.sch_no);
if (!ccw_device_notify(cdev, CIO_NO_PATH)) if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else else
ccw_device_set_disconnected(cdev); ccw_device_set_disconnected(cdev);
@ -586,7 +609,7 @@ ccw_device_offline(struct ccw_device *cdev)
static void ccw_device_generic_notoper(struct ccw_device *cdev, static void ccw_device_generic_notoper(struct ccw_device *cdev,
enum dev_event dev_event) enum dev_event dev_event)
{ {
if (!ccw_device_notify(cdev, CIO_GONE)) if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else else
ccw_device_set_disconnected(cdev); ccw_device_set_disconnected(cdev);
@ -667,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
struct irb *irb; struct irb *irb;
int is_cmd; int is_cmd;
irb = (struct irb *) __LC_IRB; irb = (struct irb *)&S390_lowcore.irb;
is_cmd = !scsw_is_tm(&irb->scsw); is_cmd = !scsw_is_tm(&irb->scsw);
/* Check for unsolicited interrupt. */ /* Check for unsolicited interrupt. */
if (!scsw_is_solicited(&irb->scsw)) { if (!scsw_is_solicited(&irb->scsw)) {
@ -732,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{ {
struct irb *irb; struct irb *irb;
irb = (struct irb *) __LC_IRB; irb = (struct irb *)&S390_lowcore.irb;
/* Check for unsolicited interrupt. */ /* Check for unsolicited interrupt. */
if (scsw_stctl(&irb->scsw) == if (scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {

View File

@ -208,18 +208,27 @@ struct qdio_dev_perf_stat {
unsigned int eqbs_partial; unsigned int eqbs_partial;
unsigned int sqbs; unsigned int sqbs;
unsigned int sqbs_partial; unsigned int sqbs_partial;
} ____cacheline_aligned;
struct qdio_queue_perf_stat {
/*
* Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
* Since max. 127 SBALs are scanned reuse entry for 128 as queue full
* aka 127 SBALs found.
*/
unsigned int nr_sbals[8];
unsigned int nr_sbal_error;
unsigned int nr_sbal_nop;
unsigned int nr_sbal_total;
}; };
struct qdio_input_q { struct qdio_input_q {
/* input buffer acknowledgement flag */ /* input buffer acknowledgement flag */
int polling; int polling;
/* first ACK'ed buffer */ /* first ACK'ed buffer */
int ack_start; int ack_start;
/* how much sbals are acknowledged with qebsm */ /* how much sbals are acknowledged with qebsm */
int ack_count; int ack_count;
/* last time of noticing incoming data */ /* last time of noticing incoming data */
u64 timestamp; u64 timestamp;
}; };
@ -227,21 +236,47 @@ struct qdio_input_q {
struct qdio_output_q { struct qdio_output_q {
/* PCIs are enabled for the queue */ /* PCIs are enabled for the queue */
int pci_out_enabled; int pci_out_enabled;
/* IQDIO: output multiple buffers (enhanced SIGA) */ /* IQDIO: output multiple buffers (enhanced SIGA) */
int use_enh_siga; int use_enh_siga;
/* timer to check for more outbound work */ /* timer to check for more outbound work */
struct timer_list timer; struct timer_list timer;
}; };
/*
* Note on cache alignment: grouped slsb and write mostly data at the beginning
* sbal[] is read-only and starts on a new cacheline followed by read mostly.
*/
struct qdio_q { struct qdio_q {
struct slsb slsb; struct slsb slsb;
union { union {
struct qdio_input_q in; struct qdio_input_q in;
struct qdio_output_q out; struct qdio_output_q out;
} u; } u;
/*
* inbound: next buffer the program should check for
* outbound: next buffer to check if adapter processed it
*/
int first_to_check;
/* first_to_check of the last time */
int last_move;
/* beginning position for calling the program */
int first_to_kick;
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
/* error condition during a data transfer */
unsigned int qdio_error;
struct tasklet_struct tasklet;
struct qdio_queue_perf_stat q_stats;
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
/* queue number */ /* queue number */
int nr; int nr;
@ -257,32 +292,9 @@ struct qdio_q {
/* upper-layer program handler */ /* upper-layer program handler */
qdio_handler_t (*handler); qdio_handler_t (*handler);
/*
* inbound: next buffer the program should check for
* outbound: next buffer to check for having been processed
* by the card
*/
int first_to_check;
/* first_to_check of the last time */
int last_move;
/* beginning position for calling the program */
int first_to_kick;
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
struct qdio_irq *irq_ptr;
struct dentry *debugfs_q; struct dentry *debugfs_q;
struct tasklet_struct tasklet; struct qdio_irq *irq_ptr;
/* error condition during a data transfer */
unsigned int qdio_error;
struct sl *sl; struct sl *sl;
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
/* /*
* Warning: Leave this member at the end so it won't be cleared in * Warning: Leave this member at the end so it won't be cleared in
* qdio_fill_qs. A page is allocated under this pointer and used for * qdio_fill_qs. A page is allocated under this pointer and used for
@ -317,12 +329,8 @@ struct qdio_irq {
struct qdio_ssqd_desc ssqd_desc; struct qdio_ssqd_desc ssqd_desc;
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
struct qdio_dev_perf_stat perf_stat;
int perf_stat_enabled; int perf_stat_enabled;
/*
* Warning: Leave these members together at the end so they won't be
* cleared in qdio_setup_irq.
*/
struct qdr *qdr; struct qdr *qdr;
unsigned long chsc_page; unsigned long chsc_page;
@ -331,6 +339,7 @@ struct qdio_irq {
debug_info_t *debug_area; debug_info_t *debug_area;
struct mutex setup_mutex; struct mutex setup_mutex;
struct qdio_dev_perf_stat perf_stat;
}; };
/* helper functions */ /* helper functions */
@ -341,9 +350,20 @@ struct qdio_irq {
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \ (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
css_general_characteristics.aif_osa) css_general_characteristics.aif_osa)
#define qperf(qdev,attr) qdev->perf_stat.attr #define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr))
#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \
q->irq_ptr->perf_stat.attr++ #define qperf_inc(__q, __attr) \
({ \
struct qdio_irq *qdev = (__q)->irq_ptr; \
if (qdev->perf_stat_enabled) \
(qdev->perf_stat.__attr)++; \
})
static inline void account_sbals_error(struct qdio_q *q, int count)
{
q->q_stats.nr_sbal_error += count;
q->q_stats.nr_sbal_total += count;
}
/* the highest iqdio queue is used for multicast */ /* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q) static inline int multicast_outbound(struct qdio_q *q)
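The cache-line comment above depends on sbal[] really starting a new cache line, which its ____cacheline_aligned marker enforces. If one wanted that invariant spelled out, a compile-time check next to the struct could look like the following (purely illustrative, not part of the commit):

    #include <linux/cache.h>
    #include <linux/kernel.h>
    #include <linux/stddef.h>

    static inline void qdio_q_layout_check(void)
    {
        BUILD_BUG_ON(offsetof(struct qdio_q, sbal) % L1_CACHE_BYTES != 0);
    }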


@ -60,7 +60,7 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
seq_printf(m, "polling: %d ack start: %d ack count: %d\n", seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
seq_printf(m, "slsb buffer states:\n"); seq_printf(m, "SBAL states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
@ -97,6 +97,20 @@ static int qstat_show(struct seq_file *m, void *v)
} }
seq_printf(m, "\n"); seq_printf(m, "\n");
seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
seq_printf(m, "\nSBAL statistics:");
if (!q->irq_ptr->perf_stat_enabled) {
seq_printf(m, " disabled\n");
return 0;
}
seq_printf(m, "\n1 2.. 4.. 8.. "
"16.. 32.. 64.. 127\n");
for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
q->q_stats.nr_sbal_total);
return 0; return 0;
} }
@ -181,9 +195,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
{ {
struct seq_file *seq = file->private_data; struct seq_file *seq = file->private_data;
struct qdio_irq *irq_ptr = seq->private; struct qdio_irq *irq_ptr = seq->private;
struct qdio_q *q;
unsigned long val; unsigned long val;
char buf[8]; char buf[8];
int ret; int ret, i;
if (!irq_ptr) if (!irq_ptr)
return 0; return 0;
@ -201,6 +216,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
case 0: case 0:
irq_ptr->perf_stat_enabled = 0; irq_ptr->perf_stat_enabled = 0;
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
for_each_input_queue(irq_ptr, q, i)
memset(&q->q_stats, 0, sizeof(q->q_stats));
for_each_output_queue(irq_ptr, q, i)
memset(&q->q_stats, 0, sizeof(q->q_stats));
break; break;
case 1: case 1:
irq_ptr->perf_stat_enabled = 1; irq_ptr->perf_stat_enabled = 1;


@ -392,6 +392,20 @@ static inline void qdio_stop_polling(struct qdio_q *q)
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
} }
static inline void account_sbals(struct qdio_q *q, int count)
{
int pos = 0;
q->q_stats.nr_sbal_total += count;
if (count == QDIO_MAX_BUFFERS_MASK) {
q->q_stats.nr_sbals[7]++;
return;
}
while (count >>= 1)
pos++;
q->q_stats.nr_sbals[pos]++;
}
static void announce_buffer_error(struct qdio_q *q, int count) static void announce_buffer_error(struct qdio_q *q, int count)
{ {
q->qdio_error |= QDIO_ERROR_SLSB_STATE; q->qdio_error |= QDIO_ERROR_SLSB_STATE;
@ -487,16 +501,22 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
q->first_to_check = add_buf(q->first_to_check, count); q->first_to_check = add_buf(q->first_to_check, count);
if (atomic_sub(count, &q->nr_buf_used) == 0) if (atomic_sub(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full); qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
break; break;
case SLSB_P_INPUT_ERROR: case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count); announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */ /* process the buffer, the upper layer will take care of it */
q->first_to_check = add_buf(q->first_to_check, count); q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used); atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
break; break;
case SLSB_CU_INPUT_EMPTY: case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT: case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK: case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break; break;
default: default:
@ -514,7 +534,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move) || q->qdio_error) { if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr; q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
q->u.in.timestamp = get_usecs(); q->u.in.timestamp = get_usecs();
return 1; return 1;
} else } else
@ -643,15 +663,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
atomic_sub(count, &q->nr_buf_used); atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count); q->first_to_check = add_buf(q->first_to_check, count);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
break; break;
case SLSB_P_OUTPUT_ERROR: case SLSB_P_OUTPUT_ERROR:
announce_buffer_error(q, count); announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */ /* process the buffer, the upper layer will take care of it */
q->first_to_check = add_buf(q->first_to_check, count); q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used); atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
break; break;
case SLSB_CU_OUTPUT_PRIMED: case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */ /* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
break; break;
case SLSB_P_OUTPUT_NOT_INIT: case SLSB_P_OUTPUT_NOT_INIT:

View File

@ -333,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->qdf0[i + nr].slsba = irq_ptr->qdr->qdf0[i + nr].slsba =
(unsigned long)&irq_ptr_qs[i]->slsb.val[0]; (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
} }
static void setup_qdr(struct qdio_irq *irq_ptr, static void setup_qdr(struct qdio_irq *irq_ptr,
@ -350,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++) for (i = 0; i < qdio_init->no_input_qs; i++)
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
@ -382,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
int rc; int rc;
memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
/* wipes qib.ac, required by ar7063 */ /* wipes qib.ac, required by ar7063 */
memset(irq_ptr->qdr, 0, sizeof(struct qdr)); memset(irq_ptr->qdr, 0, sizeof(struct qdr));
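
Two details in this setup path are worth noting. The qdf0 and qdr key fields now store PAGE_DEFAULT_KEY >> 4: the s390 page key macro carries the access-control bits in the high nibble of the key byte (PAGE_DEFAULT_KEY is PAGE_DEFAULT_ACC << 4 in the architecture headers), while these QDIO descriptor fields are 4-bit access keys, so only that nibble is kept; the same shift is applied to the scssc_area keys in the thinint setup further down. The range memset over the head of struct qdio_irq is also replaced by explicit per-member clears, so the reset no longer depends on the ordering of members ahead of the qdr pointer. A small compilable sketch of the nibble handling (the DEMO_* names and the non-zero access key are purely illustrative):

#include <stdio.h>

/* The access key lives in the high nibble of the page key byte, i.e.
 * key byte == access key << 4; an access key of 6 is used here only to
 * make the shift visible. */
#define DEMO_ACC	6
#define DEMO_KEY	(DEMO_ACC << 4)

int main(void)
{
	unsigned char key_byte = DEMO_KEY;	/* 0x60 */
	unsigned char qdio_key = key_byte >> 4;	/* 0x6, what qdf0.akey expects */

	printf("key byte 0x%02x -> 4-bit access key 0x%x\n", key_byte, qdio_key);
	return 0;
}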

View File

@ -198,8 +198,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
.code = 0x0021, .code = 0x0021,
}; };
scssc_area->operation_code = 0; scssc_area->operation_code = 0;
scssc_area->ks = PAGE_DEFAULT_KEY; scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
scssc_area->kc = PAGE_DEFAULT_KEY; scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
scssc_area->isc = QDIO_AIRQ_ISC; scssc_area->isc = QDIO_AIRQ_ISC;
scssc_area->schid = irq_ptr->schid; scssc_area->schid = irq_ptr->schid;

View File

@ -33,6 +33,7 @@
#include <linux/miscdevice.h> #include <linux/miscdevice.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <asm/atomic.h> #include <asm/atomic.h>
@ -912,126 +913,105 @@ static struct miscdevice zcrypt_misc_device = {
*/ */
static struct proc_dir_entry *zcrypt_entry; static struct proc_dir_entry *zcrypt_entry;
static int sprintcl(unsigned char *outaddr, unsigned char *addr, static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
unsigned int len)
{ {
int hl, i; int i;
hl = 0;
for (i = 0; i < len; i++) for (i = 0; i < len; i++)
hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); seq_printf(m, "%01x", (unsigned int) addr[i]);
hl += sprintf(outaddr+hl, " "); seq_putc(m, ' ');
return hl;
} }
static int sprintrw(unsigned char *outaddr, unsigned char *addr, static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
unsigned int len)
{ {
int hl, inl, c, cx; int inl, c, cx;
hl = sprintf(outaddr, " "); seq_printf(m, " ");
inl = 0; inl = 0;
for (c = 0; c < (len / 16); c++) { for (c = 0; c < (len / 16); c++) {
hl += sprintcl(outaddr+hl, addr+inl, 16); sprintcl(m, addr+inl, 16);
inl += 16; inl += 16;
} }
cx = len%16; cx = len%16;
if (cx) { if (cx) {
hl += sprintcl(outaddr+hl, addr+inl, cx); sprintcl(m, addr+inl, cx);
inl += cx; inl += cx;
} }
hl += sprintf(outaddr+hl, "\n"); seq_putc(m, '\n');
return hl;
} }
static int sprinthx(unsigned char *title, unsigned char *outaddr, static void sprinthx(unsigned char *title, struct seq_file *m,
unsigned char *addr, unsigned int len) unsigned char *addr, unsigned int len)
{ {
int hl, inl, r, rx; int inl, r, rx;
hl = sprintf(outaddr, "\n%s\n", title); seq_printf(m, "\n%s\n", title);
inl = 0; inl = 0;
for (r = 0; r < (len / 64); r++) { for (r = 0; r < (len / 64); r++) {
hl += sprintrw(outaddr+hl, addr+inl, 64); sprintrw(m, addr+inl, 64);
inl += 64; inl += 64;
} }
rx = len % 64; rx = len % 64;
if (rx) { if (rx) {
hl += sprintrw(outaddr+hl, addr+inl, rx); sprintrw(m, addr+inl, rx);
inl += rx; inl += rx;
} }
hl += sprintf(outaddr+hl, "\n"); seq_putc(m, '\n');
return hl;
} }
static int sprinthx4(unsigned char *title, unsigned char *outaddr, static void sprinthx4(unsigned char *title, struct seq_file *m,
unsigned int *array, unsigned int len) unsigned int *array, unsigned int len)
{ {
int hl, r; int r;
hl = sprintf(outaddr, "\n%s\n", title); seq_printf(m, "\n%s\n", title);
for (r = 0; r < len; r++) { for (r = 0; r < len; r++) {
if ((r % 8) == 0) if ((r % 8) == 0)
hl += sprintf(outaddr+hl, " "); seq_printf(m, " ");
hl += sprintf(outaddr+hl, "%08X ", array[r]); seq_printf(m, "%08X ", array[r]);
if ((r % 8) == 7) if ((r % 8) == 7)
hl += sprintf(outaddr+hl, "\n"); seq_putc(m, '\n');
} }
hl += sprintf(outaddr+hl, "\n"); seq_putc(m, '\n');
return hl;
} }
static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, static int zcrypt_proc_show(struct seq_file *m, void *v)
int count, int *eof, void *data)
{ {
unsigned char *workarea; char workarea[sizeof(int) * AP_DEVICES];
int len;
len = 0; seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
/* resp_buff is a page. Use the right half for a work area */ seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
workarea = resp_buff + 2000; seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", seq_printf(m, "PCIXCC MCL2 count: %d\n",
ap_domain_index); zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
len += sprintf(resp_buff + len, "Total device count: %d\n", seq_printf(m, "PCIXCC MCL3 count: %d\n",
zcrypt_device_count); zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
len += sprintf(resp_buff + len, "PCICA count: %d\n", seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
zcrypt_count_type(ZCRYPT_PCICA)); seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
len += sprintf(resp_buff + len, "PCICC count: %d\n", seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
zcrypt_count_type(ZCRYPT_PCICC)); seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", seq_printf(m, "Total open handles: %d\n\n",
zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); atomic_read(&zcrypt_open_count));
len += sprintf(resp_buff + len, "CEX2C count: %d\n",
zcrypt_count_type(ZCRYPT_CEX2C));
len += sprintf(resp_buff + len, "CEX2A count: %d\n",
zcrypt_count_type(ZCRYPT_CEX2A));
len += sprintf(resp_buff + len, "CEX3C count: %d\n",
zcrypt_count_type(ZCRYPT_CEX3C));
len += sprintf(resp_buff + len, "CEX3A count: %d\n",
zcrypt_count_type(ZCRYPT_CEX3A));
len += sprintf(resp_buff + len, "requestq count: %d\n",
zcrypt_requestq_count());
len += sprintf(resp_buff + len, "pendingq count: %d\n",
zcrypt_pendingq_count());
len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
atomic_read(&zcrypt_open_count));
zcrypt_status_mask(workarea); zcrypt_status_mask(workarea);
len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
"4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
resp_buff+len, workarea, AP_DEVICES); m, workarea, AP_DEVICES);
zcrypt_qdepth_mask(workarea); zcrypt_qdepth_mask(workarea);
len += sprinthx("Waiting work element counts", sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
resp_buff+len, workarea, AP_DEVICES);
zcrypt_perdev_reqcnt((int *) workarea); zcrypt_perdev_reqcnt((int *) workarea);
len += sprinthx4("Per-device successfully completed request counts", sprinthx4("Per-device successfully completed request counts",
resp_buff+len,(unsigned int *) workarea, AP_DEVICES); m, (unsigned int *) workarea, AP_DEVICES);
*eof = 1; return 0;
memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); }
return len;
static int zcrypt_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, zcrypt_proc_show, NULL);
} }
static void zcrypt_disable_card(int index) static void zcrypt_disable_card(int index)
@ -1061,11 +1041,11 @@ static void zcrypt_enable_card(int index)
spin_unlock_bh(&zcrypt_device_lock); spin_unlock_bh(&zcrypt_device_lock);
} }
static int zcrypt_status_write(struct file *file, const char __user *buffer, static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
unsigned long count, void *data) size_t count, loff_t *pos)
{ {
unsigned char *lbuf, *ptr; unsigned char *lbuf, *ptr;
unsigned long local_count; size_t local_count;
int j; int j;
if (count <= 0) if (count <= 0)
@ -1115,6 +1095,15 @@ out:
return count; return count;
} }
static const struct file_operations zcrypt_proc_fops = {
.owner = THIS_MODULE,
.open = zcrypt_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = zcrypt_proc_write,
};
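
The /proc/driver/z90crypt read path is converted from the old read_proc callback, which had to track its own offset into a caller-supplied page, to the seq_file single_open() pattern: zcrypt_proc_show() only emits output through seq_printf()/seq_putc(), while seq_read, seq_lseek and single_release provide the buffering and file semantics. A minimal self-contained module using the same pattern (the demo names and proc path are illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* show callback: just write into the seq_file, no offset bookkeeping */
static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello from seq_file\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	if (!proc_create("driver/seqfile_demo", 0444, NULL, &demo_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("driver/seqfile_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because single_open() buffers the whole show output, the old trick of carving a work area out of the response page is no longer needed; zcrypt_proc_show() can use a small stack buffer for the status masks instead.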
static int zcrypt_rng_device_count; static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer; static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index; static int zcrypt_rng_buffer_index;
@ -1197,14 +1186,11 @@ int __init zcrypt_api_init(void)
goto out; goto out;
/* Set up the proc file system */ /* Set up the proc file system */
zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
if (!zcrypt_entry) { if (!zcrypt_entry) {
rc = -ENOMEM; rc = -ENOMEM;
goto out_misc; goto out_misc;
} }
zcrypt_entry->data = NULL;
zcrypt_entry->read_proc = zcrypt_status_read;
zcrypt_entry->write_proc = zcrypt_status_write;
return 0; return 0;

View File

@ -340,11 +340,11 @@ static void kvm_extint_handler(u16 code)
return; return;
/* The LSB might be overloaded, we have to mask it */ /* The LSB might be overloaded, we have to mask it */
vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL); vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL);
/* We use the LSB of extparam, to decide, if this interrupt is a config /* We use the LSB of extparam, to decide, if this interrupt is a config
* change or a "standard" interrupt */ * change or a "standard" interrupt */
config_changed = (*(int *) __LC_EXT_PARAMS & 1); config_changed = S390_lowcore.ext_params & 1;
if (config_changed) { if (config_changed) {
struct virtio_driver *drv; struct virtio_driver *drv;

View File

@ -362,6 +362,11 @@ typedef struct elf64_shdr {
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */ #define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
#define NT_S390_TIMER 0x301 /* s390 timer register */
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
#define NT_S390_CTRS 0x304 /* s390 control registers */
#define NT_S390_PREFIX 0x305 /* s390 prefix register */
/* Note header in a PT_NOTE section */ /* Note header in a PT_NOTE section */
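
The new NT_S390_TIMER through NT_S390_PREFIX note types let core dumps and ptrace carry the additional s390 register sets named in the comments above. A sketch of how a tool might spot them while walking a PT_NOTE segment (illustrative helper; standard 4-byte ELF note padding is assumed, and the fake note built in main() is only demo data):

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NT_S390_TIMER	0x301
#define NT_S390_PREFIX	0x305

/* walk raw PT_NOTE bytes and report the new s390 regset notes */
static void walk_notes(const unsigned char *p, size_t len)
{
	size_t off = 0;

	while (off + sizeof(Elf64_Nhdr) <= len) {
		const Elf64_Nhdr *nh = (const Elf64_Nhdr *)(p + off);
		size_t desc_off = off + sizeof(*nh) + ((nh->n_namesz + 3) & ~3u);

		if (nh->n_type >= NT_S390_TIMER && nh->n_type <= NT_S390_PREFIX)
			printf("s390 regset note 0x%x, %u byte desc\n",
			       nh->n_type, (unsigned int)nh->n_descsz);
		off = desc_off + ((nh->n_descsz + 3) & ~3u);
	}
}

int main(void)
{
	/* one fake note: name "LINUX", type NT_S390_PREFIX, 4-byte descriptor */
	unsigned char buf[sizeof(Elf64_Nhdr) + 8 + 4];
	Elf64_Nhdr nh = { .n_namesz = 6, .n_descsz = 4, .n_type = NT_S390_PREFIX };

	memcpy(buf, &nh, sizeof(nh));
	memcpy(buf + sizeof(nh), "LINUX\0\0", 8);
	memset(buf + sizeof(nh) + 8, 0, 4);
	walk_notes(buf, sizeof(buf));
	return 0;
}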