Merge tag 's390-5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:
 "This is mainly to decouple udelay() and arch_cpu_idle() and simplify
  both of them.

  Summary:

   - Always initialize kernel stack backchain when entering the kernel,
     so that unwinding works properly.

   - Fix stack unwinder test case to avoid rare interrupt stack
     corruption.

   - Simplify udelay() and just let it busy loop instead of implementing
     a complex logic.

   - arch_cpu_idle() cleanup.

   - Some other minor improvements"

* tag 's390-5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/zcrypt: convert comma to semicolon
  s390/idle: allow arch_cpu_idle() to be kprobed
  s390/idle: remove raw_local_irq_save()/restore() from arch_cpu_idle()
  s390/idle: merge enabled_wait() and arch_cpu_idle()
  s390/delay: remove udelay_simple()
  s390/irq: select HAVE_IRQ_EXIT_ON_IRQ_STACK
  s390/delay: simplify udelay
  s390/test_unwind: use timer instead of udelay
  s390/test_unwind: fix CALL_ON_STACK tests
  s390: make calls to TRACE_IRQS_OFF/TRACE_IRQS_ON balanced
  s390: always clear kernel stack backchain before calling functions
Linus Torvalds 2020-12-18 11:08:06 -08:00
commit a087241716
12 changed files with 46 additions and 157 deletions
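
The heart of the series is the udelay() rework: instead of programming the clock
comparator and entering enabled wait, with different handling depending on
irq/softirq/preemption context, udelay() now simply busy loops on the TOD clock.
A minimal sketch of the idea, mirroring the new delay_loop() in
arch/s390/lib/delay.c below (get_tod_clock_monotonic(), tod_after() and
cpu_relax() are existing s390 kernel primitives):

	/* Spin until the monotonic TOD clock passes the deadline. */
	static void delay_loop(unsigned long delta)
	{
		unsigned long end;

		end = get_tod_clock_monotonic() + delta;	/* delta is in TOD clock units */
		while (!tod_after(get_tod_clock_monotonic(), end))
			cpu_relax();
	}

Because this touches neither the clock comparator nor any CPU flags, the same
code is safe early in boot, in interrupt context, and on the reboot/dump path,
which is why udelay_simple() and CIF_IGNORE_IRQ can disappear in the diffs below.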

arch/s390/Kconfig

@@ -150,6 +150,7 @@ config S390
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_GCC_PLUGINS
 	select HAVE_GENERIC_VDSO
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZ4

arch/s390/include/asm/delay.h

@@ -13,14 +13,12 @@
 #ifndef _S390_DELAY_H
 #define _S390_DELAY_H
 
-void udelay_enable(void);
-void __ndelay(unsigned long long nsecs);
-void __udelay(unsigned long long usecs);
-void udelay_simple(unsigned long long usecs);
+void __ndelay(unsigned long nsecs);
+void __udelay(unsigned long usecs);
 void __delay(unsigned long loops);
 
-#define ndelay(n) __ndelay((unsigned long long) (n))
-#define udelay(n) __udelay((unsigned long long) (n))
-#define mdelay(n) __udelay((unsigned long long) (n) * 1000)
+#define ndelay(n) __ndelay((unsigned long)(n))
+#define udelay(n) __udelay((unsigned long)(n))
+#define mdelay(n) __udelay((unsigned long)(n) * 1000)
 
 #endif /* defined(_S390_DELAY_H) */
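
A note on the prototype change: s390 is 64-bit only, so unsigned long is already
64 bits wide and the "unsigned long long" variants bought nothing. If one wanted
to make that assumption explicit (hypothetical check, not part of the patch):

	static_assert(sizeof(unsigned long) == sizeof(unsigned long long));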

arch/s390/include/asm/processor.h

@@ -16,14 +16,12 @@
 #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
 #define CIF_FPU 3 /* restore FPU registers */
-#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
 #define CIF_ENABLED_WAIT 5 /* in enabled wait state */
 #define CIF_MCCK_GUEST 6 /* machine check happening in guest */
 #define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
 
 #define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
 #define _CIF_FPU BIT(CIF_FPU)
-#define _CIF_IGNORE_IRQ BIT(CIF_IGNORE_IRQ)
 #define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
 #define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
 #define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
@@ -292,11 +290,6 @@ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 	return (psw.addr - ilc) & mask;
 }
 
-/*
- * Function to stop a processor until the next interrupt occurs
- */
-void enabled_wait(void);
-
 /*
  * Function to drop a processor into disabled wait state
  */

arch/s390/kernel/entry.S

@@ -414,6 +414,7 @@ ENTRY(system_call)
 	mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg %r14,__PT_FLAGS(%r11)
+	xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	ENABLE_INTS
 .Lsysc_do_svc:
 	# clear user controlled register to prevent speculative use
@@ -430,7 +431,6 @@ ENTRY(system_call)
 	jnl .Lsysc_nr_ok
 	slag %r8,%r1,3
 .Lsysc_nr_ok:
-	xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	stg %r2,__PT_ORIG_GPR2(%r11)
 	stg %r7,STACK_FRAME_OVERHEAD(%r15)
 	lg %r9,0(%r8,%r10) # get system call add.
@@ -699,8 +699,8 @@ ENTRY(pgm_check_handler)
 	mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 	mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
 	mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-6:	RESTORE_SM_CLEAR_PER
-	xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+6:	xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	RESTORE_SM_CLEAR_PER
 	larl %r1,pgm_check_table
 	llgh %r10,__PT_INT_CODE+2(%r11)
 	nill %r10,0x007f
@@ -731,8 +731,8 @@ ENTRY(pgm_check_handler)
 # PER event in supervisor state, must be kprobes
 #
 .Lpgm_kprobe:
-	RESTORE_SM_CLEAR_PER
 	xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	RESTORE_SM_CLEAR_PER
 	lgr %r2,%r11 # pass pointer to pt_regs
 	brasl %r14,do_per_trap
 	j .Lpgm_return
@@ -778,10 +778,8 @@ ENTRY(io_int_handler)
 .Lio_skip_asce:
 	mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 	xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
-	TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
-	jo .Lio_restore
-	TRACE_IRQS_OFF
 	xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	TRACE_IRQS_OFF
 .Lio_loop:
 	lgr %r2,%r11 # pass pointer to pt_regs
 	lghi %r3,IO_INTERRUPT
@@ -966,10 +964,8 @@ ENTRY(ext_int_handler)
 	mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 	mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
 	xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
-	TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
-	jo .Lio_restore
-	TRACE_IRQS_OFF
 	xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	TRACE_IRQS_OFF
 	lgr %r2,%r11 # pass pointer to pt_regs
 	lghi %r3,EXT_INTERRUPT
 	brasl %r14,do_IRQ
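
Background on the xc instructions being shuffled around above: xc is
exclusive-or of storage with storage, so XORing the 8-byte __SF_BACKCHAIN slot
with itself zeroes it. On s390, the first slot of every stack frame (the
backchain) points to the caller's frame, and the unwinder follows that chain
until it reads a zero; a stale value left by a previous user of the stack sends
it into garbage, hence the clear now happens first thing on every kernel entry
path. A simplified sketch of the walk (frame layout abridged from s390's
struct stack_frame; illustrative, not the exact kernel definition):

	struct stack_frame {
		unsigned long back_chain;	/* 0 terminates the unwind */
		unsigned long empty1[5];
		unsigned long gprs[10];		/* saved %r6-%r15; gprs[8] is %r14, the return address */
	};

	static void walk_backchain(unsigned long sp)
	{
		struct stack_frame *sf = (struct stack_frame *)sp;

		while (sf && sf->back_chain) {
			/* report sf->gprs[8], the frame's saved return address */
			sf = (struct stack_frame *)sf->back_chain;
		}
	}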

arch/s390/kernel/idle.c

@@ -9,7 +9,6 @@
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
-#include <linux/kprobes.h>
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
@@ -21,22 +20,19 @@
 static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
-void enabled_wait(void)
+void arch_cpu_idle(void)
 {
 	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
 	unsigned long long idle_time;
-	unsigned long psw_mask, flags;
+	unsigned long psw_mask;
 
 	/* Wait for external, I/O or machine check interrupt. */
 	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	clear_cpu_flag(CIF_NOHZ_DELAY);
-	raw_local_irq_save(flags);
-	/* Call the assembler magic in entry.S */
+	/* psw_idle() returns with interrupts disabled. */
 	psw_idle(idle, psw_mask);
-	raw_local_irq_restore(flags);
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
 	raw_write_seqcount_begin(&idle->seqcount);
@@ -46,8 +42,8 @@ void enabled_wait(void)
 	idle->idle_count++;
 	account_idle_time(cputime_to_nsecs(idle_time));
 	raw_write_seqcount_end(&idle->seqcount);
+	raw_local_irq_enable();
 }
-NOKPROBE_SYMBOL(enabled_wait);
 
 static ssize_t show_idle_count(struct device *dev,
 			       struct device_attribute *attr, char *buf)
@@ -120,12 +116,6 @@ void arch_cpu_idle_enter(void)
 {
 }
 
-void arch_cpu_idle(void)
-{
-	enabled_wait();
-	raw_local_irq_enable();
-}
-
 void arch_cpu_idle_exit(void)
 {
 }

arch/s390/kernel/ipl.c

@@ -1512,7 +1512,7 @@ static void diag308_dump(void *dump_block)
 	while (1) {
 		if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
 			break;
-		udelay_simple(USEC_PER_SEC);
+		udelay(USEC_PER_SEC);
 	}
 }

arch/s390/kernel/irq.c

@@ -335,7 +335,6 @@ int __init arch_early_irq_init(void)
 	if (!stack)
 		panic("Couldn't allocate async stack");
 	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
-	udelay_enable();
 	return 0;
 }

arch/s390/lib/delay.c

@@ -19,13 +19,6 @@
 #include <asm/div64.h>
 #include <asm/idle.h>
 
-static DEFINE_STATIC_KEY_FALSE(udelay_ready);
-
-void __init udelay_enable(void)
-{
-	static_branch_enable(&udelay_ready);
-}
-
 void __delay(unsigned long loops)
 {
 	/*
@@ -39,105 +32,25 @@ void __delay(unsigned long loops)
 }
 EXPORT_SYMBOL(__delay);
 
-static void __udelay_disabled(unsigned long long usecs)
+static void delay_loop(unsigned long delta)
 {
-	unsigned long cr0, cr0_new, psw_mask;
-	struct s390_idle_data idle;
-	u64 end;
+	unsigned long end;
 
-	end = get_tod_clock() + (usecs << 12);
-	__ctl_store(cr0, 0, 0);
-	cr0_new = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
-	cr0_new |= (1UL << (63 - 52)); /* enable clock comparator irq */
-	__ctl_load(cr0_new, 0, 0);
-	psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
-	set_clock_comparator(end);
-	set_cpu_flag(CIF_IGNORE_IRQ);
-	psw_idle(&idle, psw_mask);
-	trace_hardirqs_off();
-	clear_cpu_flag(CIF_IGNORE_IRQ);
-	set_clock_comparator(S390_lowcore.clock_comparator);
-	__ctl_load(cr0, 0, 0);
-}
-
-static void __udelay_enabled(unsigned long long usecs)
-{
-	u64 clock_saved, end;
-
-	end = get_tod_clock_fast() + (usecs << 12);
-	do {
-		clock_saved = 0;
-		if (tod_after(S390_lowcore.clock_comparator, end)) {
-			clock_saved = local_tick_disable();
-			set_clock_comparator(end);
-		}
-		enabled_wait();
-		if (clock_saved)
-			local_tick_enable(clock_saved);
-	} while (get_tod_clock_fast() < end);
-}
-
-/*
- * Waits for 'usecs' microseconds using the TOD clock comparator.
- */
-void __udelay(unsigned long long usecs)
-{
-	unsigned long flags;
-
-	if (!static_branch_likely(&udelay_ready)) {
-		udelay_simple(usecs);
-		return;
-	}
-
-	preempt_disable();
-	local_irq_save(flags);
-	if (in_irq()) {
-		__udelay_disabled(usecs);
-		goto out;
-	}
-	if (in_softirq()) {
-		if (raw_irqs_disabled_flags(flags))
-			__udelay_disabled(usecs);
-		else
-			__udelay_enabled(usecs);
-		goto out;
-	}
-	if (raw_irqs_disabled_flags(flags)) {
-		local_bh_disable();
-		__udelay_disabled(usecs);
-		_local_bh_enable();
-		goto out;
-	}
-	__udelay_enabled(usecs);
-out:
-	local_irq_restore(flags);
-	preempt_enable();
-}
-EXPORT_SYMBOL(__udelay);
-
-/*
- * Simple udelay variant. To be used on startup and reboot
- * when the interrupt handler isn't working.
- */
-void udelay_simple(unsigned long long usecs)
-{
-	u64 end;
-
-	end = get_tod_clock_fast() + (usecs << 12);
-	while (get_tod_clock_fast() < end)
+	end = get_tod_clock_monotonic() + delta;
+	while (!tod_after(get_tod_clock_monotonic(), end))
 		cpu_relax();
 }
 
-void __ndelay(unsigned long long nsecs)
+void __udelay(unsigned long usecs)
 {
-	u64 end;
+	delay_loop(usecs << 12);
+}
+EXPORT_SYMBOL(__udelay);
 
+void __ndelay(unsigned long nsecs)
+{
 	nsecs <<= 9;
 	do_div(nsecs, 125);
-	end = get_tod_clock_fast() + nsecs;
-	if (nsecs & ~0xfffUL)
-		__udelay(nsecs >> 12);
-	while (get_tod_clock_fast() < end)
-		barrier();
+	delay_loop(nsecs);
 }
 EXPORT_SYMBOL(__ndelay);
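
The constants in __udelay() and __ndelay() are TOD clock conversions: bit 51 of
the s390 TOD clock increments once per microsecond, so a 64-bit read of the
clock advances by 4096 (1 << 12) per microsecond. Hypothetical helpers spelling
out the two conversions:

	static inline unsigned long usecs_to_tod(unsigned long usecs)
	{
		return usecs << 12;		/* 1 us == 4096 TOD units */
	}

	static inline unsigned long nsecs_to_tod(unsigned long nsecs)
	{
		/* (nsecs << 9) / 125 == nsecs * 4096 / 1000, with a smaller
		 * intermediate value than multiplying by 4096 first */
		return (nsecs << 9) / 125;
	}

do_div() in __ndelay() is the kernel's in-place 64-by-32-bit division helper;
on 64-bit s390 it amounts to the plain division shown here.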

arch/s390/lib/test_unwind.c

@@ -9,12 +9,12 @@
 #include <linux/kallsyms.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
+#include <linux/timer.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/kprobes.h>
 #include <linux/wait.h>
 #include <asm/irq.h>
-#include <asm/delay.h>
 
 #define BT_BUF_SIZE (PAGE_SIZE * 4)
@@ -205,12 +205,15 @@ static noinline int unwindme_func3(struct unwindme *u)
 /* This function must appear in the backtrace. */
 static noinline int unwindme_func2(struct unwindme *u)
 {
+	unsigned long flags;
 	int rc;
 
 	if (u->flags & UWM_SWITCH_STACK) {
-		preempt_disable();
+		local_irq_save(flags);
+		local_mcck_disable();
 		rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u);
-		preempt_enable();
+		local_mcck_enable();
+		local_irq_restore(flags);
 		return rc;
 	} else {
 		return unwindme_func3(u);
@@ -223,31 +226,27 @@ static noinline int unwindme_func1(void *u)
 	return unwindme_func2((struct unwindme *)u);
 }
 
-static void unwindme_irq_handler(struct ext_code ext_code,
-				 unsigned int param32,
-				 unsigned long param64)
+static void unwindme_timer_fn(struct timer_list *unused)
 {
 	struct unwindme *u = READ_ONCE(unwindme);
 
-	if (u && u->task == current) {
+	if (u) {
 		unwindme = NULL;
 		u->task = NULL;
 		u->ret = unwindme_func1(u);
+		complete(&u->task_ready);
 	}
 }
 
+static struct timer_list unwind_timer;
+
 static int test_unwind_irq(struct unwindme *u)
 {
-	preempt_disable();
-	if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) {
-		pr_info("Couldn't register external interrupt handler");
-		return -1;
-	}
-	u->task = current;
 	unwindme = u;
-	udelay(1);
-	unregister_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler);
-	preempt_enable();
+	init_completion(&u->task_ready);
+	timer_setup(&unwind_timer, unwindme_timer_fn, 0);
+	mod_timer(&unwind_timer, jiffies + 1);
+	wait_for_completion(&u->task_ready);
 	return u->ret;
 }
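
The rework above leans on a standard kernel pattern: arm a timer so that the
callback runs in timer-softirq context on the interrupt stack, and hand the
result back to the sleeping task through a completion. Stripped of the unwinder
specifics, the skeleton looks roughly like this (names invented for
illustration):

	static struct completion done;
	static struct timer_list test_timer;

	static void test_timer_fn(struct timer_list *unused)
	{
		/* runs in timer softirq context, on the interrupt stack */
		complete(&done);
	}

	static void run_on_irq_stack(void)
	{
		init_completion(&done);
		timer_setup(&test_timer, test_timer_fn, 0);
		mod_timer(&test_timer, jiffies + 1);	/* fire on the next tick */
		wait_for_completion(&done);		/* sleep until the callback ran */
	}

Unlike the old scheme of registering a clock comparator interrupt handler and
spinning in udelay(1), this synchronizes explicitly instead of relying on the
interrupt arriving inside the udelay() window.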

drivers/s390/cio/device.c

@@ -1681,7 +1681,7 @@ void ccw_device_wait_idle(struct ccw_device *cdev)
 		cio_tsch(sch);
 		if (sch->schib.scsw.cmd.actl == 0)
 			break;
-		udelay_simple(100);
+		udelay(100);
 	}
 }
 #endif

drivers/s390/crypto/zcrypt_cex2a.c

@@ -175,7 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
 	atomic_set(&zq->load, 0);
 	ap_queue_init_state(aq);
 	ap_queue_init_reply(aq, &zq->reply);
-	aq->request_timeout = CEX2A_CLEANUP_TIME,
+	aq->request_timeout = CEX2A_CLEANUP_TIME;
 	aq->private = zq;
 	rc = zcrypt_queue_register(zq);
 	if (rc) {

drivers/s390/crypto/zcrypt_cex4.c

@@ -631,7 +631,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
 	atomic_set(&zq->load, 0);
 	ap_queue_init_state(aq);
 	ap_queue_init_reply(aq, &zq->reply);
-	aq->request_timeout = CEX4_CLEANUP_TIME,
+	aq->request_timeout = CEX4_CLEANUP_TIME;
 	aq->private = zq;
 	rc = zcrypt_queue_register(zq);
 	if (rc) {
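
A closing note on the two zcrypt one-liners: the original lines ended in a comma
instead of a semicolon yet still compiled, because C's comma operator folds the
two assignments into a single expression statement. The behavior happened to be
identical, so this is a robustness fix rather than a bug fix. In miniature:

	int a, b;

	a = 1,	/* comma operator: left operand evaluated... */
	b = 2;	/* ...then the right one, so both assignments still happen */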