alistair23-linux/arch/powerpc/platforms/cell/spufs/fault.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"

/**
 * Handle an SPE event, depending on the context's SPU_CREATE_EVENTS_ENABLED
 * flag.
 *
 * If the context was created with events, we just set the return event.
 * Otherwise, send an appropriate signal to the process.
 */
static void spufs_handle_event(struct spu_context *ctx,
                               unsigned long ea, int type)
{
        if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
                ctx->event_return |= type;
                wake_up_all(&ctx->stop_wq);
                return;
        }

        switch (type) {
        case SPE_EVENT_INVALID_DMA:
                force_sig_fault(SIGBUS, BUS_OBJERR, NULL);
                break;
        case SPE_EVENT_SPE_DATA_STORAGE:
                ctx->ops->restart_dma(ctx);
                force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea);
                break;
        case SPE_EVENT_DMA_ALIGNMENT:
                /* DAR isn't set for an alignment fault :( */
                force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
                break;
        case SPE_EVENT_SPE_ERROR:
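                /*
                 * npc_read() returns the *next* program counter; step back
                 * one 32-bit SPU instruction so the reported address points
                 * at the instruction that raised the error.
                 */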
                force_sig_fault(
                        SIGILL, ILL_ILLOPC,
                        (void __user *)(unsigned long)
                        ctx->ops->npc_read(ctx) - 4);
                break;
        }
}
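
/*
 * Class 0 interrupts report error conditions raised by the SPU or its MFC:
 * DMA alignment errors, invalid DMA commands and SPU errors.  Each pending
 * condition is forwarded to user space via spufs_handle_event(), the pending
 * mask is cleared, and -EIO is returned to the caller.
 */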
int spufs_handle_class0(struct spu_context *ctx)
{
        unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

        if (likely(!stat))
                return 0;

        if (stat & CLASS0_DMA_ALIGNMENT_INTR)
                spufs_handle_event(ctx, ctx->csa.class_0_dar,
                        SPE_EVENT_DMA_ALIGNMENT);

        if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
                spufs_handle_event(ctx, ctx->csa.class_0_dar,
                        SPE_EVENT_INVALID_DMA);

        if (stat & CLASS0_SPU_ERROR_INTR)
                spufs_handle_event(ctx, ctx->csa.class_0_dar,
                        SPE_EVENT_SPE_ERROR);

        ctx->csa.class_0_pending = 0;

        return -EIO;
}
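
/*
 * Class 1 interrupts are MFC translation faults (MFC_DSISR_PTE_NOT_FOUND,
 * MFC_DSISR_ACCESS_DENIED), i.e. page faults taken on behalf of the SPU's
 * DMA engine rather than by the PowerPC core itself.
 */
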
/*
 * Bottom half handler for page faults.  We can't do this from interrupt
 * context, since we might need to sleep.  We also need to give up the
 * state mutex so we can get scheduled out while waiting for the backing
 * store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 *       in order to speed up the easy case.
 */
int spufs_handle_class1(struct spu_context *ctx)
{
        u64 ea, dsisr, access;
        unsigned long flags;
        vm_fault_t flt = 0;
        int ret;

        /*
         * dar and dsisr get passed from the registers
         * to the spu_context, to this function, but not
         * back to the spu if it gets scheduled again.
         *
         * if we don't handle the fault for a saved context
         * in time, we can still expect to get the same fault
         * immediately after the context restore.
         */
        ea = ctx->csa.class_1_dar;
        dsisr = ctx->csa.class_1_dsisr;

        if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
                return 0;

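        /*
         * Mark the context as waiting for I/O and account the hash fault,
         * both against the context and against the physical SPU if the
         * context is currently loaded.
         */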
        spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);

        pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
                 dsisr, ctx->state);

        ctx->stats.hash_flt++;
        if (ctx->state == SPU_STATE_RUNNABLE)
                ctx->spu->stats.hash_flt++;

        /* we must not hold the lock when entering copro_handle_mm_fault */
        spu_release(ctx);

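        /*
         * Build the access mask for the faulting DMA: read access is always
         * required, and a "put" transfer (SPU local store to main storage)
         * also needs write access.  Try the fast path first: hash_page()
         * with interrupts disabled, using 0x300 (the data storage interrupt)
         * as the trap number.
         */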
        access = (_PAGE_PRESENT | _PAGE_READ);
        access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
        local_irq_save(flags);
        ret = hash_page(ea, access, 0x300, dsisr);
        local_irq_restore(flags);

        /*
         * Hashing failed (e.g. there is no Linux PTE for the page yet), so
         * fall back to the actual fault handler; it resolves the fault
         * through the generic mm code and returns the VM_FAULT_* bits in flt.
         */
        if (ret)
                ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);

        /*
         * This is nasty: we need the state_mutex for all the bookkeeping even
         * if the syscall was interrupted by a signal. ewww.
         */
        mutex_lock(&ctx->state_mutex);

        /*
         * Clear dsisr under the context lock after handling the fault, so
         * that time slicing will not preempt the context while the page
         * fault handler is running.  Context switch code removes mappings.
         */
        ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;

        /*
         * If we handled the fault successfully and are in runnable
         * state, restart the DMA.
         * In case of an unhandled error, report the problem to user space.
         */
        if (!ret) {
                if (flt & VM_FAULT_MAJOR)
                        ctx->stats.maj_flt++;
                else
                        ctx->stats.min_flt++;
                if (ctx->state == SPU_STATE_RUNNABLE) {
                        if (flt & VM_FAULT_MAJOR)
                                ctx->spu->stats.maj_flt++;
                        else
                                ctx->spu->stats.min_flt++;
                }
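
                /*
                 * The context may have been scheduled out while we slept in
                 * the fault handler; only restart the DMA if it is still
                 * loaded on a physical SPU.
                 */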
                if (ctx->spu)
                        ctx->ops->restart_dma(ctx);
        } else
                spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
        return ret;
}