
Merge branch 'parisc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:

 - parisc now uses the generic dma_noncoherent_ops implementation
   (Christoph Hellwig)

 - further memory barrier and spinlock improvements (John David Anglin)

 - prepare removal of current_text_addr() functions (Nick Desaulniers)

 - improve kernel stack unwinding on parisc (me)

 - drop ENOTSUP which was defined on parisc only (me)

* 'parisc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Fix and improve kernel stack unwinding
  parisc: Remove unnecessary barriers from spinlock.h
  parisc: Remove ordered stores from syscall.S
  parisc: prefer _THIS_IP_ and _RET_IP_ statement expressions
  parisc: Add HAVE_REGS_AND_STACK_ACCESS_API feature
  parisc: Drop architecture-specific ENOTSUP define
  parisc: use generic dma_noncoherent_ops
  parisc: always use flush_kernel_dcache_range for DMA cache maintenance
  parisc: merge pcx_dma_ops and pcxl_dma_ops
Linus Torvalds 2018-08-13 19:18:02 -07:00
commit 2280a5360e
22 changed files with 270 additions and 456 deletions

arch/parisc/Kconfig

@@ -45,6 +45,7 @@ config PARISC
select HAVE_ARCH_HASH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_REGS_AND_STACK_ACCESS_API
select GENERIC_SCHED_CLOCK
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
select GENERIC_CLOCKEVENTS
@@ -187,6 +188,10 @@ config PA20
config PA11
def_bool y
depends on PA7000 || PA7100LC || PA7200 || PA7300LC
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_NONCOHERENT_OPS
select DMA_NONCOHERENT_CACHE_SYNC
config PREFETCH
def_bool y

arch/parisc/include/asm/assembly.h

@@ -36,6 +36,7 @@
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
#define REG_SZ 8
#define ASM_ULONG_INSN .dword
#else /* CONFIG_64BIT */
#define LDREG ldw
@@ -50,6 +51,7 @@
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128
#define REG_SZ 4
#define ASM_ULONG_INSN .word
#endif

arch/parisc/include/asm/dma-mapping.h

@@ -21,11 +21,6 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/
#ifdef CONFIG_PA11
extern const struct dma_map_ops pcxl_dma_ops;
extern const struct dma_map_ops pcx_dma_ops;
#endif
extern const struct dma_map_ops *hppa_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)

arch/parisc/include/asm/linkage.h

@@ -18,9 +18,9 @@
#ifdef __ASSEMBLY__
#define ENTRY(name) \
.export name !\
ALIGN !\
name:
ALIGN !\
name: ASM_NL\
.export name
#ifdef CONFIG_64BIT
#define ENDPROC(name) \
@@ -31,13 +31,18 @@ name:
END(name)
#endif
#define ENTRY_CFI(name) \
#define ENTRY_CFI(name, ...) \
ENTRY(name) ASM_NL\
.proc ASM_NL\
.callinfo __VA_ARGS__ ASM_NL\
.entry ASM_NL\
CFI_STARTPROC
#define ENDPROC_CFI(name) \
ENDPROC(name) ASM_NL\
CFI_ENDPROC
CFI_ENDPROC ASM_NL\
.exit ASM_NL\
.procend ASM_NL\
ENDPROC(name)
#endif /* __ASSEMBLY__ */
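
For illustration, here is roughly what the reworked macros emit (a sketch assembled from the macro bodies above, with ASM_NL rendered as line breaks; not text from the diff). A routine written as ENTRY_CFI(foo, caller,frame=0) ... ENDPROC_CFI(foo) now expands to approximately:

	ALIGN
foo:
	.export foo
	.proc
	.callinfo caller,frame=0
	.entry
	CFI_STARTPROC
	/* ... function body ... */
	CFI_ENDPROC
	.exit
	.procend
	ENDPROC(foo)

This is why the hand-coded .proc/.callinfo/.entry prologues and .exit/.procend epilogues can be deleted from entry.S, pacache.S and lusercopy.S in the hunks that follow.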

arch/parisc/include/asm/ptrace.h

@@ -25,4 +25,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->gr[20];
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->iaoq[0] = val;
}
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw))
#endif

arch/parisc/include/asm/spinlock.h

@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0)
@@ -30,17 +29,16 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
local_irq_disable();
} else
cpu_relax();
mb();
}
#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
*a = 1;
mb();
*a = 1;
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
volatile unsigned int *a;
int ret;
mb();
a = __ldcw_align(x);
ret = __ldcw(a) != 0;
mb();
return ret;
}

arch/parisc/include/asm/unwind.h

@@ -4,6 +4,9 @@
#include <linux/list.h>
/* Max number of levels to backtrace */
#define MAX_UNWIND_ENTRIES 30
/* From ABI specifications */
struct unwind_table_entry {
unsigned int region_start;

arch/parisc/include/uapi/asm/errno.h

@@ -113,7 +113,6 @@
#define ELOOP 249 /* Too many symbolic links encountered */
#define ENOSYS 251 /* Function not implemented */
#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */

arch/parisc/kernel/entry.S

@@ -766,7 +766,6 @@ END(fault_vector_11)
#endif
/* Fault vector is separately protected and *must* be on its own page */
.align PAGE_SIZE
ENTRY(end_fault_vector)
.import handle_interruption,code
.import do_cpu_irq_mask,code
@@ -778,7 +777,6 @@ ENTRY(end_fault_vector)
*/
ENTRY_CFI(ret_from_kernel_thread)
/* Call schedule_tail first though */
BL schedule_tail, %r2
nop
@@ -817,8 +815,9 @@ ENTRY_CFI(_switch_to)
LDREG TASK_THREAD_INFO(%r25), %r25
bv %r0(%r2)
mtctl %r25,%cr30
ENDPROC_CFI(_switch_to)
_switch_to_ret:
ENTRY_CFI(_switch_to_ret)
mtctl %r0, %cr0 /* Needed for single stepping */
callee_rest
callee_rest_float
@@ -826,7 +825,7 @@ _switch_to_ret:
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
ENDPROC_CFI(_switch_to)
ENDPROC_CFI(_switch_to_ret)
/*
* Common rfi return path for interruptions, kernel execve, and
@@ -887,12 +886,14 @@ ENTRY_CFI(syscall_exit_rfi)
STREG %r19,PT_SR5(%r16)
STREG %r19,PT_SR6(%r16)
STREG %r19,PT_SR7(%r16)
ENDPROC_CFI(syscall_exit_rfi)
intr_return:
ENTRY_CFI(intr_return)
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
ENDPROC_CFI(intr_return)
.import do_notify_resume,code
intr_check_sig:
@@ -1048,7 +1049,6 @@ intr_extint:
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
@@ -1999,12 +1999,9 @@ ENDPROC_CFI(syscall_exit)
.align L1_CACHE_BYTES
.globl mcount
.type mcount, @function
ENTRY(mcount)
ENTRY_CFI(mcount, caller)
_mcount:
.export _mcount,data
.proc
.callinfo caller,frame=0
.entry
/*
* The 64bit mcount() function pointer needs 4 dwords, of which the
* first two are free. We optimize it here and put 2 instructions for
@@ -2026,18 +2023,13 @@ ftrace_stub:
.dword mcount
.dword 0 /* code in head.S puts value of global gp here */
#endif
.exit
.procend
ENDPROC(mcount)
ENDPROC_CFI(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.align 8
.globl return_to_handler
.type return_to_handler, @function
ENTRY_CFI(return_to_handler)
.proc
.callinfo caller,frame=FRAME_SIZE
.entry
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
.export parisc_return_to_handler,data
parisc_return_to_handler:
copy %r3,%r1
@@ -2076,8 +2068,6 @@ parisc_return_to_handler:
bv %r0(%rp)
#endif
LDREGM -FRAME_SIZE(%sp),%r3
.exit
.procend
ENDPROC_CFI(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -2087,31 +2077,30 @@ ENDPROC_CFI(return_to_handler)
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
unsigned long new_stack) */
ENTRY_CFI(call_on_stack)
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
copy %sp, %r1
/* Regarding the HPPA calling conventions for function pointers,
we assume the PIC register is not changed across call. For
CONFIG_64BIT, the argument pointer is left to point at the
argument region allocated for the call to call_on_stack. */
/* Switch to new stack. We allocate two frames. */
ldo 2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
/* Switch to new stack. We allocate two 128 byte frames. */
ldo 256(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
STREG %rp, -144(%sp)
STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls always use function descriptor */
LDREG 16(%arg1), %arg1
bve,l (%arg1), %rp
STREG %r1, -136(%sp)
LDREG -144(%sp), %rp
STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bve (%rp)
LDREG -136(%sp), %sp
LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# else
/* Switch to new stack. We allocate two 64 byte frames. */
ldo 128(%arg2), %sp
/* Save previous stack pointer and return pointer in frame marker */
STREG %r1, -68(%sp)
STREG %rp, -84(%sp)
STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls use function descriptor if PLABEL bit is set */
bb,>=,n %arg1, 30, 1f
depwi 0,31,2, %arg1
@@ -2119,9 +2108,9 @@ ENTRY_CFI(call_on_stack)
1:
be,l 0(%sr4,%arg1), %sr0, %r31
copy %r31, %rp
LDREG -84(%sp), %rp
LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bv (%rp)
LDREG -68(%sp), %sp
LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
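
A quick sanity check on the hunk above: with the 64-bit constants from assembly.h (FRAME_SIZE 128, RP_OFFSET 16, REG_SZ 8), -FRAME_SIZE-RP_OFFSET and -FRAME_SIZE-REG_SZ evaluate to -144 and -136; with the 32-bit constants (FRAME_SIZE 64, RP_OFFSET 20, REG_SZ 4) they evaluate to -84 and -68. These match the magic frame-marker offsets the old code hard-coded, so the change is purely a readability cleanup.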

arch/parisc/kernel/pacache.S

@@ -44,10 +44,6 @@
.align 16
ENTRY_CFI(flush_tlb_all_local)
.proc
.callinfo NO_CALLS
.entry
/*
* The pitlbe and pdtlbe instructions should only be used to
* flush the entire tlb. Also, there needs to be no intervening
@@ -189,18 +185,11 @@ fdtdone:
2: bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
.proc
.callinfo NO_CALLS
.entry
load32 cache_info, %r1
/* Flush Instruction Cache */
@@ -256,18 +245,11 @@ fisync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
.proc
.callinfo NO_CALLS
.entry
load32 cache_info, %r1
/* Flush Data Cache */
@@ -324,9 +306,6 @@ fdsync:
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_data_cache_local)
/* Macros to serialize TLB purge operations on SMP. */
@@ -362,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local)
/* Clear page using kernel mapping. */
ENTRY_CFI(clear_page_asm)
.proc
.callinfo NO_CALLS
.entry
#ifdef CONFIG_64BIT
/* Unroll the loop. */
@@ -424,18 +399,11 @@ ENTRY_CFI(clear_page_asm)
#endif
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping. */
ENTRY_CFI(copy_page_asm)
.proc
.callinfo NO_CALLS
.entry
#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
@@ -542,9 +510,6 @@ ENTRY_CFI(copy_page_asm)
#endif
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(copy_page_asm)
/*
@@ -598,10 +563,6 @@ ENDPROC_CFI(copy_page_asm)
*/
ENTRY_CFI(copy_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
/* Convert virtual `to' and `from' addresses to physical addresses.
Move `from' physical address to non shadowed register. */
ldil L%(__PAGE_OFFSET), %r1
@@ -750,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm)
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
@@ -836,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm)
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -903,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
@@ -977,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1020,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
@@ -1062,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1083,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_user_dcache_range_asm)
ENTRY_CFI(flush_kernel_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1105,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1127,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
syncdma
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1148,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
.proc
.callinfo NO_CALLS
.entry
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
@@ -1191,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
@@ -1212,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
sync
bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(flush_kernel_icache_range_asm)
__INIT
@@ -1223,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm)
*/
.align 256
ENTRY_CFI(disable_sr_hashing_asm)
.proc
.callinfo NO_CALLS
.entry
/*
* Switch to real mode
*/
@@ -1308,9 +1186,6 @@ srdis_done:
2: bv %r0(%r2)
nop
.exit
.procend
ENDPROC_CFI(disable_sr_hashing_asm)
.end

arch/parisc/kernel/pci-dma.c

@@ -21,13 +21,12 @@
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
@@ -395,7 +394,7 @@ pcxl_dma_init(void)
__initcall(pcxl_dma_init);
static void *pa11_dma_alloc(struct device *dev, size_t size,
static void *pcxl_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
unsigned long vaddr;
@@ -422,158 +421,6 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
return (void *)vaddr;
}
static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
int order;
order = get_order(size);
size = 1 << (order + PAGE_SHIFT);
unmap_uncached_pages((unsigned long)vaddr, size);
pcxl_free_range((unsigned long)vaddr, size);
free_pages((unsigned long)__va(dma_handle), order);
}
static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
void *addr = page_address(page) + offset;
BUG_ON(direction == DMA_NONE);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
flush_kernel_dcache_range((unsigned long) addr, size);
return virt_to_phys(addr);
}
static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
BUG_ON(direction == DMA_NONE);
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
if (direction == DMA_TO_DEVICE)
return;
/*
* For PCI_DMA_FROMDEVICE this flush is not necessary for the
* simple map/unmap case. However, it IS necessary if
* pci_dma_sync_single_* has been called and the buffer reused.
*/
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
}
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
int i;
struct scatterlist *sg;
BUG_ON(direction == DMA_NONE);
for_each_sg(sglist, sg, nents, i) {
unsigned long vaddr = (unsigned long)sg_virt(sg);
sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
sg_dma_len(sg) = sg->length;
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;
flush_kernel_dcache_range(vaddr, sg->length);
}
return nents;
}
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
int i;
struct scatterlist *sg;
BUG_ON(direction == DMA_NONE);
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
if (direction == DMA_TO_DEVICE)
return;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for_each_sg(sglist, sg, nents, i)
flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
static void pa11_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
size);
}
static void pa11_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
struct scatterlist *sg;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for_each_sg(sglist, sg, nents, i)
flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
struct scatterlist *sg;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for_each_sg(sglist, sg, nents, i)
flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
flush_kernel_dcache_range((unsigned long)vaddr, size);
}
const struct dma_map_ops pcxl_dma_ops = {
.alloc = pa11_dma_alloc,
.free = pa11_dma_free,
.map_page = pa11_dma_map_page,
.unmap_page = pa11_dma_unmap_page,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
.sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
.sync_single_for_device = pa11_dma_sync_single_for_device,
.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.sync_sg_for_device = pa11_dma_sync_sg_for_device,
.cache_sync = pa11_dma_cache_sync,
};
static void *pcx_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
@@ -589,23 +436,45 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
return addr;
}
static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t iova, unsigned long attrs)
void *arch_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
return;
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
else
return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
}
const struct dma_map_ops pcx_dma_ops = {
.alloc = pcx_dma_alloc,
.free = pcx_dma_free,
.map_page = pa11_dma_map_page,
.unmap_page = pa11_dma_unmap_page,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
.sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
.sync_single_for_device = pa11_dma_sync_single_for_device,
.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.sync_sg_for_device = pa11_dma_sync_sg_for_device,
.cache_sync = pa11_dma_cache_sync,
};
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
int order = get_order(size);
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
size = 1 << (order + PAGE_SHIFT);
unmap_uncached_pages((unsigned long)vaddr, size);
pcxl_free_range((unsigned long)vaddr, size);
vaddr = __va(dma_handle);
}
free_pages((unsigned long)vaddr, get_order(size));
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
flush_kernel_dcache_range((unsigned long)vaddr, size);
}
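
To see how the hooks above get called, here is a rough sketch of the streaming-map path that replaces the removed pa11_dma_map_page(); this is an assumption simplified from the generic noncoherent DMA code of this era, not part of the diff:

/* Illustrative only: roughly what the DMA core does with the arch hooks.
 * On the machines using these ops the DMA address is simply the physical
 * address, and a "sync for device" is a data-cache flush. */
static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, paddr, size, dir);
	return paddr;
}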

arch/parisc/kernel/process.c

@@ -302,7 +302,7 @@ get_wchan(struct task_struct *p)
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
} while (count++ < 16);
} while (count++ < MAX_UNWIND_ENTRIES);
return 0;
}

arch/parisc/kernel/ptrace.c

@@ -676,3 +676,103 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
return &user_parisc_native_view;
}
/* HAVE_REGS_AND_STACK_ACCESS_API feature */
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_INDEX(gr,0),
REG_OFFSET_INDEX(gr,1),
REG_OFFSET_INDEX(gr,2),
REG_OFFSET_INDEX(gr,3),
REG_OFFSET_INDEX(gr,4),
REG_OFFSET_INDEX(gr,5),
REG_OFFSET_INDEX(gr,6),
REG_OFFSET_INDEX(gr,7),
REG_OFFSET_INDEX(gr,8),
REG_OFFSET_INDEX(gr,9),
REG_OFFSET_INDEX(gr,10),
REG_OFFSET_INDEX(gr,11),
REG_OFFSET_INDEX(gr,12),
REG_OFFSET_INDEX(gr,13),
REG_OFFSET_INDEX(gr,14),
REG_OFFSET_INDEX(gr,15),
REG_OFFSET_INDEX(gr,16),
REG_OFFSET_INDEX(gr,17),
REG_OFFSET_INDEX(gr,18),
REG_OFFSET_INDEX(gr,19),
REG_OFFSET_INDEX(gr,20),
REG_OFFSET_INDEX(gr,21),
REG_OFFSET_INDEX(gr,22),
REG_OFFSET_INDEX(gr,23),
REG_OFFSET_INDEX(gr,24),
REG_OFFSET_INDEX(gr,25),
REG_OFFSET_INDEX(gr,26),
REG_OFFSET_INDEX(gr,27),
REG_OFFSET_INDEX(gr,28),
REG_OFFSET_INDEX(gr,29),
REG_OFFSET_INDEX(gr,30),
REG_OFFSET_INDEX(gr,31),
REG_OFFSET_INDEX(sr,0),
REG_OFFSET_INDEX(sr,1),
REG_OFFSET_INDEX(sr,2),
REG_OFFSET_INDEX(sr,3),
REG_OFFSET_INDEX(sr,4),
REG_OFFSET_INDEX(sr,5),
REG_OFFSET_INDEX(sr,6),
REG_OFFSET_INDEX(sr,7),
REG_OFFSET_INDEX(iasq,0),
REG_OFFSET_INDEX(iasq,1),
REG_OFFSET_INDEX(iaoq,0),
REG_OFFSET_INDEX(iaoq,1),
REG_OFFSET_NAME(cr27),
REG_OFFSET_NAME(ksp),
REG_OFFSET_NAME(kpc),
REG_OFFSET_NAME(sar),
REG_OFFSET_NAME(iir),
REG_OFFSET_NAME(isr),
REG_OFFSET_NAME(ior),
REG_OFFSET_NAME(ipsw),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
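
As a usage sketch, a consumer of the new HAVE_REGS_AND_STACK_ACCESS_API helpers can turn a register name into a value by combining the offset lookup with pointer arithmetic on struct pt_regs. The helper below is hypothetical, not part of this commit:

/* Illustrative only: fetch a register by name, e.g. "gr2" (the return
 * pointer) or "sar"; valid names are those in regoffset_table above. */
static unsigned long regs_get_by_name(struct pt_regs *regs, const char *name)
{
	int offset = regs_query_register_offset(name);

	if (offset < 0)
		return 0;	/* unknown register name */
	return *(unsigned long *)((char *)regs + offset);
}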

arch/parisc/kernel/real2.S

@@ -35,12 +35,6 @@ real32_stack:
real64_stack:
.block 8192
#ifdef CONFIG_64BIT
# define REG_SZ 8
#else
# define REG_SZ 4
#endif
#define N_SAVED_REGS 9
save_cr_space:

arch/parisc/kernel/setup.c

@@ -97,14 +97,12 @@ void __init dma_ops_init(void)
panic( "PA-RISC Linux currently only supports machines that conform to\n"
"the PA-RISC 1.1 or 2.0 architecture specification.\n");
case pcxs:
case pcxt:
hppa_dma_ops = &pcx_dma_ops;
break;
case pcxl2:
pa7300lc_init();
case pcxl: /* falls through */
hppa_dma_ops = &pcxl_dma_ops;
case pcxs:
case pcxt:
hppa_dma_ops = &dma_noncoherent_ops;
break;
default:
break;

arch/parisc/kernel/syscall.S

@@ -629,12 +629,12 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
1: ldw,ma 0(%r26), %r28
1: ldw 0(%r26), %r28
sub,<> %r28, %r25, %r0
2: stw,ma %r24, 0(%r26)
2: stw %r24, 0(%r26)
/* Free lock */
sync
stw,ma %r20, 0(%sr2,%r20)
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@@ -798,30 +798,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
13: ldb,ma 0(%r26), %r29
13: ldb 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
14: stb,ma %r24, 0(%r26)
14: stb %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
15: ldh,ma 0(%r26), %r29
15: ldh 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
16: sth,ma %r24, 0(%r26)
16: sth %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
17: ldw,ma 0(%r26), %r29
17: ldw 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
18: stw,ma %r24, 0(%r26)
18: stw %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -829,10 +829,10 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
19: ldd,ma 0(%r26), %r29
19: ldd 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
20: std,ma %r24, 0(%r26)
20: std %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
@@ -851,7 +851,7 @@ cas2_action:
cas2_end:
/* Free lock */
sync
stw,ma %r20, 0(%sr2,%r20)
stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */

arch/parisc/kernel/traps.c

@@ -172,7 +172,7 @@ static void do_show_stack(struct unwind_frame_info *info)
int i = 1;
printk(KERN_CRIT "Backtrace:\n");
while (i <= 16) {
while (i <= MAX_UNWIND_ENTRIES) {
if (unwind_once(info) < 0 || info->ip == 0)
break;

arch/parisc/kernel/unwind.c

@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
@@ -117,7 +116,8 @@ unwind_table_init(struct unwind_table *table, const char *name,
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
pr_warn("Out of order unwind entry! %px and %px\n",
start, start+1);
}
start->region_start += base_addr;
@@ -203,26 +203,61 @@ int __init unwind_init(void)
return 0;
}
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
extern void handle_interruption(int, struct pt_regs *);
static unsigned long *hi = (unsigned long *)&handle_interruption;
/*
* We have to use void * instead of a function pointer, because
* function pointers aren't a pointer to the function on 64-bit.
* Make them const so the compiler knows they live in .text
*/
extern void * const handle_interruption;
extern void * const ret_from_kernel_thread;
extern void * const syscall_exit;
extern void * const intr_return;
extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
extern void * const call_on_stack;
#endif /* CONFIG_IRQSTACKS */
if (pc == get_func_addr(hi)) {
if (pc == (unsigned long) &handle_interruption) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
return 1;
}
if (pc == (unsigned long) &ret_from_kernel_thread ||
pc == (unsigned long) &syscall_exit) {
info->prev_sp = info->prev_ip = 0;
return 1;
}
if (pc == (unsigned long) &intr_return) {
struct pt_regs *regs;
dbg("Found intr_return()\n");
regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
info->rp = regs->gr[2];
return 1;
}
if (pc == (unsigned long) &_switch_to_ret) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
return 1;
}
#ifdef CONFIG_IRQSTACKS
if (pc == (unsigned long) &call_on_stack) {
info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;
}
#endif
return 0;
}
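
The 'void * const' declarations above sidestep 64-bit function descriptors. As background, and as an assumption paraphrased from the parisc module loader's Elf64_Fdesc (compare the removed get_func_addr(), which read fptr[2]), a 64-bit parisc function pointer refers to a structure along these lines rather than to the code itself:

/* Hypothetical sketch of a 64-bit parisc function descriptor. */
struct fdesc_sketch {
	unsigned long dummy[2];
	unsigned long addr;	/* code address: the fptr[2] the old macro read */
	unsigned long gp;	/* callee's global pointer */
};

Declaring the symbols as plain data objects instead makes &handle_interruption and friends yield the raw .text addresses the unwinder needs for its comparisons.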
@@ -238,34 +273,8 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
if (e == NULL) {
unsigned long sp;
dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
#ifdef CONFIG_KALLSYMS
/* Handle some frequent special cases.... */
{
char symname[KSYM_NAME_LEN];
char *modname;
kallsyms_lookup(info->ip, NULL, NULL, &modname,
symname);
dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
if (strcmp(symname, "_switch_to_ret") == 0) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
dbg("_switch_to_ret @ %lx - setting "
"prev_sp=%lx prev_ip=%lx\n",
info->ip, info->prev_sp,
info->prev_ip);
return;
} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
strcmp(symname, "syscall_exit") == 0) {
info->prev_ip = info->prev_sp = 0;
return;
}
}
#endif
dbg("Cannot find unwind entry for %pS; forced unwinding\n",
(void *) info->ip);
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp
@@ -439,8 +448,8 @@ unsigned long return_address(unsigned int level)
/* initialize unwind info */
asm volatile ("copy %%r30, %0" : "=r"(sp));
memset(&r, 0, sizeof(struct pt_regs));
r.iaoq[0] = (unsigned long) current_text_addr();
r.gr[2] = (unsigned long) __builtin_return_address(0);
r.iaoq[0] = _THIS_IP_;
r.gr[2] = _RET_IP_;
r.gr[30] = sp;
unwind_frame_init(&info, current, &r);
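
For reference, the generic statement expressions that replace current_text_addr() here come from include/linux/kernel.h and are defined roughly as follows (quoted from memory, so treat as approximate):

#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
#define _RET_IP_   (unsigned long)__builtin_return_address(0)

Both evaluate inside the caller itself, which is exactly what is wanted when seeding r.iaoq[0] and r.gr[2] for the unwinder.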

arch/parisc/lib/lusercopy.S

@@ -64,9 +64,6 @@
*/
ENTRY_CFI(lclear_user)
.proc
.callinfo NO_CALLS
.entry
comib,=,n 0,%r25,$lclu_done
get_sr
$lclu_loop:
@@ -81,13 +78,9 @@ $lclu_done:
ldo 1(%r25),%r25
ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
.exit
ENDPROC_CFI(lclear_user)
.procend
/*
* long lstrnlen_user(char *s, long n)
*
@@ -97,9 +90,6 @@ ENDPROC_CFI(lclear_user)
*/
ENTRY_CFI(lstrnlen_user)
.proc
.callinfo NO_CALLS
.entry
comib,= 0,%r25,$lslen_nzero
copy %r26,%r24
get_sr
@@ -111,7 +101,6 @@ $lslen_loop:
$lslen_done:
bv %r0(%r2)
sub %r26,%r24,%r28
.exit
$lslen_nzero:
b $lslen_done
@@ -125,9 +114,6 @@ $lslen_nzero:
ENDPROC_CFI(lstrnlen_user)
.procend
/*
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
@@ -186,10 +172,6 @@ ENDPROC_CFI(lstrnlen_user)
save_len = r31
ENTRY_CFI(pa_memcpy)
.proc
.callinfo NO_CALLS
.entry
/* Last destination address */
add dst,len,end
@@ -439,9 +421,6 @@ ENTRY_CFI(pa_memcpy)
b .Lcopy_done
10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
.exit
ENDPROC_CFI(pa_memcpy)
.procend
.end

arch/parisc/mm/init.c

@@ -19,7 +19,6 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
@@ -616,17 +615,13 @@ void __init mem_init(void)
free_all_bootmem();
#ifdef CONFIG_PA11
if (hppa_dma_ops == &pcxl_dma_ops) {
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+ PCXL_DMA_MAP_SIZE);
} else {
pcxl_dma_start = 0;
parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
}
#else
parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
} else
#endif
parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
mem_init_print_info(NULL);

kernel/time/posix-timers.c

@@ -85,15 +85,6 @@ static const struct k_clock clock_realtime, clock_monotonic;
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
/*
* parisc wants ENOTSUP instead of EOPNOTSUPP
*/
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif
/*
* The timer ID is turned into a timer address by idr_find().
* Verifying a valid ID consists of:
@@ -1231,7 +1222,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
if (!kc)
return -EINVAL;
if (!kc->nsleep)
return -ENANOSLEEP_NOTSUP;
return -EOPNOTSUPP;
if (get_timespec64(&t, rqtp))
return -EFAULT;
@@ -1258,7 +1249,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
if (!kc)
return -EINVAL;
if (!kc->nsleep)
return -ENANOSLEEP_NOTSUP;
return -EOPNOTSUPP;
if (compat_get_timespec64(&t, rqtp))
return -EFAULT;

tools/arch/parisc/include/uapi/asm/errno.h

@@ -113,7 +113,6 @@
#define ELOOP 249 /* Too many symbolic links encountered */
#define ENOSYS 251 /* Function not implemented */
#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */