Merge branch 'parisc-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
 "Many great new features, fixes and optimizations, including:

   - Convert page table updates to use per-pagetable spinlocks which
     overall improves performance on SMP machines a lot, by Mikulas
     Patocka

   - Kernel debugger (KGDB) support, by Sven Schnelle

   - KPROBES support, by Sven Schnelle

   - Lots of TLB lock/flush improvements, by Dave Anglin

   - Drop DISCONTIGMEM and switch to SPARSEMEM

   - Added JUMP_LABEL, branch runtime-patching support

   - Lots of other small speedups and cleanups, e.g. for QEMU, stack
     randomization, avoidance of name clashes, documentation updates,
     etc ..."

* 'parisc-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (28 commits)
  parisc: Add static branch and JUMP_LABEL feature
  parisc: Use PA_ASM_LEVEL in boot code
  parisc: Rename LEVEL to PA_ASM_LEVEL to avoid name clash with DRBD code
  parisc: Update huge TLB page support to use per-pagetable spinlock
  parisc: Use per-pagetable spinlock
  parisc: Allow live-patching of __meminit functions
  parisc: Add memory barrier to asm pdc and sync instructions
  parisc: Add memory clobber to TLB purges
  parisc: Use ldcw instruction for SMP spinlock release barrier
  parisc: Remove lock code to serialize TLB operations in pacache.S
  parisc: Switch from DISCONTIGMEM to SPARSEMEM
  parisc: enable wide mode early
  parisc: update feature lists
  parisc: Show n/a if product number not available
  parisc: remove unused flags parameter in __patch_text()
  doc: update kprobes supported architecture list
  parisc: Implement kretprobes
  parisc: remove kprobes.h from generic-y
  parisc: Implement kprobes
  parisc: add functions required by KPROBE_EVENTS
  ...
Linus Torvalds 2019-05-07 19:34:17 -07:00
commit d3511f53bb
50 changed files with 1258 additions and 295 deletions

@@ -21,7 +21,7 @@
| nds32: | TODO |
| nios2: | ok |
| openrisc: | TODO |
| parisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
| riscv: | TODO |
| s390: | TODO |

@@ -21,7 +21,7 @@
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
| parisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
| riscv: | ok |
| s390: | ok |

@@ -21,7 +21,7 @@
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
| parisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
| riscv: | TODO |
| s390: | ok |

@@ -321,6 +321,7 @@ architectures:
- ppc
- mips
- s390
- parisc
Configuring Kprobes
===================

@@ -36,6 +36,7 @@ config PARISC
select GENERIC_STRNCPY_FROM_USER
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
select ARCH_DISCARD_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC
select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
@@ -44,6 +45,8 @@ config PARISC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HASH
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_REGS_AND_STACK_ACCESS_API
@@ -54,6 +57,9 @@ config PARISC
select CPU_NO_EFFICIENT_FFS
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
select HAVE_ARCH_KGDB
select HAVE_KPROBES
select HAVE_KRETPROBES
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
@@ -305,21 +311,16 @@ config ARCH_SELECT_MEMORY_MODEL
def_bool y
depends on 64BIT
config ARCH_DISCONTIGMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on 64BIT
config ARCH_FLATMEM_ENABLE
def_bool y
config ARCH_DISCONTIGMEM_DEFAULT
config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on ARCH_DISCONTIGMEM_ENABLE
config NODES_SHIFT
int
default "3"
depends on NEED_MULTIPLE_NODES
depends on ARCH_SPARSEMEM_ENABLE
source "kernel/Kconfig.hz"

@@ -22,7 +22,7 @@
__HEAD
ENTRY(startup)
.level LEVEL
.level PA_ASM_LEVEL
#define PSW_W_SM 0x200
#define PSW_W_BIT 36
@@ -63,7 +63,7 @@ $bss_loop:
load32 BOOTADDR(decompress_kernel),%r3
#ifdef CONFIG_64BIT
.level LEVEL
.level PA_ASM_LEVEL
ssm PSW_W_SM, %r0 /* set W-bit */
depdi 0, 31, 32, %r3
#endif
@@ -72,7 +72,7 @@ $bss_loop:
startup_continue:
#ifdef CONFIG_64BIT
.level LEVEL
.level PA_ASM_LEVEL
rsm PSW_W_SM, %r0 /* clear W-bit */
#endif

@@ -145,14 +145,13 @@ static int putchar(int c)
void __noreturn error(char *x)
{
puts("\n\n");
puts(x);
puts("\n\n -- System halted");
if (x) puts(x);
puts("\n -- System halted\n");
while (1) /* wait forever */
;
}
static int print_hex(unsigned long num)
static int print_num(unsigned long num, int base)
{
const char hex[] = "0123456789abcdef";
char str[40];
@@ -160,12 +159,14 @@ static int print_hex(unsigned long num)
str[i--] = '\0';
do {
str[i--] = hex[num & 0x0f];
num >>= 4;
str[i--] = hex[num % base];
num = num / base;
} while (num);
str[i--] = 'x';
str[i] = '0';
if (base == 16) {
str[i--] = 'x';
str[i] = '0';
} else i++;
puts(&str[i]);
return 0;
@@ -187,8 +188,9 @@ put:
if (fmt[++i] == '%')
goto put;
print_num(va_arg(args, unsigned long),
fmt[i] == 'x' ? 16:10);
++i;
print_hex(va_arg(args, unsigned long));
}
va_end(args);
@@ -327,8 +329,15 @@ unsigned long decompress_kernel(unsigned int started_wide,
free_mem_end_ptr = rd_start;
#endif
if (free_mem_ptr >= free_mem_end_ptr)
error("Kernel too big for machine.");
if (free_mem_ptr >= free_mem_end_ptr) {
int free_ram;
free_ram = (free_mem_ptr >> 20) + 1;
if (free_ram < 32)
free_ram = 32;
printf("\nKernel requires at least %d MB RAM.\n",
free_ram);
error(NULL);
}
#ifdef DEBUG
printf("\n");

@@ -10,7 +10,6 @@ generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h

@@ -61,14 +61,14 @@
#define LDCW ldcw,co
#define BL b,l
# ifdef CONFIG_64BIT
# define LEVEL 2.0w
# define PA_ASM_LEVEL 2.0w
# else
# define LEVEL 2.0
# define PA_ASM_LEVEL 2.0
# endif
#else
#define LDCW ldcw
#define BL bl
#define LEVEL 1.1
#define PA_ASM_LEVEL 1.1
#endif
#ifdef __ASSEMBLY__

@@ -44,22 +44,22 @@ void parisc_setup_cache_timing(void);
#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
: : "r" (addr))
: : "r" (addr) : "memory")
#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
: : "r" (addr))
: : "r" (addr) : "memory")
#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
: : "r" (addr))
: : "r" (addr) : "memory")
#define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \
ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
: : "r" (addr))
: : "r" (addr) : "memory")
#define asm_io_sync() asm volatile("sync" \
ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
#endif /* ! __ASSEMBLY__ */

@@ -15,17 +15,34 @@
* from areas congruently mapped with user space. It is 8MB large
* and must be 16MB aligned */
#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - 16*1024*1024)
#define FIXMAP_SIZE (FIX_BITMAP_COUNT << PAGE_SHIFT)
#define FIXMAP_START (TMPALIAS_MAP_START - FIXMAP_SIZE)
/* This is the kernel area for all maps (vmalloc, dma etc.) most
* usually, it extends up to TMPALIAS_MAP_START. Virtual addresses
* 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */
#define KERNEL_MAP_START (GATEWAY_PAGE_SIZE)
#define KERNEL_MAP_END (TMPALIAS_MAP_START)
#define KERNEL_MAP_END (FIXMAP_START)
#ifndef __ASSEMBLY__
enum fixed_addresses {
/* Support writing RO kernel text via kprobes, jump labels, etc. */
FIX_TEXT_POKE0,
FIX_BITMAP_COUNT
};
extern void *parisc_vmalloc_start;
#define PCXL_DMA_MAP_SIZE (8*1024*1024)
#define VMALLOC_START ((unsigned long)parisc_vmalloc_start)
#define VMALLOC_END (KERNEL_MAP_END)
#define __fix_to_virt(_x) (FIXMAP_START + ((_x) << PAGE_SHIFT))
void set_fixmap(enum fixed_addresses idx, phys_addr_t phys);
void clear_fixmap(enum fixed_addresses idx);
#endif /*__ASSEMBLY__*/
#endif /*_ASM_FIXMAP_H*/

@@ -120,7 +120,7 @@ extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
extern void init_parisc_bus(void);
extern struct device *hwpath_to_device(struct hardware_path *modpath);
extern void device_to_hwpath(struct device *dev, struct hardware_path *path);
extern int machine_has_merced_bus(void);
/* inventory.c: */
extern void do_memory_inventory(void);

@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PARISC_JUMP_LABEL_H
#define _ASM_PARISC_JUMP_LABEL_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/assembly.h>
#define JUMP_LABEL_NOP_SIZE 4
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b - ., %l[l_yes] - .\n\t"
__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
return true;
}
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
"b,n %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b - ., %l[l_yes] - .\n\t"
__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
return true;
}
#endif /* __ASSEMBLY__ */
#endif

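The two hooks above only emit the NOP and branch encodings; flipping between them is driven by the generic static-key API. A minimal usage sketch (key and function names are invented for illustration, not part of this diff):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_feature_key); /* fast path starts as NOP */

static void slow_feature_path(void)
{
        /* rarely-taken work lives out of line */
}

void hot_path(void)
{
        /* compiles to the arch_static_branch() NOP above; rewritten to a
         * "b,n" branch at runtime by arch_jump_label_transform() */
        if (static_branch_unlikely(&my_feature_key))
                slow_feature_path();
}

void enable_feature(void)
{
        static_branch_enable(&my_feature_key); /* patches every call site */
}
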
@@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* PA-RISC KGDB support
*
* Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
*
*/
#ifndef __PARISC_KGDB_H__
#define __PARISC_KGDB_H__
#define BREAK_INSTR_SIZE 4
#define PARISC_KGDB_COMPILED_BREAK_INSN 0x3ffc01f
#define PARISC_KGDB_BREAK_INSN 0x3ffa01f
#define NUMREGBYTES sizeof(struct parisc_gdb_regs)
#define BUFMAX 4096
#define CACHE_FLUSH_IS_SAFE 1
#ifndef __ASSEMBLY__
static inline void arch_kgdb_breakpoint(void)
{
asm(".word %0" : : "i"(PARISC_KGDB_COMPILED_BREAK_INSN) : "memory");
}
struct parisc_gdb_regs {
unsigned long gpr[32];
unsigned long sar;
unsigned long iaoq_f;
unsigned long iasq_f;
unsigned long iaoq_b;
unsigned long iasq_b;
unsigned long eiem;
unsigned long iir;
unsigned long isr;
unsigned long ior;
unsigned long ipsw;
unsigned long __unused0;
unsigned long sr4;
unsigned long sr0;
unsigned long sr1;
unsigned long sr2;
unsigned long sr3;
unsigned long sr5;
unsigned long sr6;
unsigned long sr7;
unsigned long cr0;
unsigned long pid1;
unsigned long pid2;
unsigned long scrccr;
unsigned long pid3;
unsigned long pid4;
unsigned long cr24;
unsigned long cr25;
unsigned long cr26;
unsigned long cr27;
unsigned long cr28;
unsigned long cr29;
unsigned long cr30;
u64 fr[32];
};
#endif
#endif

@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* arch/parisc/include/asm/kprobes.h
*
* PA-RISC kprobes implementation
*
* Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
*/
#ifndef _PARISC_KPROBES_H
#define _PARISC_KPROBES_H
#ifdef CONFIG_KPROBES
#include <asm-generic/kprobes.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#define PARISC_KPROBES_BREAK_INSN 0x3ff801f
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 1
typedef u32 kprobe_opcode_t;
struct kprobe;
void arch_remove_kprobe(struct kprobe *p);
#define flush_insn_slot(p) \
flush_icache_range((unsigned long)&(p)->ainsn.insn[0], \
(unsigned long)&(p)->ainsn.insn[0] + \
sizeof(kprobe_opcode_t))
#define kretprobe_blacklist_size 0
struct arch_specific_insn {
kprobe_opcode_t *insn;
};
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
};
struct kprobe_ctlblk {
unsigned int kprobe_status;
struct prev_kprobe prev_kprobe;
unsigned long iaoq[2];
};
int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs);
int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
#endif /* _PARISC_KPROBES_H */

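For context, the break-instruction flow above is driven through the generic kprobes API; a minimal registration sketch (handler and target names are arbitrary examples):

#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        /* runs when PARISC_KPROBES_BREAK_INSN traps at p->addr */
        pr_info("probe hit at %pS, iaoq[0]=%#lx\n", p->addr, regs->iaoq[0]);
        return 0;       /* 0: continue and single-step the saved insn copy */
}

static struct kprobe my_kp = {
        .symbol_name = "do_sys_open",   /* arbitrary probe target */
        .pre_handler = my_pre_handler,
};

static int __init my_probe_init(void)
{
        return register_kprobe(&my_kp); /* arms via arch_arm_kprobe() */
}
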
@@ -2,62 +2,6 @@
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
#define MAX_PHYSMEM_RANGES 4 /* Fix the size for now (current known max is 3) */
#ifdef CONFIG_DISCONTIGMEM
extern int npmem_ranges;
struct node_map_data {
pg_data_t pg_data;
};
extern struct node_map_data node_data[];
#define NODE_DATA(nid) (&node_data[nid].pg_data)
/* We have these possible memory map layouts:
* Astro: 0-3.75, 67.75-68, 4-64
* zx1: 0-1, 257-260, 4-256
* Stretch (N-class): 0-2, 4-32, 34-xxx
*/
/* Since each 1GB can only belong to one region (node), we can create
* an index table for pfn to nid lookup; each entry in pfnnid_map
* represents 1GB, and contains the node that the memory belongs to. */
#define PFNNID_SHIFT (30 - PAGE_SHIFT)
#define PFNNID_MAP_MAX 512 /* support 512GB */
extern signed char pfnnid_map[PFNNID_MAP_MAX];
#ifndef CONFIG_64BIT
#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
#else
/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
#endif
static inline int pfn_to_nid(unsigned long pfn)
{
unsigned int i;
if (unlikely(pfn_is_io(pfn)))
return 0;
i = pfn >> PFNNID_SHIFT;
BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
return pfnnid_map[i];
}
static inline int pfn_valid(int pfn)
{
int nid = pfn_to_nid(pfn);
if (nid >= 0)
return (pfn < node_end_pfn(nid));
return 0;
}
#endif
#endif /* _PARISC_MMZONE_H */

@@ -147,9 +147,9 @@ extern int npmem_ranges;
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#ifndef CONFIG_DISCONTIGMEM
#ifndef CONFIG_SPARSEMEM
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_DISCONTIGMEM */
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */

@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_KERNEL_PATCH_H
#define _PARISC_KERNEL_PATCH_H
/* stop machine and patch kernel text */
void patch_text(void *addr, unsigned int insn);
/* patch kernel text with machine already stopped (e.g. in kgdb) */
void __patch_text(void *addr, unsigned int insn);
#endif

@@ -41,6 +41,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
}
spin_lock_init(pgd_spinlock(actual_pgd));
return actual_pgd;
}

@@ -17,7 +17,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
extern spinlock_t pa_tlb_lock;
static inline spinlock_t *pgd_spinlock(pgd_t *);
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -34,16 +34,46 @@ extern spinlock_t pa_tlb_lock;
*/
#define kern_addr_valid(addr) (1)
/* Purge data and instruction TLB entries. Must be called holding
* the pa_tlb_lock. The TLB purge instructions are slow on SMP
* machines since the purge must be broadcast to all CPUs.
/* This is for the serialization of PxTLB broadcasts. At least on the N class
* systems, only one PxTLB inter processor broadcast can be active at any one
* time on the Merced bus.
* PTE updates are protected by locks in the PMD.
*/
extern spinlock_t pa_tlb_flush_lock;
extern spinlock_t pa_swapper_pg_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes (0)
#endif
#define purge_tlb_start(flags) do { \
if (pa_serialize_tlb_flushes) \
spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
else \
local_irq_save(flags); \
} while (0)
#define purge_tlb_end(flags) do { \
if (pa_serialize_tlb_flushes) \
spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
else \
local_irq_restore(flags); \
} while (0)
/* Purge data and instruction TLB entries. The TLB purge instructions
* are slow on SMP machines since the purge must be broadcast to all CPUs.
*/
static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags;
purge_tlb_start(flags);
mtsp(mm->context, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
}
/* Certain architectures need to do special things when PTEs
@@ -59,11 +89,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
do { \
pte_t old_pte; \
unsigned long flags; \
spin_lock_irqsave(&pa_tlb_lock, flags); \
spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
old_pte = *ptep; \
set_pte(ptep, pteval); \
purge_tlb_entries(mm, addr); \
spin_unlock_irqrestore(&pa_tlb_lock, flags); \
spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
} while (0)
#endif /* !__ASSEMBLY__ */
@@ -88,10 +118,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
#define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd */
#else
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER PGD_ORDER
#define PGD_ALLOC_ORDER (PGD_ORDER + 1)
#endif
/* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -459,6 +489,15 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
{
if (unlikely(pgd == swapper_pg_dir))
return &pa_swapper_pg_lock;
return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
}
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
@@ -467,15 +506,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
if (!pte_young(*ptep))
return 0;
spin_lock_irqsave(&pa_tlb_lock, flags);
spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
pte = *ptep;
if (!pte_young(pte)) {
spin_unlock_irqrestore(&pa_tlb_lock, flags);
spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
return 0;
}
set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
return 1;
}
@@ -485,11 +524,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t old_pte;
unsigned long flags;
spin_lock_irqsave(&pa_tlb_lock, flags);
spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
old_pte = *ptep;
set_pte(ptep, __pte(0));
purge_tlb_entries(mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
return old_pte;
}
@@ -497,10 +536,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
unsigned long flags;
spin_lock_irqsave(&pa_tlb_lock, flags);
spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))

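The per-pagetable lock lives in the extra page that the enlarged PGD_ALLOC_ORDER appends behind the pgd entries, which is exactly the offset pgd_spinlock() above computes. A sketch of the layout plus a hypothetical locked update, assuming the definitions in this diff:

/*
 * pgd allocation layout after this change (illustration):
 *
 *   pgd + 0
 *       pgd (and, with 3 levels, the embedded pmd) entry pages
 *   pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1))
 *       spinlock_t guarding this pagetable, initialized in pgd_alloc();
 *       swapper_pg_dir has no tail page, so it maps to pa_swapper_pg_lock
 */
static void locked_pte_update_example(struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
        /* ... set_pte() + purge_tlb_entries() ... */
        spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
}
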
@@ -37,4 +37,17 @@ extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw))
#define kernel_stack_pointer(regs) ((regs)->gr[30])
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
return *(unsigned long *)((unsigned long)regs + offset);
}
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
#endif

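regs_get_register() above takes a byte offset into pt_regs, which pairs naturally with offsetof(); a sketch reading the first argument register (%r26 in the parisc calling convention; the helper name is hypothetical):

#include <linux/ptrace.h>
#include <linux/stddef.h>

static unsigned long get_arg0(struct pt_regs *regs)
{
        return regs_get_register(regs, offsetof(struct pt_regs, gr[26]));
}
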
@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_PARISC_SPARSEMEM_H
#define ASM_PARISC_SPARSEMEM_H
/* We have these possible memory map layouts:
* Astro: 0-3.75, 67.75-68, 4-64
* zx1: 0-1, 257-260, 4-256
* Stretch (N-class): 0-2, 4-32, 34-xxx
*/
#define MAX_PHYSMEM_BITS 39 /* 512 GB */
#define SECTION_SIZE_BITS 27 /* 128 MB */
#endif

@@ -37,7 +37,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
#ifdef CONFIG_SMP
(void) __ldcw(a);
#else
mb();
#endif
*a = 1;
}

@@ -8,21 +8,6 @@
#include <linux/sched.h>
#include <asm/mmu_context.h>
/* This is for the serialisation of PxTLB broadcasts. At least on the
* N class systems, only one PxTLB inter processor broadcast can be
* active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
* it on all systems not just the N class.
* It is also used to ensure PTE updates are atomic and consistent
* with the TLB.
*/
extern spinlock_t pa_tlb_lock;
#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags)
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);
@@ -79,13 +64,6 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
unsigned long flags, sid;
sid = vma->vm_mm->context;
purge_tlb_start(flags);
mtsp(sid, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
purge_tlb_entries(vma->vm_mm, addr);
}
#endif

@@ -9,7 +9,8 @@ obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
ptrace.o hardware.o inventory.o drivers.o alternative.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
patch.o
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -32,3 +33,6 @@ obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y)
obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o

@@ -40,12 +40,19 @@ void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* On some machines (e.g. ones with the Merced bus), there can be
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We put a spinlock around all TLB flushes to
* ensure this.
* by software. We need a spinlock around all TLB flushes to ensure
* this.
*/
DEFINE_SPINLOCK(pa_tlb_lock);
DEFINE_SPINLOCK(pa_tlb_flush_lock);
/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __read_mostly;
#endif
struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20

@@ -38,6 +38,7 @@
#include <asm/io.h>
#include <asm/pdc.h>
#include <asm/parisc-device.h>
#include <asm/ropes.h>
/* See comments in include/asm-parisc/pci.h */
const struct dma_map_ops *hppa_dma_ops __read_mostly;
@@ -257,6 +258,30 @@ static struct parisc_device *find_device_by_addr(unsigned long hpa)
return ret ? d.dev : NULL;
}
static int __init is_IKE_device(struct device *dev, void *data)
{
struct parisc_device *pdev = to_parisc_device(dev);
if (!check_dev(dev))
return 0;
if (pdev->id.hw_type != HPHW_BCPORT)
return 0;
if (IS_IKE(pdev) ||
(pdev->id.hversion == REO_MERCED_PORT) ||
(pdev->id.hversion == REOG_MERCED_PORT)) {
return 1;
}
return 0;
}
int __init machine_has_merced_bus(void)
{
int ret;
ret = for_each_padev(is_IKE_device, NULL);
return ret ? 1 : 0;
}
/**
* find_pa_parent_type - Find a parent of a specific type
* @dev: The device to start searching from

@@ -50,12 +50,8 @@
.import pa_tlb_lock,data
.macro load_pa_tlb_lock reg
#if __PA_LDCW_ALIGNMENT > 4
load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
#else
load32 PA(pa_tlb_lock), \reg
#endif
mfctl %cr25,\reg
addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
.endm
/* space_to_prot macro creates a prot id from a space id */
@@ -471,8 +467,9 @@
nop
LDREG 0(\ptp),\pte
bb,<,n \pte,_PAGE_PRESENT_BIT,3f
LDCW 0(\tmp),\tmp1
b \fault
stw,ma \spc,0(\tmp)
stw \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2: LDREG 0(\ptp),\pte
@@ -481,20 +478,22 @@
.endm
/* Release pa_tlb_lock lock without reloading lock address. */
.macro tlb_unlock0 spc,tmp
.macro tlb_unlock0 spc,tmp,tmp1
#ifdef CONFIG_SMP
98: or,COND(=) %r0,\spc,%r0
stw,ma \spc,0(\tmp)
LDCW 0(\tmp),\tmp1
or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Release pa_tlb_lock lock. */
.macro tlb_unlock1 spc,tmp
.macro tlb_unlock1 spc,tmp,tmp1
#ifdef CONFIG_SMP
98: load_pa_tlb_lock \tmp
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
tlb_unlock0 \spc,\tmp
tlb_unlock0 \spc,\tmp,\tmp1
#endif
.endm
@@ -1177,7 +1176,7 @@ dtlb_miss_20w:
idtlbt pte,prot
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1203,7 +1202,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1237,7 +1236,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1270,7 +1269,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1299,7 +1298,7 @@ dtlb_miss_20:
idtlbt pte,prot
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1327,7 +1326,7 @@ nadtlb_miss_20:
idtlbt pte,prot
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1434,7 +1433,7 @@ itlb_miss_20w:
iitlbt pte,prot
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1458,7 +1457,7 @@ naitlb_miss_20w:
iitlbt pte,prot
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1492,7 +1491,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1516,7 +1515,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1546,7 +1545,7 @@ itlb_miss_20:
iitlbt pte,prot
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1566,7 +1565,7 @@ naitlb_miss_20:
iitlbt pte,prot
tlb_unlock1 spc,t0
tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1596,7 +1595,7 @@ dbit_trap_20w:
idtlbt pte,prot
tlb_unlock0 spc,t0
tlb_unlock0 spc,t0,t1
rfir
nop
#else
@@ -1622,7 +1621,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock0 spc,t0
tlb_unlock0 spc,t0,t1
rfir
nop
@@ -1642,7 +1641,7 @@ dbit_trap_20:
idtlbt pte,prot
tlb_unlock0 spc,t0
tlb_unlock0 spc,t0,t1
rfir
nop
#endif

@@ -22,7 +22,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
.level LEVEL
.level PA_ASM_LEVEL
__INITDATA
ENTRY(boot_args)
@@ -258,7 +258,7 @@ stext_pdc_ret:
ldo R%PA(fault_vector_11)(%r10),%r10
$is_pa20:
.level LEVEL /* restore 1.1 || 2.0w */
.level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
load32 PA(fault_vector_20),%r10
@@ -329,6 +329,19 @@ smp_slave_stext:
mtsp %r0,%sr6
mtsp %r0,%sr7
#ifdef CONFIG_64BIT
/*
* Enable Wide mode early, in case the task_struct for the idle
* task in smp_init_current_idle_task was allocated above 4GB.
*/
1: mfia %rp /* clear upper part of pcoq */
ldo 2f-1b(%rp),%rp
depdi 0,31,32,%rp
bv (%rp)
ssm PSW_SM_W,%r0
2:
#endif
/* Initialize the SP - monarch sets up smp_init_current_idle_task */
load32 PA(smp_init_current_idle_task),%sp
LDREG 0(%sp),%sp /* load task address */

@@ -31,6 +31,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
#include <asm/tlbflush.h>
/*
** Debug options
@@ -638,4 +639,10 @@ void __init do_device_inventory(void)
}
printk(KERN_INFO "Found devices:\n");
print_parisc_devices();
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
pa_serialize_tlb_flushes = machine_has_merced_bus();
if (pa_serialize_tlb_flushes)
pr_info("Merced bus found: Enable PxTLB serialization.\n");
#endif
}

@@ -0,0 +1,55 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Helge Deller <deller@gmx.de>
*
* Based on arch/arm64/kernel/jump_label.c
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/bug.h>
#include <asm/alternative.h>
#include <asm/patch.h>
static inline int reassemble_17(int as17)
{
return (((as17 & 0x10000) >> 16) |
((as17 & 0x0f800) << 5) |
((as17 & 0x00400) >> 8) |
((as17 & 0x003ff) << 3));
}
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
void *addr = (void *)jump_entry_code(entry);
u32 insn;
if (type == JUMP_LABEL_JMP) {
void *target = (void *)jump_entry_target(entry);
int distance = target - addr;
/*
* Encode the PA1.1 "b,n" instruction with a 17-bit
* displacement. In case we hit the BUG(), we could use
* another branch instruction with a 22-bit displacement on
* 64-bit CPUs instead. But this seems sufficient for now.
*/
distance -= 8;
BUG_ON(distance > 262143 || distance < -262144);
insn = 0xe8000002 | reassemble_17(distance >> 2);
} else {
insn = INSN_NOP;
}
patch_text(addr, insn);
}
void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
/*
* We use the architected NOP in arch_static_branch, so there's no
* need to patch an identical NOP over the top of it here. The core
* will call arch_jump_label_transform from a module notifier if the
* NOP needs to be replaced by a branch.
*/
}

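The branch displacement fed to reassemble_17() is in words, after subtracting the 8 bytes the instruction-address queue has already advanced. A standalone host-side sanity check of the encoding, with an assumed example distance:

#include <stdio.h>

static int reassemble_17(int as17)
{
        return (((as17 & 0x10000) >> 16) |
                ((as17 & 0x0f800) << 5) |
                ((as17 & 0x00400) >> 8) |
                ((as17 & 0x003ff) << 3));
}

int main(void)
{
        int distance = 64;      /* example: branch 64 bytes forward */
        unsigned int insn;

        distance -= 8;          /* account for the IA queue offset */
        insn = 0xe8000002 | reassemble_17(distance >> 2);
        printf("b,n encoding: 0x%08x\n", insn);
        return 0;
}
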
@@ -0,0 +1,209 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PA-RISC KGDB support
*
* Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
*
*/
#include <linux/kgdb.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/processor.h>
#include <asm/patch.h>
#include <asm/cacheflush.h>
const struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = { 0x03, 0xff, 0xa0, 0x1f }
};
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
if (kgdb_handle_exception(1, args->signr, cmd, regs))
return NOTIFY_DONE;
return NOTIFY_STOP;
}
static int kgdb_notify(struct notifier_block *self,
unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
.priority = -INT_MAX,
};
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct parisc_gdb_regs *gr = (struct parisc_gdb_regs *)gdb_regs;
memset(gr, 0, sizeof(struct parisc_gdb_regs));
memcpy(gr->gpr, regs->gr, sizeof(gr->gpr));
memcpy(gr->fr, regs->fr, sizeof(gr->fr));
gr->sr0 = regs->sr[0];
gr->sr1 = regs->sr[1];
gr->sr2 = regs->sr[2];
gr->sr3 = regs->sr[3];
gr->sr4 = regs->sr[4];
gr->sr5 = regs->sr[5];
gr->sr6 = regs->sr[6];
gr->sr7 = regs->sr[7];
gr->sar = regs->sar;
gr->iir = regs->iir;
gr->isr = regs->isr;
gr->ior = regs->ior;
gr->ipsw = regs->ipsw;
gr->cr27 = regs->cr27;
gr->iaoq_f = regs->iaoq[0];
gr->iasq_f = regs->iasq[0];
gr->iaoq_b = regs->iaoq[1];
gr->iasq_b = regs->iasq[1];
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct parisc_gdb_regs *gr = (struct parisc_gdb_regs *)gdb_regs;
memcpy(regs->gr, gr->gpr, sizeof(regs->gr));
memcpy(regs->fr, gr->fr, sizeof(regs->fr));
regs->sr[0] = gr->sr0;
regs->sr[1] = gr->sr1;
regs->sr[2] = gr->sr2;
regs->sr[3] = gr->sr3;
regs->sr[4] = gr->sr4;
regs->sr[5] = gr->sr5;
regs->sr[6] = gr->sr6;
regs->sr[7] = gr->sr7;
regs->sar = gr->sar;
regs->iir = gr->iir;
regs->isr = gr->isr;
regs->ior = gr->ior;
regs->ipsw = gr->ipsw;
regs->cr27 = gr->cr27;
regs->iaoq[0] = gr->iaoq_f;
regs->iasq[0] = gr->iasq_f;
regs->iaoq[1] = gr->iaoq_b;
regs->iasq[1] = gr->iasq_b;
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
unsigned long gr30, iaoq;
gr30 = regs->gr[30];
iaoq = regs->iaoq[0];
regs->gr[30] = regs->ksp;
regs->iaoq[0] = regs->kpc;
pt_regs_to_gdb_regs(gdb_regs, regs);
regs->gr[30] = gr30;
regs->iaoq[0] = iaoq;
}
static void step_instruction_queue(struct pt_regs *regs)
{
regs->iaoq[0] = regs->iaoq[1];
regs->iaoq[1] += 4;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->iaoq[0] = ip;
regs->iaoq[1] = ip + 4;
}
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
int ret = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
BREAK_INSTR_SIZE);
if (ret)
return ret;
__patch_text((void *)bpt->bpt_addr,
*(unsigned int *)&arch_kgdb_ops.gdb_bpt_instr);
return ret;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
__patch_text((void *)bpt->bpt_addr, *(unsigned int *)&bpt->saved_instr);
return 0;
}
int kgdb_arch_handle_exception(int trap, int signo,
int err_code, char *inbuf, char *outbuf,
struct pt_regs *regs)
{
unsigned long addr;
char *p = inbuf + 1;
switch (inbuf[0]) {
case 'D':
case 'c':
case 'k':
kgdb_contthread = NULL;
kgdb_single_step = 0;
if (kgdb_hex2long(&p, &addr))
kgdb_arch_set_pc(regs, addr);
else if (trap == 9 && regs->iir ==
PARISC_KGDB_COMPILED_BREAK_INSN)
step_instruction_queue(regs);
return 0;
case 's':
kgdb_single_step = 1;
if (kgdb_hex2long(&p, &addr)) {
kgdb_arch_set_pc(regs, addr);
} else if (trap == 9 && regs->iir ==
PARISC_KGDB_COMPILED_BREAK_INSN) {
step_instruction_queue(regs);
mtctl(-1, 0);
} else {
mtctl(0, 0);
}
regs->gr[0] |= PSW_R;
return 0;
}
return -1;
}

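A breakpoint can also be planted from code with the generic helper, which lands in the compiled-break path of kgdb_arch_handle_exception() above; a sketch (assumes a KGDB I/O driver is configured, e.g. kgdboc on a serial console):

#include <linux/kgdb.h>

void debug_checkpoint(void)
{
        /* emits PARISC_KGDB_COMPILED_BREAK_INSN via arch_kgdb_breakpoint(),
         * trapping into kgdb_handle_exception() */
        kgdb_breakpoint();
}
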
@@ -0,0 +1,291 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/parisc/kernel/kprobes.c
*
* PA-RISC kprobes implementation
*
* Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
*/
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
if ((unsigned long)p->addr & 3UL)
return -EINVAL;
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
return -ENOMEM;
memcpy(p->ainsn.insn, p->addr,
MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
flush_insn_slot(p);
return 0;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
if (!p->ainsn.insn)
return;
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
patch_text(p->addr, PARISC_KPROBES_BREAK_INSN);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
patch_text(p->addr, p->opcode);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
static void __kprobes setup_singlestep(struct kprobe *p,
struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
kcb->iaoq[0] = regs->iaoq[0];
kcb->iaoq[1] = regs->iaoq[1];
regs->iaoq[0] = (unsigned long)p->ainsn.insn;
mtctl(0, 0);
regs->gr[0] |= PSW_R;
}
int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe((unsigned long *)regs->iaoq[0]);
if (!p) {
preempt_enable_no_resched();
return 0;
}
if (kprobe_running()) {
/*
* We have reentered the kprobe_handler, since another kprobe
* was hit while within the handler, we save the original
* kprobes and single step on the instruction of the new probe
* without calling any user handlers to avoid recursive
* kprobes.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p);
kprobes_inc_nmissed_count(p);
setup_singlestep(p, kcb, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/* If we have no pre-handler or it returned 0, we continue with
* normal processing. If we have a pre-handler and it returned
* non-zero - which means user handler setup registers to exit
* to another instruction, we must skip the single stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, kcb, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
} else {
reset_current_kprobe();
preempt_enable_no_resched();
}
return 1;
}
int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct kprobe *p = kprobe_running();
if (regs->iaoq[0] != (unsigned long)p->ainsn.insn+4)
return 0;
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
return 1;
}
/* for absolute branch instructions we can copy iaoq_b. for relative
* branch instructions we need to calculate the new address based on the
* difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
* modification because it's based on our ainsn.insn address.
*/
if (p->post_handler)
p->post_handler(p, regs, 0);
switch (regs->iir >> 26) {
case 0x38: /* BE */
case 0x39: /* BE,L */
case 0x3a: /* BV */
case 0x3b: /* BVE */
/* for absolute branches, regs->iaoq[1] has already the right
* address
*/
regs->iaoq[0] = kcb->iaoq[1];
break;
default:
regs->iaoq[1] = kcb->iaoq[0];
regs->iaoq[1] += (regs->iaoq[1] - regs->iaoq[0]) + 4;
regs->iaoq[0] = kcb->iaoq[1];
break;
}
kcb->kprobe_status = KPROBE_HIT_SSDONE;
reset_current_kprobe();
return 1;
}
static inline void kretprobe_trampoline(void)
{
asm volatile("nop");
asm volatile("nop");
}
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs);
static struct kprobe trampoline_p = {
.pre_handler = trampoline_probe_handler
};
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)trampoline_p.addr;
kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path have
* a return probe installed on them, and/or more than one return
* probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
correct_ret_addr = ri->ret_addr;
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
instruction_pointer_set(regs, orig_ret_address);
return 1;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->gr[2];
/* Replace the return addr with trampoline addr. */
regs->gr[2] = (unsigned long)trampoline_p.addr;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return p->addr == trampoline_p.addr;
}
bool arch_kprobe_on_func_entry(unsigned long offset)
{
return !offset;
}
int __init arch_init_kprobes(void)
{
trampoline_p.addr = (kprobe_opcode_t *)
dereference_function_descriptor(kretprobe_trampoline);
return register_kprobe(&trampoline_p);
}

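The trampoline above is what a registered kretprobe returns through; a minimal usage sketch (target symbol chosen arbitrarily):

#include <linux/kprobes.h>

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        /* %r28 carries the return value in the parisc ABI */
        pr_info("%s returned %#lx\n", ri->rp->kp.symbol_name, regs->gr[28]);
        return 0;
}

static struct kretprobe my_krp = {
        .kp.symbol_name = "dput",       /* arbitrary target */
        .handler        = my_ret_handler,
};

/* register_kretprobe(&my_krp) makes arch_prepare_kretprobe() above swap
 * the caller's %r2 (return pointer) for the trampoline */
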
@@ -311,39 +311,6 @@ fdsync:
nop
ENDPROC_CFI(flush_data_cache_local)
/* Macros to serialize TLB purge operations on SMP. */
.macro tlb_lock la,flags,tmp
#ifdef CONFIG_SMP
98:
#if __PA_LDCW_ALIGNMENT > 4
load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
#else
load32 pa_tlb_lock, \la
#endif
rsm PSW_SM_I,\flags
1: LDCW 0(\la),\tmp
cmpib,<>,n 0,\tmp,3f
2: ldw 0(\la),\tmp
cmpb,<> %r0,\tmp,1b
nop
b,n 2b
3:
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
98: ldi 1,\tmp
sync
stw \tmp,0(\la)
mtsm \flags
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Clear page using kernel mapping. */
ENTRY_CFI(clear_page_asm)
@@ -601,10 +568,8 @@ ENTRY_CFI(copy_user_page_asm)
pdtlb,l %r0(%r28)
pdtlb,l %r0(%r29)
#else
tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
1: pdtlb %r0(%r29)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
@@ -743,9 +708,7 @@ ENTRY_CFI(clear_user_page_asm)
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
@@ -821,9 +784,7 @@ ENTRY_CFI(flush_dcache_page_asm)
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
@@ -882,9 +843,7 @@ ENTRY_CFI(purge_dcache_page_asm)
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
@@ -948,10 +907,8 @@ ENTRY_CFI(flush_icache_page_asm)
1: pitlb,l %r0(%sr4,%r28)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
#else
tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
1: pitlb %r0(%sr4,%r28)
tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)

@@ -138,12 +138,6 @@ extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
#endif
#ifdef CONFIG_DISCONTIGMEM
#include <asm/mmzone.h>
EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(pfnnid_map);
#endif
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);

@@ -0,0 +1,77 @@
// SPDX-License-Identifier: GPL-2.0
/*
* functions to patch RO kernel text during runtime
*
* Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/patch.h>
struct patch {
void *addr;
unsigned int insn;
};
static void __kprobes *patch_map(void *addr, int fixmap)
{
unsigned long uintaddr = (uintptr_t) addr;
bool module = !core_kernel_text(uintaddr);
struct page *page;
if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
page = virt_to_page(addr);
else
return addr;
set_fixmap(fixmap, page_to_phys(page));
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
static void __kprobes patch_unmap(int fixmap)
{
clear_fixmap(fixmap);
}
void __kprobes __patch_text(void *addr, unsigned int insn)
{
void *waddr = addr;
int size;
waddr = patch_map(addr, FIX_TEXT_POKE0);
*(u32 *)waddr = insn;
size = sizeof(u32);
flush_kernel_vmap_range(waddr, size);
patch_unmap(FIX_TEXT_POKE0);
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
}
static int __kprobes patch_text_stop_machine(void *data)
{
struct patch *patch = data;
__patch_text(patch->addr, patch->insn);
return 0;
}
void __kprobes patch_text(void *addr, unsigned int insn)
{
struct patch patch = {
.addr = addr,
.insn = insn,
};
stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
}

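Callers only need the two entry points from asm/patch.h; for instance, a sketch that NOPs out a single kernel-text instruction (INSN_NOP comes from asm/alternative.h, as in the jump-label code):

#include <asm/alternative.h>    /* INSN_NOP */
#include <asm/patch.h>

static void nop_out_insn(void *addr)
{
        /* patch_text() wraps __patch_text() in stop_machine(), so no
         * other CPU executes the instruction mid-rewrite */
        patch_text(addr, INSN_NOP);
}
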
@@ -193,6 +193,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
*/
int running_on_qemu __read_mostly;
EXPORT_SYMBOL(running_on_qemu);
void __cpuidle arch_cpu_idle_dead(void)
{

@@ -305,7 +305,8 @@ void __init collect_boot_cpu_data(void)
if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) {
printk(KERN_INFO "product %s, original product %s, S/N: %s\n",
current_prod_num, orig_prod_num, serial_no);
current_prod_num[0] ? current_prod_num : "n/a",
orig_prod_num, serial_no);
add_device_randomness(orig_prod_num, strlen(orig_prod_num));
add_device_randomness(current_prod_num, strlen(current_prod_num));
add_device_randomness(serial_no, strlen(serial_no));

@@ -789,3 +789,38 @@ const char *regs_query_register_name(unsigned int offset)
return roff->name;
return NULL;
}
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr -= n;
if (!regs_within_kernel_stack(regs, (unsigned long)addr))
return 0;
return *addr;
}

@@ -343,6 +343,12 @@ static int __init parisc_init(void)
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
/* Don't serialize TLB flushes if we run on one CPU only. */
if (num_online_cpus() == 1)
pa_serialize_tlb_flushes = 0;
#endif
apply_alternatives_all();
parisc_setup_cache_timing();

@@ -86,7 +86,8 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
stack_base = STACK_SIZE_MAX;
/* Add space for stack randomization. */
stack_base += (STACK_RND_MASK << PAGE_SHIFT);
if (current->flags & PF_RANDOMIZE)
stack_base += (STACK_RND_MASK << PAGE_SHIFT);
return PAGE_ALIGN(STACK_TOP - stack_base);
}

@@ -48,7 +48,7 @@ registers).
*/
#define KILL_INSN break 0,0
.level LEVEL
.level PA_ASM_LEVEL
.text
@@ -640,7 +640,9 @@ cas_action:
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%r26)
/* Free lock */
sync
#ifdef CONFIG_SMP
LDCW 0(%sr2,%r20), %r1 /* Barrier */
#endif
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
@@ -655,7 +657,9 @@
3:
/* Error occurred on load or store */
/* Free lock */
sync
#ifdef CONFIG_SMP
LDCW 0(%sr2,%r20), %r1 /* Barrier */
#endif
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
@@ -857,7 +861,9 @@ cas2_action:
cas2_end:
/* Free lock */
sync
#ifdef CONFIG_SMP
LDCW 0(%sr2,%r20), %r1 /* Barrier */
#endif
stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
@@ -868,7 +874,9 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
sync
#ifdef CONFIG_SMP
LDCW 0(%sr2,%r20), %r1 /* Barrier */
#endif
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28

@@ -42,6 +42,8 @@
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <linux/kgdb.h>
#include <linux/kprobes.h>
#include "../math-emu/math-emu.h" /* for handle_fpe() */
@@ -293,6 +295,22 @@ static void handle_break(struct pt_regs *regs)
(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
}
#ifdef CONFIG_KPROBES
if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
parisc_kprobe_break_handler(regs);
return;
}
#endif
#ifdef CONFIG_KGDB
if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
iir == PARISC_KGDB_BREAK_INSN)) {
kgdb_handle_exception(9, SIGTRAP, 0, regs);
return;
}
#endif
if (unlikely(iir != GDB_BREAK_INSN))
parisc_printk_ratelimited(0, regs,
KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
@@ -518,6 +536,19 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
case 3:
/* Recovery counter trap */
regs->gr[0] &= ~PSW_R;
#ifdef CONFIG_KPROBES
if (parisc_kprobe_ss_handler(regs))
return;
#endif
#ifdef CONFIG_KGDB
if (kgdb_single_step) {
kgdb_handle_exception(0, SIGTRAP, 0, regs);
return;
}
#endif
if (user_space(regs))
handle_gdb_break(regs, TRAP_TRACE);
/* else this must be the start of a syscall - just let it run */

@@ -18,6 +18,9 @@
*(.data..vm0.pgd) \
*(.data..vm0.pte)
/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */

@@ -2,5 +2,5 @@
# Makefile for arch/parisc/mm
#
obj-y := init.o fault.o ioremap.o
obj-y := init.o fault.o ioremap.o fixmap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

@@ -0,0 +1,41 @@
// SPDX-License-Identifier: GPL-2.0
/*
* fixmaps for parisc
*
* Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
*/
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
void set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
{
unsigned long vaddr = __fix_to_virt(idx);
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
pte_t *pte;
if (pmd_none(*pmd))
pmd = pmd_alloc(NULL, pgd, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
if (pte_none(*pte))
pte = pte_alloc_kernel(pmd, vaddr);
set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
void clear_fixmap(enum fixed_addresses idx)
{
unsigned long vaddr = __fix_to_virt(idx);
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
pte_t *pte = pte_offset_kernel(pmd, vaddr);
pte_clear(&init_mm, vaddr, pte);
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}

@@ -139,9 +139,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
{
unsigned long flags;
purge_tlb_start(flags);
spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
__set_huge_pte_at(mm, addr, ptep, entry);
purge_tlb_end(flags);
spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}
@@ -151,10 +151,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
unsigned long flags;
pte_t entry;
purge_tlb_start(flags);
spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
entry = *ptep;
__set_huge_pte_at(mm, addr, ptep, __pte(0));
purge_tlb_end(flags);
spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
return entry;
}
@@ -166,10 +166,10 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long flags;
pte_t old_pte;
purge_tlb_start(flags);
spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
old_pte = *ptep;
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
purge_tlb_end(flags);
spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
@@ -178,13 +178,14 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
{
unsigned long flags;
int changed;
struct mm_struct *mm = vma->vm_mm;
purge_tlb_start(flags);
spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
changed = !pte_same(*ptep, pte);
if (changed) {
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
__set_huge_pte_at(mm, addr, ptep, pte);
}
purge_tlb_end(flags);
spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
return changed;
}

@@ -32,6 +32,7 @@
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
@@ -48,11 +49,6 @@ pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif
static struct resource data_resource = {
.name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
@@ -76,11 +72,11 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
* information retrieved in kernel/inventory.c.
*/
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;
#ifdef CONFIG_64BIT
#define MAX_MEM (~0UL)
#define MAX_MEM (1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */
@@ -119,7 +115,7 @@ static void __init mem_limit_func(void)
static void __init setup_bootmem(void)
{
unsigned long mem_max;
#ifndef CONFIG_DISCONTIGMEM
#ifndef CONFIG_SPARSEMEM
physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
int npmem_holes;
#endif
@@ -137,23 +133,20 @@ static void __init setup_bootmem(void)
int j;
for (j = i; j > 0; j--) {
unsigned long tmp;
physmem_range_t tmp;
if (pmem_ranges[j-1].start_pfn <
pmem_ranges[j].start_pfn) {
break;
}
tmp = pmem_ranges[j-1].start_pfn;
pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
pmem_ranges[j].start_pfn = tmp;
tmp = pmem_ranges[j-1].pages;
pmem_ranges[j-1].pages = pmem_ranges[j].pages;
pmem_ranges[j].pages = tmp;
tmp = pmem_ranges[j-1];
pmem_ranges[j-1] = pmem_ranges[j];
pmem_ranges[j] = tmp;
}
}
#ifndef CONFIG_DISCONTIGMEM
#ifndef CONFIG_SPARSEMEM
/*
* Throw out ranges that are too far apart (controlled by
* MAX_GAP).
@@ -165,7 +158,7 @@ static void __init setup_bootmem(void)
pmem_ranges[i-1].pages) > MAX_GAP) {
npmem_ranges = i;
printk("Large gap in memory detected (%ld pages). "
"Consider turning on CONFIG_DISCONTIGMEM\n",
"Consider turning on CONFIG_SPARSEMEM\n",
pmem_ranges[i].start_pfn -
(pmem_ranges[i-1].start_pfn +
pmem_ranges[i-1].pages));
@@ -230,9 +223,8 @@ static void __init setup_bootmem(void)
printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
#ifndef CONFIG_DISCONTIGMEM
#ifndef CONFIG_SPARSEMEM
/* Merge the ranges, keeping track of the holes */
{
unsigned long end_pfn;
unsigned long hole_pages;
@@ -255,18 +247,6 @@ static void __init setup_bootmem(void)
}
#endif
#ifdef CONFIG_DISCONTIGMEM
for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
memset(NODE_DATA(i), 0, sizeof(pg_data_t));
}
memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
for (i = 0; i < npmem_ranges; i++) {
node_set_state(i, N_NORMAL_MEMORY);
node_set_online(i);
}
#endif
/*
* Initialize and free the full range of memory in each range.
*/
@@ -314,7 +294,7 @@ static void __init setup_bootmem(void)
memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
#ifndef CONFIG_DISCONTIGMEM
#ifndef CONFIG_SPARSEMEM
/* reserve the holes */
@@ -360,6 +340,9 @@ static void __init setup_bootmem(void)
/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
pdc_pdt_init();
memblock_allow_resize();
memblock_dump_all();
}
static int __init parisc_text_address(unsigned long vaddr)
@@ -495,7 +478,7 @@ static void __init map_pages(unsigned long start_vaddr,
void __init set_kernel_text_rw(int enable_read_write)
{
unsigned long start = (unsigned long) _text;
unsigned long start = (unsigned long) __init_begin;
unsigned long end = (unsigned long) &data_start;
map_pages(start, __pa(start), end-start,
@@ -622,15 +605,19 @@ void __init mem_init(void)
* But keep code for debugging purposes.
*/
printk("virtual kernel memory layout:\n"
" vmalloc : 0x%px - 0x%px (%4ld MB)\n"
" memory : 0x%px - 0x%px (%4ld MB)\n"
" .init : 0x%px - 0x%px (%4ld kB)\n"
" .data : 0x%px - 0x%px (%4ld kB)\n"
" .text : 0x%px - 0x%px (%4ld kB)\n",
" vmalloc : 0x%px - 0x%px (%4ld MB)\n"
" fixmap : 0x%px - 0x%px (%4ld kB)\n"
" memory : 0x%px - 0x%px (%4ld MB)\n"
" .init : 0x%px - 0x%px (%4ld kB)\n"
" .data : 0x%px - 0x%px (%4ld kB)\n"
" .text : 0x%px - 0x%px (%4ld kB)\n",
(void*)VMALLOC_START, (void*)VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
(void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
(unsigned long)(FIXMAP_SIZE / 1024),
__va(0), high_memory,
((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
@@ -709,37 +696,46 @@ static void __init gateway_init(void)
PAGE_SIZE, PAGE_GATEWAY, 1);
}
void __init paging_init(void)
static void __init parisc_bootmem_free(void)
{
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long holes_size[MAX_NR_ZONES] = { 0, };
unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
int i;
for (i = 0; i < npmem_ranges; i++) {
unsigned long start = pmem_ranges[i].start_pfn;
unsigned long size = pmem_ranges[i].pages;
unsigned long end = start + size;
if (mem_start_pfn > start)
mem_start_pfn = start;
if (mem_end_pfn < end)
mem_end_pfn = end;
mem_size_pfn += size;
}
zones_size[0] = mem_end_pfn - mem_start_pfn;
holes_size[0] = zones_size[0] - mem_size_pfn;
free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
}
void __init paging_init(void)
{
setup_bootmem();
pagetable_init();
gateway_init();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
for (i = 0; i < npmem_ranges; i++) {
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
#ifdef CONFIG_DISCONTIGMEM
/* Need to initialize the pfnnid_map before we can initialize
the zone */
{
int j;
for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
j++) {
pfnnid_map[j] = i;
}
}
#endif
free_area_init_node(i, zones_size,
pmem_ranges[i].start_pfn, NULL);
}
/*
* Mark all memblocks as present for sparsemem using
* memory_present() and then initialize sparsemem.
*/
memblocks_present();
sparse_init();
parisc_bootmem_free();
}
#ifdef CONFIG_PA20

@@ -105,7 +105,7 @@
#define DMA_WBACK_INV(ndev, addr, len) \
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
#define SYSBUS 0x0000006c;
#define SYSBUS 0x0000006c
/* big endian CPU, 82596 "big" endian mode */
#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
@@ -141,7 +141,8 @@ static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
}
gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
udelay(1);
if (!running_on_qemu)
udelay(1);
gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
}

@@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
break;
case DISPLAY_MODEL_LASI:
/* Skip to register LED in QEMU */
if (running_on_qemu)
return 1;
LED_DATA_REG = data_reg;
led_func_ptr = led_LASI_driver;
printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);