Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - fix various clang build and cppcheck issues

 - switch ARM to use new common outgoing-CPU-notification code

 - add some additional explanation about the boot code

 - kbuild "make clean" fixes

 - get rid of another "(____ptrval____)", this time for the VDSO code

 - avoid treating cache maintenance faults as a write

 - add a frame pointer unwinder implementation for clang

 - add EDAC support for Aurora L2 cache

 - improve robustness of adjust_lowmem_bounds() finding the bounds of
   lowmem.

 - add reset control for AMBA primecell devices

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (24 commits)
  ARM: 8906/1: drivers/amba: add reset control to amba bus probe
  ARM: 8905/1: Emit __gnu_mcount_nc when using Clang 10.0.0 or newer
  ARM: 8904/1: skip nomap memblocks while finding the lowmem/highmem boundary
  ARM: 8903/1: ensure that usable memory in bank 0 starts from a PMD-aligned address
  ARM: 8891/1: EDAC: armada_xp: Add support for more SoCs
  ARM: 8888/1: EDAC: Add driver for the Marvell Armada XP SDRAM and L2 cache ECC
  ARM: 8892/1: EDAC: Add missing debugfs_create_x32 wrapper
  ARM: 8890/1: l2x0: add marvell,ecc-enable property for aurora
  ARM: 8889/1: dt-bindings: document marvell,ecc-enable binding
  ARM: 8886/1: l2x0: support parity-enable/disable on aurora
  ARM: 8885/1: aurora-l2: add defines for parity and ECC registers
  ARM: 8887/1: aurora-l2: add prefix to MAX_RANGE_SIZE
  ARM: 8902/1: l2c: move cache-aurora-l2.h to asm/hardware
  ARM: 8900/1: UNWINDER_FRAME_POINTER implementation for Clang
  ARM: 8898/1: mm: Don't treat faults reported from cache maintenance as writes
  ARM: 8896/1: VDSO: Don't leak kernel addresses
  ARM: 8895/1: visit mach-* and plat-* directories when cleaning
  ARM: 8894/1: boot: Replace open-coded nop with macro
  ARM: 8893/1: boot: Explain the 8 nops
  ARM: 8876/1: fix O= building with CONFIG_FPE_FASTFPE
  ...
Linus Torvalds 2019-09-22 09:39:09 -07:00
commit 8808cf8cbc
21 changed files with 1027 additions and 24 deletions


@@ -176,6 +176,10 @@ properties:
description: disable parity checking on the L2 cache (L220 or PL310).
type: boolean
marvell,ecc-enable:
description: enable ECC protection on the L2 cache
type: boolean
arm,outer-sync-disable:
description: disable the outer sync operation on the L2 cache.
Some core tiles, especially ARM PB11MPCore have a faulty L220 cache that


@@ -5802,6 +5802,12 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/amd64_edac*
EDAC-ARMADA
M: Jan Luebbe <jlu@pengutronix.de>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/armada_xp_*
EDAC-AST2500
M: Stefan Schaeckeler <sschaeck@cisco.com>
S: Supported


@@ -82,7 +82,7 @@ config ARM
select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000)
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IDE if PCI || ISA || PCMCIA
@@ -1476,8 +1476,9 @@ config ARM_PATCH_IDIV
code to do integer division.
config AEABI
bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
!CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
help
This option allows for the kernel to be compiled using the latest
ARM ABI (aka EABI). This is only useful if you are using a user


@@ -56,7 +56,7 @@ choice
config UNWINDER_FRAME_POINTER
bool "Frame pointer unwinder"
depends on !THUMB2_KERNEL && !CC_IS_CLANG
depends on !THUMB2_KERNEL
select ARCH_WANT_FRAME_POINTERS
select FRAME_POINTER
help


@@ -36,7 +36,10 @@ KBUILD_CFLAGS += $(call cc-option,-mno-unaligned-access)
endif
ifeq ($(CONFIG_FRAME_POINTER),y)
KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
KBUILD_CFLAGS +=-fno-omit-frame-pointer
ifeq ($(CONFIG_CC_IS_GCC),y)
KBUILD_CFLAGS += -mapcs -mno-sched-prolog
endif
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
@@ -112,6 +115,10 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
CFLAGS_ABI +=-funwind-tables
endif
ifeq ($(CONFIG_CC_IS_CLANG),y)
CFLAGS_ABI += -meabi gnu
endif
# Accept old syntax despite ".syntax unified"
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
@@ -266,14 +273,9 @@ endif
export TEXT_OFFSET GZFLAGS MMUEXT
# Do we have FASTFPE?
FASTFPE :=arch/arm/fastfpe
ifeq ($(FASTFPE),$(wildcard $(FASTFPE)))
FASTFPE_OBJ :=$(FASTFPE)/
endif
core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ)
# Put arch/arm/fastfpe/ to use this.
core-$(CONFIG_FPE_FASTFPE) += $(patsubst $(srctree)/%,%,$(wildcard $(srctree)/arch/arm/fastfpe/))
core-$(CONFIG_VFP) += arch/arm/vfp/
core-$(CONFIG_XEN) += arch/arm/xen/
core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
@@ -286,6 +288,10 @@ core-y += arch/arm/net/
core-y += arch/arm/crypto/
core-y += $(machdirs) $(platdirs)
# For cleaning
core- += $(patsubst %,arch/arm/mach-%/, $(machine-))
core- += $(patsubst %,arch/arm/plat-%/, $(plat-))
drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
libs-y := arch/arm/lib/ $(libs-y)


@@ -153,11 +153,23 @@
AR_CLASS( .arm )
start:
.type start,#function
/*
* These 7 nops along with the 1 nop immediately below for
* !THUMB2 form 8 nops that make the compressed kernel bootable
* on legacy ARM systems that were assuming the kernel in a.out
* binary format. The boot loaders on these systems would
* jump 32 bytes into the image to skip the a.out header.
* With these 8 nops filling exactly 32 bytes, things still
* work as expected on these legacy systems. Thumb2 mode keeps
* 7 of the nops as it turns out that some boot loaders
* were patching the initial instructions of the kernel, i.e.
* had started to exploit this "patch area".
*/
.rept 7
__nop
.endr
#ifndef CONFIG_THUMB2_KERNEL
mov r0, r0
__nop
#else
AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
M_CLASS( nop.w ) @ M: already in Thumb2 mode


@@ -31,6 +31,9 @@
#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
(3 << AURORA_ACR_REPLACEMENT_OFFSET)
#define AURORA_ACR_PARITY_EN (1 << 21)
#define AURORA_ACR_ECC_EN (1 << 20)
#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0
#define AURORA_ACR_FORCE_WRITE_POLICY_MASK \
(0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
@@ -41,7 +44,52 @@
#define AURORA_ACR_FORCE_WRITE_THRO_POLICY \
(2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
#define MAX_RANGE_SIZE 1024
#define AURORA_ERR_CNT_REG 0x600
#define AURORA_ERR_ATTR_CAP_REG 0x608
#define AURORA_ERR_ADDR_CAP_REG 0x60c
#define AURORA_ERR_WAY_CAP_REG 0x610
#define AURORA_ERR_INJECT_CTL_REG 0x614
#define AURORA_ERR_INJECT_MASK_REG 0x618
#define AURORA_ERR_CNT_CLR_OFFSET 31
#define AURORA_ERR_CNT_CLR \
(0x1 << AURORA_ERR_CNT_CLR_OFFSET)
#define AURORA_ERR_CNT_UE_OFFSET 16
#define AURORA_ERR_CNT_UE_MASK \
(0x7fff << AURORA_ERR_CNT_UE_OFFSET)
#define AURORA_ERR_CNT_CE_OFFSET 0
#define AURORA_ERR_CNT_CE_MASK \
(0xffff << AURORA_ERR_CNT_CE_OFFSET)
#define AURORA_ERR_ATTR_SRC_OFF 16
#define AURORA_ERR_ATTR_SRC_MSK \
(0x7 << AURORA_ERR_ATTR_SRC_OFF)
#define AURORA_ERR_ATTR_TXN_OFF 12
#define AURORA_ERR_ATTR_TXN_MSK \
(0xf << AURORA_ERR_ATTR_TXN_OFF)
#define AURORA_ERR_ATTR_ERR_OFF 8
#define AURORA_ERR_ATTR_ERR_MSK \
(0x3 << AURORA_ERR_ATTR_ERR_OFF)
#define AURORA_ERR_ATTR_CAP_VALID_OFF 0
#define AURORA_ERR_ATTR_CAP_VALID \
(0x1 << AURORA_ERR_ATTR_CAP_VALID_OFF)
#define AURORA_ERR_ADDR_CAP_ADDR_MASK 0xffffffe0
#define AURORA_ERR_WAY_IDX_OFF 8
#define AURORA_ERR_WAY_IDX_MSK \
(0xfff << AURORA_ERR_WAY_IDX_OFF)
#define AURORA_ERR_WAY_CAP_WAY_OFFSET 1
#define AURORA_ERR_WAY_CAP_WAY_MASK \
(0xf << AURORA_ERR_WAY_CAP_WAY_OFFSET)
#define AURORA_ERR_INJECT_CTL_ADDR_MASK 0xfffffff0
#define AURORA_ERR_ATTR_TXN_OFF 12
#define AURORA_ERR_INJECT_CTL_EN_MASK 0x3
#define AURORA_ERR_INJECT_CTL_EN_PARITY 0x2
#define AURORA_ERR_INJECT_CTL_EN_ECC 0x1
#define AURORA_MAX_RANGE_SIZE 1024
#define AURORA_WAY_SIZE_SHIFT 2


@@ -697,9 +697,9 @@ static struct attribute_group armv7_pmuv2_events_attr_group = {
/*
* Event filters for PMUv2
*/
#define ARMV7_EXCLUDE_PL1 (1 << 31)
#define ARMV7_EXCLUDE_USER (1 << 30)
#define ARMV7_INCLUDE_HYP (1 << 27)
#define ARMV7_EXCLUDE_PL1 BIT(31)
#define ARMV7_EXCLUDE_USER BIT(30)
#define ARMV7_INCLUDE_HYP BIT(27)
/*
* Secure debug enable reg


@@ -194,7 +194,6 @@ static int __init vdso_init(void)
}
text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
/* Allocate the VDSO text pagelist */
vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),


@@ -5,7 +5,7 @@
# Copyright (C) 1995-2000 Russell King
#
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
lib-y := changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
delay.o delay-loop.o findbit.o memchr.o memcpy.o \
memmove.o memset.o setbit.o \
@@ -19,6 +19,12 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
mmu-y := clear_user.o copy_page.o getuser.o putuser.o \
copy_from_user.o copy_to_user.o
ifdef CONFIG_CC_IS_CLANG
lib-y += backtrace-clang.o
else
lib-y += backtrace.o
endif
# using lib_ here won't override already available weak symbols
obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o


@@ -0,0 +1,217 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/lib/backtrace-clang.S
*
* Copyright (C) 2019 Nathan Huckleberry
*
*/
#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/* fp is 0 or stack frame */
#define frame r4
#define sv_fp r5
#define sv_pc r6
#define mask r7
#define sv_lr r8
ENTRY(c_backtrace)
#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
ret lr
ENDPROC(c_backtrace)
#else
/*
* Clang does not store pc or sp in function prologues so we don't know exactly
* where the function starts.
*
* We can treat the current frame's lr as the saved pc and the preceding
* frame's lr as the current frame's lr, but we can't trace the most recent
* call. Inserting a false stack frame allows us to reference the function
* called last in the stacktrace.
*
* If the call instruction was a bl we can look at the caller's branch
* instruction to calculate the saved pc. We can recover the pc in most cases,
* but in cases such as calling function pointers we cannot. In this case,
* default to using the lr. This will be some address in the function, but will
* not be the function start.
*
* Unfortunately due to the stack frame layout we can't dump r0 - r3, but these
* are less frequently saved.
*
* Stack frame layout:
* <larger addresses>
* saved lr
* frame=> saved fp
* optionally saved caller registers (r4 - r10)
* optionally saved arguments (r0 - r3)
* <top of stack frame>
* <smaller addresses>
*
* Functions start with the following code sequence:
* corrected pc => stmfd sp!, {..., fp, lr}
* add fp, sp, #x
* stmfd sp!, {r0 - r3} (optional)
*
*
*
*
*
*
* The diagram below shows an example stack setup for dump_stack.
*
* The frame for c_backtrace has pointers to the code of dump_stack. This is
* why the frame of c_backtrace is used for the pc calculation of
* dump_stack. This is why we must move back a frame to print dump_stack.
*
* The stored locals for dump_stack are in dump_stack's frame. This means that
* to fully print dump_stack's frame we need both the frame for dump_stack (for
* locals) and the frame that was called by dump_stack (for pc).
*
* To print locals we must know where the function start is. If we read the
* function prologue opcodes we can determine which variables are stored in the
* stack frame.
*
* To find the function start of dump_stack we can look at the stored LR of
* show_stack. It points at the instruction directly after the bl dump_stack.
* We can then read the offset from the bl opcode to determine where the branch
* takes us. The address calculated must be the start of dump_stack.
*
* c_backtrace frame dump_stack:
* {[LR] } ============| ...
* {[FP] } =======| | bl c_backtrace
* | |=> ...
* {[R4-R10]} |
* {[R0-R3] } | show_stack:
* dump_stack frame | ...
* {[LR] } =============| bl dump_stack
* {[FP] } <=======| |=> ...
* {[R4-R10]}
* {[R0-R3] }
*/
stmfd sp!, {r4 - r9, fp, lr} @ Save an extra register
@ to ensure 8 byte alignment
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
tst r1, #0x10 @ 26 or 32-bit mode?
moveq mask, #0xfc000003
movne mask, #0 @ mask for 32-bit
/*
* Switches the current frame to be the frame for dump_stack.
*/
add frame, sp, #24 @ switch to false frame
for_each_frame: tst frame, mask @ Check for address exceptions
bne no_frame
/*
* sv_fp is the stack frame with the locals for the current considered
* function.
*
* sv_pc is the saved lr from the frame above. This is a pointer to a code
* address within the current considered function, but it is not the function
* start. This value gets updated to be the function start later if it is
* possible.
*/
1001: ldr sv_pc, [frame, #4] @ get saved 'pc'
1002: ldr sv_fp, [frame, #0] @ get saved fp
teq sv_fp, mask @ make sure next frame exists
beq no_frame
/*
* sv_lr is the lr from the function that called the current function. This is
* a pointer to a code address in the current function's caller. sv_lr-4 is
* the instruction used to call the current function.
*
* This sv_lr can be used to calculate the function start if the function was
* called using a bl instruction. If the function start can be recovered sv_pc
* is overwritten with the function start.
*
* If the current function was called using a function pointer we cannot
* recover the function start and instead continue with sv_pc as an arbitrary
* value within the current function. If this is the case we cannot print
* registers for the current function, but the stacktrace is still printed
* properly.
*/
1003: ldr sv_lr, [sv_fp, #4] @ get saved lr from next frame
ldr r0, [sv_lr, #-4] @ get call instruction
ldr r3, .Lopcode+4
and r2, r3, r0 @ is this a bl call
teq r2, r3
bne finished_setup @ give up if it's not
and r0, #0xffffff @ get call offset 24-bit int
lsl r0, r0, #8 @ sign extend offset
asr r0, r0, #8
ldr sv_pc, [sv_fp, #4] @ get lr address
add sv_pc, sv_pc, #-4 @ get call instruction address
add sv_pc, sv_pc, #8 @ take care of prefetch
add sv_pc, sv_pc, r0, lsl #2@ find function start
finished_setup:
bic sv_pc, sv_pc, mask @ mask PC/LR for the mode
/*
* Print the function (sv_pc) and where it was called from (sv_lr).
*/
1004: mov r0, sv_pc
mov r1, sv_lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
bl dump_backtrace_entry
/*
* Test if the function start is a stmfd instruction to determine which
* registers were stored in the function prologue.
*
* If we could not recover the sv_pc because we were called through a function
* pointer the comparison will fail and no registers will print. Unwinding will
* continue as if there had been no registers stored in this frame.
*/
1005: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, lr}
ldr r3, .Lopcode @ instruction exists,
teq r3, r1, lsr #11
ldr r0, [frame] @ locals are stored in
@ the preceding frame
subeq r0, r0, #4
bleq dump_backtrace_stm @ dump saved registers
/*
* If we are out of frames or if the next frame is invalid.
*/
teq sv_fp, #0 @ zero saved fp means
beq no_frame @ no further frames
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
bhi for_each_frame
1006: adr r0, .Lbad
mov r1, frame
bl printk
no_frame: ldmfd sp!, {r4 - r9, fp, pc}
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
.align 3
.long 1001b, 1006b
.long 1002b, 1006b
.long 1003b, 1006b
.long 1004b, 1006b
.long 1005b, 1006b
.popsection
.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
.align
.Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr}
.word 0x0b000000 @ bl if these bits are set
#endif
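
For reference, the bl-offset recovery described in the comments above can be sketched in plain C. This is an illustrative sketch only, not part of the patch; the helper name and example addresses are made up.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: recover the callee address from a 32-bit ARM "bl"
 * instruction, mirroring the lsl/asr sign extension and the "+8 prefetch"
 * correction performed by the assembly above.
 */
static uint32_t bl_target(uint32_t bl_addr, uint32_t bl_insn)
{
	/* low 24 bits are a signed word offset; shift up/down to sign-extend */
	int32_t words = (int32_t)((bl_insn & 0x00ffffff) << 8) >> 8;

	/* the ARM PC reads as the bl's own address + 8; the offset is in words */
	return bl_addr + 8 + ((uint32_t)words << 2);
}

int main(void)
{
	/* "bl ." (branch to self) encodes word offset -2 as 0xebfffffe */
	printf("0x%08x\n", bl_target(0x8000, 0xebfffffe)); /* prints 0x00008000 */
	return 0;
}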


@@ -18,8 +18,8 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"
struct l2c_init_data {
const char *type;
@@ -1352,8 +1352,8 @@ static unsigned long aurora_range_end(unsigned long start, unsigned long end)
* since cache range operations stall the CPU pipeline
* until completion.
*/
if (end > start + MAX_RANGE_SIZE)
end = start + MAX_RANGE_SIZE;
if (end > start + AURORA_MAX_RANGE_SIZE)
end = start + AURORA_MAX_RANGE_SIZE;
/*
* Cache range operations can't straddle a page boundary.
@@ -1493,6 +1493,18 @@ static void __init aurora_of_parse(const struct device_node *np,
mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
}
if (of_property_read_bool(np, "marvell,ecc-enable")) {
mask |= AURORA_ACR_ECC_EN;
val |= AURORA_ACR_ECC_EN;
}
if (of_property_read_bool(np, "arm,parity-enable")) {
mask |= AURORA_ACR_PARITY_EN;
val |= AURORA_ACR_PARITY_EN;
} else if (of_property_read_bool(np, "arm,parity-disable")) {
mask |= AURORA_ACR_PARITY_EN;
}
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;


@@ -191,7 +191,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
if (fsr & FSR_WRITE)
if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;
@@ -262,7 +262,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (fsr & FSR_WRITE)
if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
flags |= FAULT_FLAG_WRITE;
/*


@@ -6,6 +6,7 @@
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
#define FSR_CM (1 << 13)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)


@@ -1177,10 +1177,29 @@ void __init adjust_lowmem_bounds(void)
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
/*
* The first usable region must be PMD aligned. Mark its start
* as MEMBLOCK_NOMAP if it isn't
*/
for_each_memblock(memory, reg) {
if (!memblock_is_nomap(reg)) {
if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
phys_addr_t len;
len = round_up(reg->base, PMD_SIZE) - reg->base;
memblock_mark_nomap(reg->base, len);
}
break;
}
}
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
if (memblock_is_nomap(reg))
continue;
if (reg->base < vmalloc_limit) {
if (block_end > lowmem_limit)
/*
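
To put concrete numbers on the PMD-alignment fix-up above, here is a stand-alone sketch; the bank base address is hypothetical and PMD_SIZE is assumed to be the 2 MiB of the classic 2-level ARM page tables.

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE        (2u * 1024 * 1024)              /* assumed: non-LPAE ARM */
#define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))  /* stand-in for round_up() */

int main(void)
{
	/* hypothetical first memory bank that does not start on a PMD boundary */
	uint32_t base = 0x80100000;
	uint32_t len  = ROUND_UP(base, PMD_SIZE) - base;

	/* the loop above would mark this leading gap MEMBLOCK_NOMAP, so usable
	 * lowmem starts PMD aligned at 0x80200000 */
	printf("NOMAP 0x%08x..0x%08x (%u KiB)\n", base, base + len, len >> 10);
	return 0;
}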


@@ -18,6 +18,7 @@
#include <linux/limits.h>
#include <linux/clk/clk-conf.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <asm/irq.h>
@@ -401,6 +402,19 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
ret = amba_get_enable_pclk(dev);
if (ret == 0) {
u32 pid, cid;
struct reset_control *rstc;
/*
* Find reset control(s) of the amba bus and de-assert them.
*/
rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
if (IS_ERR(rstc)) {
if (PTR_ERR(rstc) != -EPROBE_DEFER)
dev_err(&dev->dev, "Can't get amba reset!\n");
return PTR_ERR(rstc);
}
reset_control_deassert(rstc);
reset_control_put(rstc);
/*
* Read pid and cid based on size of resource


@@ -466,6 +466,13 @@ config EDAC_SIFIVE
help
Support for error detection and correction on the SiFive SoCs.
config EDAC_ARMADA_XP
bool "Marvell Armada XP DDR and L2 Cache ECC"
depends on MACH_MVEBU_V7
help
Support for error correction and detection on the Marvell Armada XP
DDR RAM and L2 cache controllers.
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
depends on ARCH_ZYNQ || ARCH_ZYNQMP


@@ -80,6 +80,7 @@ obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SIFIVE) += sifive_edac.o
obj-$(CONFIG_EDAC_ARMADA_XP) += armada_xp_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
obj-$(CONFIG_EDAC_TI) += ti_edac.o


@@ -0,0 +1,635 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Pengutronix, Jan Luebbe <kernel@pengutronix.de>
*/
#include <linux/kernel.h>
#include <linux/edac.h>
#include <linux/of_platform.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>
#include "edac_mc.h"
#include "edac_device.h"
#include "edac_module.h"
/************************ EDAC MC (DDR RAM) ********************************/
#define SDRAM_NUM_CS 4
#define SDRAM_CONFIG_REG 0x0
#define SDRAM_CONFIG_ECC_MASK BIT(18)
#define SDRAM_CONFIG_REGISTERED_MASK BIT(17)
#define SDRAM_CONFIG_BUS_WIDTH_MASK BIT(15)
#define SDRAM_ADDR_CTRL_REG 0x10
#define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs)
#define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs) (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs))
#define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs) BIT(16+cs)
#define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs) (cs*4+2)
#define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs))
#define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs) (cs*4)
#define SDRAM_ADDR_CTRL_STRUCT_MASK(cs) (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs))
#define SDRAM_ERR_DATA_H_REG 0x40
#define SDRAM_ERR_DATA_L_REG 0x44
#define SDRAM_ERR_RECV_ECC_REG 0x48
#define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff
#define SDRAM_ERR_CALC_ECC_REG 0x4c
#define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8
#define SDRAM_ERR_CALC_ECC_ROW_MASK (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET)
#define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff
#define SDRAM_ERR_ADDR_REG 0x50
#define SDRAM_ERR_ADDR_BANK_OFFSET 23
#define SDRAM_ERR_ADDR_BANK_MASK (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET)
#define SDRAM_ERR_ADDR_COL_OFFSET 8
#define SDRAM_ERR_ADDR_COL_MASK (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET)
#define SDRAM_ERR_ADDR_CS_OFFSET 1
#define SDRAM_ERR_ADDR_CS_MASK (0x3 << SDRAM_ERR_ADDR_CS_OFFSET)
#define SDRAM_ERR_ADDR_TYPE_MASK BIT(0)
#define SDRAM_ERR_CTRL_REG 0x54
#define SDRAM_ERR_CTRL_THR_OFFSET 16
#define SDRAM_ERR_CTRL_THR_MASK (0xff << SDRAM_ERR_CTRL_THR_OFFSET)
#define SDRAM_ERR_CTRL_PROP_MASK BIT(9)
#define SDRAM_ERR_SBE_COUNT_REG 0x58
#define SDRAM_ERR_DBE_COUNT_REG 0x5c
#define SDRAM_ERR_CAUSE_ERR_REG 0xd0
#define SDRAM_ERR_CAUSE_MSG_REG 0xd8
#define SDRAM_ERR_CAUSE_DBE_MASK BIT(1)
#define SDRAM_ERR_CAUSE_SBE_MASK BIT(0)
#define SDRAM_RANK_CTRL_REG 0x1e0
#define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs)
struct axp_mc_drvdata {
void __iomem *base;
/* width in bytes */
unsigned int width;
/* bank interleaving */
bool cs_addr_sel[SDRAM_NUM_CS];
char msg[128];
};
/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
uint8_t cs, uint8_t bank, uint16_t row,
uint16_t col)
{
if (drvdata->width == 8) {
/* 64 bit */
if (drvdata->cs_addr_sel[cs])
/* bank interleaved */
return (((row & 0xfff8) << 16) |
((bank & 0x7) << 16) |
((row & 0x7) << 13) |
((col & 0x3ff) << 3));
else
return (((row & 0xffff << 16) |
((bank & 0x7) << 13) |
((col & 0x3ff)) << 3));
} else if (drvdata->width == 4) {
/* 32 bit */
if (drvdata->cs_addr_sel[cs])
/* bank interleaved */
return (((row & 0xfff0) << 15) |
((bank & 0x7) << 16) |
((row & 0xf) << 12) |
((col & 0x3ff) << 2));
else
return (((row & 0xffff << 15) |
((bank & 0x7) << 12) |
((col & 0x3ff)) << 2));
} else {
/* 16 bit */
if (drvdata->cs_addr_sel[cs])
/* bank interleaved */
return (((row & 0xffe0) << 14) |
((bank & 0x7) << 16) |
((row & 0x1f) << 11) |
((col & 0x3ff) << 1));
else
return (((row & 0xffff << 14) |
((bank & 0x7) << 11) |
((col & 0x3ff)) << 1));
}
}
static void axp_mc_check(struct mem_ctl_info *mci)
{
struct axp_mc_drvdata *drvdata = mci->pvt_info;
uint32_t data_h, data_l, recv_ecc, calc_ecc, addr;
uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg;
uint32_t row_val, col_val, bank_val, addr_val;
uint8_t syndrome_val, cs_val;
char *msg = drvdata->msg;
data_h = readl(drvdata->base + SDRAM_ERR_DATA_H_REG);
data_l = readl(drvdata->base + SDRAM_ERR_DATA_L_REG);
recv_ecc = readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG);
calc_ecc = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG);
addr = readl(drvdata->base + SDRAM_ERR_ADDR_REG);
cnt_sbe = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
cnt_dbe = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);
/* clear cause registers */
writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);
/* clear error counter registers */
if (cnt_sbe)
writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
if (cnt_dbe)
writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
if (!cnt_sbe && !cnt_dbe)
return;
if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
if (cnt_sbe)
cnt_sbe--;
else
dev_warn(mci->pdev, "inconsistent SBE count detected");
} else {
if (cnt_dbe)
cnt_dbe--;
else
dev_warn(mci->pdev, "inconsistent DBE count detected");
}
/* report earlier errors */
if (cnt_sbe)
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
cnt_sbe, /* error count */
0, 0, 0, /* pfn, offset, syndrome */
-1, -1, -1, /* top, mid, low layer */
mci->ctl_name,
"details unavailable (multiple errors)");
if (cnt_dbe)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
cnt_dbe, /* error count */
0, 0, 0, /* pfn, offset, syndrome */
-1, -1, -1, /* top, mid, low layer */
mci->ctl_name,
"details unavailable (multiple errors)");
/* report details for most recent error */
cs_val = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET;
bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET;
row_val = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET;
col_val = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET;
syndrome_val = (recv_ecc ^ calc_ecc) & 0xff;
addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val,
col_val);
msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */
msg += sprintf(msg, "bank=0x%x ", bank_val); /* 9 chars */
msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */
msg += sprintf(msg, "cs=%d", cs_val); /* 4 chars */
if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1, /* error count */
addr_val >> PAGE_SHIFT,
addr_val & ~PAGE_MASK,
syndrome_val,
cs_val, -1, -1, /* top, mid, low layer */
mci->ctl_name, drvdata->msg);
} else {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1, /* error count */
addr_val >> PAGE_SHIFT,
addr_val & ~PAGE_MASK,
syndrome_val,
cs_val, -1, -1, /* top, mid, low layer */
mci->ctl_name, drvdata->msg);
}
}
static void axp_mc_read_config(struct mem_ctl_info *mci)
{
struct axp_mc_drvdata *drvdata = mci->pvt_info;
uint32_t config, addr_ctrl, rank_ctrl;
unsigned int i, cs_struct, cs_size;
struct dimm_info *dimm;
config = readl(drvdata->base + SDRAM_CONFIG_REG);
if (config & SDRAM_CONFIG_BUS_WIDTH_MASK)
/* 64 bit */
drvdata->width = 8;
else
/* 32 bit */
drvdata->width = 4;
addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG);
rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG);
for (i = 0; i < SDRAM_NUM_CS; i++) {
dimm = mci->dimms[i];
if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i)))
continue;
drvdata->cs_addr_sel[i] =
!!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i));
cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i);
cs_size = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) |
((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i)));
switch (cs_size) {
case 0: /* 2GBit */
dimm->nr_pages = 524288;
break;
case 1: /* 256MBit */
dimm->nr_pages = 65536;
break;
case 2: /* 512MBit */
dimm->nr_pages = 131072;
break;
case 3: /* 1GBit */
dimm->nr_pages = 262144;
break;
case 4: /* 4GBit */
dimm->nr_pages = 1048576;
break;
case 5: /* 8GBit */
dimm->nr_pages = 2097152;
break;
}
dimm->grain = 8;
dimm->dtype = cs_struct ? DEV_X16 : DEV_X8;
dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ?
MEM_RDDR3 : MEM_DDR3;
dimm->edac_mode = EDAC_SECDED;
}
}
static const struct of_device_id axp_mc_of_match[] = {
{.compatible = "marvell,armada-xp-sdram-controller",},
{},
};
MODULE_DEVICE_TABLE(of, axp_mc_of_match);
static int axp_mc_probe(struct platform_device *pdev)
{
struct axp_mc_drvdata *drvdata;
struct edac_mc_layer layers[1];
const struct of_device_id *id;
struct mem_ctl_info *mci;
struct resource *r;
void __iomem *base;
uint32_t config;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "Unable to get mem resource\n");
return -ENODEV;
}
base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map regs\n");
return PTR_ERR(base);
}
config = readl(base + SDRAM_CONFIG_REG);
if (!(config & SDRAM_CONFIG_ECC_MASK)) {
dev_warn(&pdev->dev, "SDRAM ECC is not enabled");
return -EINVAL;
}
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = SDRAM_NUM_CS;
layers[0].is_virt_csrow = true;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata));
if (!mci)
return -ENOMEM;
drvdata = mci->pvt_info;
drvdata->base = base;
mci->pdev = &pdev->dev;
platform_set_drvdata(pdev, mci);
id = of_match_device(axp_mc_of_match, &pdev->dev);
mci->edac_check = axp_mc_check;
mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = pdev->dev.driver->name;
mci->ctl_name = id ? id->compatible : "unknown";
mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_NONE;
axp_mc_read_config(mci);
/* These SoCs have a reduced width bus */
if (of_machine_is_compatible("marvell,armada380") ||
of_machine_is_compatible("marvell,armadaxp-98dx3236"))
drvdata->width /= 2;
/* configure SBE threshold */
/* it seems that SBEs are not captured otherwise */
writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG);
/* clear cause registers */
writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);
/* clear counter registers */
writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
if (edac_mc_add_mc(mci)) {
edac_mc_free(mci);
return -EINVAL;
}
edac_op_state = EDAC_OPSTATE_POLL;
return 0;
}
static int axp_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver axp_mc_driver = {
.probe = axp_mc_probe,
.remove = axp_mc_remove,
.driver = {
.name = "armada_xp_mc_edac",
.of_match_table = of_match_ptr(axp_mc_of_match),
},
};
/************************ EDAC Device (L2 Cache) ***************************/
struct aurora_l2_drvdata {
void __iomem *base;
char msg[128];
/* error injection via debugfs */
uint32_t inject_addr;
uint32_t inject_mask;
uint8_t inject_ctl;
struct dentry *debugfs;
};
#ifdef CONFIG_EDAC_DEBUG
static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata)
{
drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK;
drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK;
writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG);
writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
}
#endif
static void aurora_l2_check(struct edac_device_ctl_info *dci)
{
struct aurora_l2_drvdata *drvdata = dci->pvt_info;
uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap;
unsigned int cnt_ce, cnt_ue;
char *msg = drvdata->msg;
size_t size = sizeof(drvdata->msg);
size_t len = 0;
cnt = readl(drvdata->base + AURORA_ERR_CNT_REG);
attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG);
addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG);
way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG);
cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET;
cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET;
/* clear error counter registers */
if (cnt_ce || cnt_ue)
writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID))
goto clear_remaining;
src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF;
if (src <= 3)
len += snprintf(msg+len, size-len, "src=CPU%d ", src);
else
len += snprintf(msg+len, size-len, "src=IO ");
txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF;
switch (txn) {
case 0:
len += snprintf(msg+len, size-len, "txn=Data-Read ");
break;
case 1:
len += snprintf(msg+len, size-len, "txn=Isn-Read ");
break;
case 2:
len += snprintf(msg+len, size-len, "txn=Clean-Flush ");
break;
case 3:
len += snprintf(msg+len, size-len, "txn=Eviction ");
break;
case 4:
len += snprintf(msg+len, size-len,
"txn=Read-Modify-Write ");
break;
}
err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF;
switch (err) {
case 0:
len += snprintf(msg+len, size-len, "err=CorrECC ");
break;
case 1:
len += snprintf(msg+len, size-len, "err=UnCorrECC ");
break;
case 2:
len += snprintf(msg+len, size-len, "err=TagParity ");
break;
}
len += snprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
len += snprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
len += snprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);
/* clear error capture registers */
writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);
if (err) {
/* UnCorrECC or TagParity */
if (cnt_ue)
cnt_ue--;
edac_device_handle_ue(dci, 0, 0, drvdata->msg);
} else {
if (cnt_ce)
cnt_ce--;
edac_device_handle_ce(dci, 0, 0, drvdata->msg);
}
clear_remaining:
/* report remaining errors */
while (cnt_ue--)
edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)");
while (cnt_ce--)
edac_device_handle_ce(dci, 0, 0, "details unavailable (multiple errors)");
}
static void aurora_l2_poll(struct edac_device_ctl_info *dci)
{
#ifdef CONFIG_EDAC_DEBUG
struct aurora_l2_drvdata *drvdata = dci->pvt_info;
#endif
aurora_l2_check(dci);
#ifdef CONFIG_EDAC_DEBUG
aurora_l2_inject(drvdata);
#endif
}
static const struct of_device_id aurora_l2_of_match[] = {
{.compatible = "marvell,aurora-system-cache",},
{},
};
MODULE_DEVICE_TABLE(of, aurora_l2_of_match);
static int aurora_l2_probe(struct platform_device *pdev)
{
struct aurora_l2_drvdata *drvdata;
struct edac_device_ctl_info *dci;
const struct of_device_id *id;
uint32_t l2x0_aux_ctrl;
void __iomem *base;
struct resource *r;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "Unable to get mem resource\n");
return -ENODEV;
}
base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map regs\n");
return PTR_ERR(base);
}
l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
dev_warn(&pdev->dev, "tag parity is not enabled");
if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
dev_warn(&pdev->dev, "data ECC is not enabled");
dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
"cpu", 1, "L", 1, 2, NULL, 0, 0);
if (!dci)
return -ENOMEM;
drvdata = dci->pvt_info;
drvdata->base = base;
dci->dev = &pdev->dev;
platform_set_drvdata(pdev, dci);
id = of_match_device(aurora_l2_of_match, &pdev->dev);
dci->edac_check = aurora_l2_poll;
dci->mod_name = pdev->dev.driver->name;
dci->ctl_name = id ? id->compatible : "unknown";
dci->dev_name = dev_name(&pdev->dev);
/* clear registers */
writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);
if (edac_device_add_device(dci)) {
edac_device_free_ctl_info(dci);
return -EINVAL;
}
#ifdef CONFIG_EDAC_DEBUG
drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev));
if (drvdata->debugfs) {
edac_debugfs_create_x32("inject_addr", 0644,
drvdata->debugfs,
&drvdata->inject_addr);
edac_debugfs_create_x32("inject_mask", 0644,
drvdata->debugfs,
&drvdata->inject_mask);
edac_debugfs_create_x8("inject_ctl", 0644,
drvdata->debugfs, &drvdata->inject_ctl);
}
#endif
return 0;
}
static int aurora_l2_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
struct aurora_l2_drvdata *drvdata = dci->pvt_info;
edac_debugfs_remove_recursive(drvdata->debugfs);
#endif
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver aurora_l2_driver = {
.probe = aurora_l2_probe,
.remove = aurora_l2_remove,
.driver = {
.name = "aurora_l2_edac",
.of_match_table = of_match_ptr(aurora_l2_of_match),
},
};
/************************ Driver registration ******************************/
static struct platform_driver * const drivers[] = {
&axp_mc_driver,
&aurora_l2_driver,
};
static int __init armada_xp_edac_init(void)
{
int res;
/* only polling is supported */
edac_op_state = EDAC_OPSTATE_POLL;
res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (res)
pr_warn("Aramda XP EDAC drivers fail to register\n");
return 0;
}
module_init(armada_xp_edac_init);
static void __exit armada_xp_edac_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(armada_xp_edac_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pengutronix");
MODULE_DESCRIPTION("EDAC Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");


@@ -138,3 +138,14 @@ void edac_debugfs_create_x16(const char *name, umode_t mode,
debugfs_create_x16(name, mode, parent, value);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_x16);
/* Wrapper for debugfs_create_x32() */
void edac_debugfs_create_x32(const char *name, umode_t mode,
struct dentry *parent, u32 *value)
{
if (!parent)
parent = edac_debugfs;
debugfs_create_x32(name, mode, parent, value);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_x32);


@@ -82,6 +82,8 @@ void edac_debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value);
void edac_debugfs_create_x16(const char *name, umode_t mode,
struct dentry *parent, u16 *value);
void edac_debugfs_create_x32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
#else
static inline void edac_debugfs_init(void) { }
static inline void edac_debugfs_exit(void) { }
@@ -96,6 +98,8 @@ static inline void edac_debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value) { }
static inline void edac_debugfs_create_x16(const char *name, umode_t mode,
struct dentry *parent, u16 *value) { }
static inline void edac_debugfs_create_x32(const char *name, umode_t mode,
struct dentry *parent, u32 *value) { }
#endif
/*