Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - StrongARM SA1111 updates to modernise and remove cruft

 - Add StrongARM gpio drivers for board GPIOs

 - Verify size of zImage is what we expect to avoid issues with
   appended DTB

 - nommu updates from Vladimir Murzin

 - page table read-write-execute checking from Jinbum Park

 - Broadcom Brahma-B15 cache updates from Florian Fainelli

 - Avoid failure with kprobes test caused by inappropriately
   placed kprobes

 - Remove __memzero optimisation (which was incorrectly being
   used directly by some drivers)
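
   For the drivers that were calling __memzero() directly, the conversion is
   mechanical: __memzero(p, n) was never more than a zero-fill, so callers
   simply become memset(p, 0, n). A minimal sketch of the driver-side change
   (the helper name is made up for illustration):

   #include <linux/types.h>
   #include <linux/string.h>

   /* Illustrative only: a made-up helper showing the mechanical conversion. */
   static void example_clear_buffer(void *buf, size_t len)
   {
           /* before this series: __memzero(buf, len); */
           memset(buf, 0, len);    /* __memzero() was only ever a zero memset() */
   }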

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (32 commits)
  ARM: 8745/1: get rid of __memzero()
  ARM: 8744/1: don't discard memblock for kexec
  ARM: 8743/1: bL_switcher: add MODULE_LICENSE tag
  ARM: 8742/1: Always use REFCOUNT_FULL
  ARM: 8741/1: B15: fix unused label warnings
  ARM: 8740/1: NOMMU: Make sure we do not hold stale data in mem[] array
  ARM: 8739/1: NOMMU: Setup VBAR/Hivecs for secondaries cores
  ARM: 8738/1: Disable CONFIG_DEBUG_VIRTUAL for NOMMU
  ARM: 8737/1: mm: dump: add checking for writable and executable
  ARM: 8736/1: mm: dump: make the page table dumping seq_file
  ARM: 8735/1: mm: dump: make page table dumping reusable
  ARM: sa1100/neponset: add GPIO drivers for control and modem registers
  ARM: sa1100/assabet: add BCR/BSR GPIO driver
  ARM: 8734/1: mm: idmap: Mark variables as ro_after_init
  ARM: 8733/1: hw_breakpoint: Mark variables as __ro_after_init
  ARM: 8732/1: NOMMU: Allow userspace to access background MPU region
  ARM: 8727/1: MAINTAINERS: Update brcmstb entries to cover B15 code
  ARM: 8728/1: B15: Register reboot notifier for KEXEC
  ARM: 8730/1: B15: Add suspend/resume hooks
  ARM: 8726/1: B15: Add CPU hotplug awareness
  ...
Linus Torvalds 2018-02-02 09:50:51 -08:00
commit 367b0df173
48 changed files with 1031 additions and 623 deletions


@ -2855,6 +2855,8 @@ S: Maintained
F: arch/arm/mach-bcm/*brcmstb*
F: arch/arm/boot/dts/bcm7*.dts*
F: drivers/bus/brcmstb_gisb.c
F: arch/arm/mm/cache-b15-rac.c
F: arch/arm/include/asm/hardware/cache-b15-rac.h
N: brcmstb
BROADCOM BMIPS CPUFREQ DRIVER


@ -3,8 +3,8 @@ config ARM
bool
default y
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_SET_MEMORY
@ -100,6 +100,7 @@ config ARM
select OLD_SIGACTION
select OLD_SIGSUSPEND3
select PERF_USE_VMALLOC
select REFCOUNT_FULL
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
# Above selects are sorted alphabetically; please add new ones
@ -1526,12 +1527,10 @@ config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K
default y if CPU_THUMBONLY
select ARM_ASM_UNIFIED
select ARM_UNWIND
help
By enabling this option, the kernel will be compiled in
Thumb-2 mode. A compiler/assembler that understand the unified
ARM-Thumb syntax is needed.
Thumb-2 mode.
If unsure, say N.
@ -1566,9 +1565,6 @@ config THUMB2_AVOID_R_ARM_THM_JUMP11
Unless you are sure your tools don't have this problem, say Y.
config ARM_ASM_UNIFIED
bool
config ARM_PATCH_IDIV
bool "Runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()"
depends on CPU_32v7 && !XIP_KERNEL


@ -3,10 +3,14 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
config ARM_PTDUMP
config ARM_PTDUMP_CORE
def_bool n
config ARM_PTDUMP_DEBUGFS
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
depends on MMU
select ARM_PTDUMP_CORE
select DEBUG_FS
---help---
Say Y here if you want to show the kernel pagetable layout in a
@ -16,6 +20,33 @@ config ARM_PTDUMP
kernel.
If in doubt, say "N"
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select ARM_PTDUMP_CORE
---help---
Generate a warning if any W+X mappings are found at boot.
This is useful for discovering cases where the kernel is leaving
W+X mappings after applying NX, as such mappings are a security risk.
Look for a message in dmesg output like this:
arm/mm: Checked W+X mappings: passed, no W+X pages found.
or like this, if the check failed:
arm/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
Note that even if the check fails, your kernel is possibly
still fine, as W+X mappings are not a security hole in
themselves; they merely make the exploitation of other
unfixed kernel bugs easier.
There is no runtime or memory usage effect of this option
once the kernel has booted up - it's a one time check.
If in doubt, say "Y".
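
The check itself is a one-shot walk of the kernel page tables once permissions have been
finalised. A sketch of the intended call site, assuming (as on x86 and arm64) that it hangs
off mark_rodata_ro() via the debug_checkwx() helper added by this series; this is not the
literal arch/arm/mm code:

#include <asm/ptdump.h>

void mark_rodata_ro(void)
{
        /* ... apply RO/NX permissions to kernel text, rodata and modules ... */

        /* Expands to ptdump_check_wx() when CONFIG_DEBUG_WX=y, no-op otherwise. */
        debug_checkwx();
}
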
# RMK wants arm kernels compiled with frame pointers or stack unwinding.
# If you know what you are doing and are willing to live without stack
# traces, you can get a slightly smaller kernel by setting this option to


@ -115,9 +115,11 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
CFLAGS_ABI +=-funwind-tables
endif
# Accept old syntax despite ".syntax unified"
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
ifeq ($(CONFIG_THUMB2_KERNEL),y)
AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
@ -125,7 +127,7 @@ ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
KBUILD_CFLAGS_MODULE +=-fno-optimize-sibling-calls
endif
else
CFLAGS_ISA :=$(call cc-option,-marm,)
CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA)
endif


@ -130,8 +130,3 @@ void *memset(void *s, int c, size_t count)
*xs++ = c;
return s;
}
void __memzero(void *s, size_t count)
{
memset(s, 0, count);
}


@ -56,6 +56,7 @@ SECTIONS
.rodata : {
*(.rodata)
*(.rodata.*)
*(.data.rel.ro)
}
.piggydata : {
*(.piggydata)
@ -101,6 +102,12 @@ SECTIONS
* this symbol allows further debug in the near future.
*/
.image_end (NOLOAD) : {
/*
* EFI requires that the image is aligned to 512 bytes, and appended
* DTB requires that we know where the end of the image is. Ensure
* that both are satisfied by ensuring that there are no additional
* sections emitted into the decompressor image.
*/
_edata_real = .;
}
@ -128,3 +135,4 @@ SECTIONS
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
}
ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");


@ -57,3 +57,7 @@ static struct miscdevice bL_switcher_device = {
&bL_switcher_fops
};
module_misc_device(bL_switcher_device);
MODULE_AUTHOR("Nicolas Pitre <nico@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface");


@ -108,6 +108,7 @@ struct sa1111 {
spinlock_t lock;
void __iomem *base;
struct sa1111_platform_data *pdata;
struct irq_domain *irqdomain;
struct gpio_chip gc;
#ifdef CONFIG_PM
void *saved_state;
@ -125,7 +126,7 @@ struct sa1111_dev_info {
unsigned long skpcr_mask;
bool dma;
unsigned int devid;
unsigned int irq[6];
unsigned int hwirq[6];
};
static struct sa1111_dev_info sa1111_devices[] = {
@ -134,7 +135,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.skpcr_mask = SKPCR_UCLKEN,
.dma = true,
.devid = SA1111_DEVID_USB,
.irq = {
.hwirq = {
IRQ_USBPWR,
IRQ_HCIM,
IRQ_HCIBUFFACC,
@ -148,7 +149,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN,
.dma = true,
.devid = SA1111_DEVID_SAC,
.irq = {
.hwirq = {
AUDXMTDMADONEA,
AUDXMTDMADONEB,
AUDRCVDMADONEA,
@ -164,7 +165,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.offset = SA1111_KBD,
.skpcr_mask = SKPCR_PTCLKEN,
.devid = SA1111_DEVID_PS2_KBD,
.irq = {
.hwirq = {
IRQ_TPRXINT,
IRQ_TPTXINT
},
@ -173,7 +174,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.offset = SA1111_MSE,
.skpcr_mask = SKPCR_PMCLKEN,
.devid = SA1111_DEVID_PS2_MSE,
.irq = {
.hwirq = {
IRQ_MSRXINT,
IRQ_MSTXINT
},
@ -182,7 +183,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.offset = 0x1800,
.skpcr_mask = 0,
.devid = SA1111_DEVID_PCMCIA,
.irq = {
.hwirq = {
IRQ_S0_READY_NINT,
IRQ_S0_CD_VALID,
IRQ_S0_BVD1_STSCHG,
@ -193,6 +194,19 @@ static struct sa1111_dev_info sa1111_devices[] = {
},
};
static int sa1111_map_irq(struct sa1111 *sachip, irq_hw_number_t hwirq)
{
return irq_create_mapping(sachip->irqdomain, hwirq);
}
static void sa1111_handle_irqdomain(struct irq_domain *irqdomain, int irq)
{
struct irq_desc *d = irq_to_desc(irq_linear_revmap(irqdomain, irq));
if (d)
generic_handle_irq_desc(d);
}
/*
* SA1111 interrupt support. Since clearing an IRQ while there are
* active IRQs causes the interrupt output to pulse, the upper levels
@ -202,49 +216,45 @@ static void sa1111_irq_handler(struct irq_desc *desc)
{
unsigned int stat0, stat1, i;
struct sa1111 *sachip = irq_desc_get_handler_data(desc);
struct irq_domain *irqdomain;
void __iomem *mapbase = sachip->base + SA1111_INTC;
stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
stat1 = sa1111_readl(mapbase + SA1111_INTSTATCLR1);
stat0 = readl_relaxed(mapbase + SA1111_INTSTATCLR0);
stat1 = readl_relaxed(mapbase + SA1111_INTSTATCLR1);
sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0);
writel_relaxed(stat0, mapbase + SA1111_INTSTATCLR0);
desc->irq_data.chip->irq_ack(&desc->irq_data);
sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
writel_relaxed(stat1, mapbase + SA1111_INTSTATCLR1);
if (stat0 == 0 && stat1 == 0) {
do_bad_IRQ(desc);
return;
}
irqdomain = sachip->irqdomain;
for (i = 0; stat0; i++, stat0 >>= 1)
if (stat0 & 1)
generic_handle_irq(i + sachip->irq_base);
sa1111_handle_irqdomain(irqdomain, i);
for (i = 32; stat1; i++, stat1 >>= 1)
if (stat1 & 1)
generic_handle_irq(i + sachip->irq_base);
sa1111_handle_irqdomain(irqdomain, i);
/* For level-based interrupts */
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base))
#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32))
static u32 sa1111_irqmask(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
return BIT((d->irq - sachip->irq_base) & 31);
return BIT(irqd_to_hwirq(d) & 31);
}
static int sa1111_irqbank(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
return ((d->irq - sachip->irq_base) / 32) * 4;
return (irqd_to_hwirq(d) / 32) * 4;
}
static void sa1111_ack_irq(struct irq_data *d)
@ -257,9 +267,9 @@ static void sa1111_mask_irq(struct irq_data *d)
void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
u32 ie;
ie = sa1111_readl(mapbase + SA1111_INTEN0);
ie = readl_relaxed(mapbase + SA1111_INTEN0);
ie &= ~sa1111_irqmask(d);
sa1111_writel(ie, mapbase + SA1111_INTEN0);
writel(ie, mapbase + SA1111_INTEN0);
}
static void sa1111_unmask_irq(struct irq_data *d)
@ -268,9 +278,9 @@ static void sa1111_unmask_irq(struct irq_data *d)
void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
u32 ie;
ie = sa1111_readl(mapbase + SA1111_INTEN0);
ie = readl_relaxed(mapbase + SA1111_INTEN0);
ie |= sa1111_irqmask(d);
sa1111_writel(ie, mapbase + SA1111_INTEN0);
writel_relaxed(ie, mapbase + SA1111_INTEN0);
}
/*
@ -287,11 +297,11 @@ static int sa1111_retrigger_irq(struct irq_data *d)
u32 ip, mask = sa1111_irqmask(d);
int i;
ip = sa1111_readl(mapbase + SA1111_INTPOL0);
ip = readl_relaxed(mapbase + SA1111_INTPOL0);
for (i = 0; i < 8; i++) {
sa1111_writel(ip ^ mask, mapbase + SA1111_INTPOL0);
sa1111_writel(ip, mapbase + SA1111_INTPOL0);
if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
writel_relaxed(ip ^ mask, mapbase + SA1111_INTPOL0);
writel_relaxed(ip, mapbase + SA1111_INTPOL0);
if (readl_relaxed(mapbase + SA1111_INTSTATCLR0) & mask)
break;
}
@ -313,13 +323,13 @@ static int sa1111_type_irq(struct irq_data *d, unsigned int flags)
if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
return -EINVAL;
ip = sa1111_readl(mapbase + SA1111_INTPOL0);
ip = readl_relaxed(mapbase + SA1111_INTPOL0);
if (flags & IRQ_TYPE_EDGE_RISING)
ip &= ~mask;
else
ip |= mask;
sa1111_writel(ip, mapbase + SA1111_INTPOL0);
sa1111_writel(ip, mapbase + SA1111_WAKEPOL0);
writel_relaxed(ip, mapbase + SA1111_INTPOL0);
writel_relaxed(ip, mapbase + SA1111_WAKEPOL0);
return 0;
}
@ -330,12 +340,12 @@ static int sa1111_wake_irq(struct irq_data *d, unsigned int on)
void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
u32 we, mask = sa1111_irqmask(d);
we = sa1111_readl(mapbase + SA1111_WAKEEN0);
we = readl_relaxed(mapbase + SA1111_WAKEEN0);
if (on)
we |= mask;
else
we &= ~mask;
sa1111_writel(we, mapbase + SA1111_WAKEEN0);
writel_relaxed(we, mapbase + SA1111_WAKEEN0);
return 0;
}
@ -350,10 +360,30 @@ static struct irq_chip sa1111_irq_chip = {
.irq_set_wake = sa1111_wake_irq,
};
static int sa1111_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct sa1111 *sachip = d->host_data;
/* Disallow unavailable interrupts */
if (hwirq > SSPROR && hwirq < AUDXMTDMADONEA)
return -EINVAL;
irq_set_chip_data(irq, sachip);
irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
return 0;
}
static const struct irq_domain_ops sa1111_irqdomain_ops = {
.map = sa1111_irqdomain_map,
.xlate = irq_domain_xlate_twocell,
};
static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
{
void __iomem *irqbase = sachip->base + SA1111_INTC;
unsigned i, irq;
int ret;
/*
@ -373,37 +403,39 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
sachip->irq_base = ret;
/* disable all IRQs */
sa1111_writel(0, irqbase + SA1111_INTEN0);
sa1111_writel(0, irqbase + SA1111_INTEN1);
sa1111_writel(0, irqbase + SA1111_WAKEEN0);
sa1111_writel(0, irqbase + SA1111_WAKEEN1);
writel_relaxed(0, irqbase + SA1111_INTEN0);
writel_relaxed(0, irqbase + SA1111_INTEN1);
writel_relaxed(0, irqbase + SA1111_WAKEEN0);
writel_relaxed(0, irqbase + SA1111_WAKEEN1);
/*
* detect on rising edge. Note: Feb 2001 Errata for SA1111
* specifies that S0ReadyInt and S1ReadyInt should be '1'.
*/
sa1111_writel(0, irqbase + SA1111_INTPOL0);
sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
BIT(IRQ_S1_READY_NINT & 31),
irqbase + SA1111_INTPOL1);
writel_relaxed(0, irqbase + SA1111_INTPOL0);
writel_relaxed(BIT(IRQ_S0_READY_NINT & 31) |
BIT(IRQ_S1_READY_NINT & 31),
irqbase + SA1111_INTPOL1);
/* clear all IRQs */
sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0);
sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);
writel_relaxed(~0, irqbase + SA1111_INTSTATCLR0);
writel_relaxed(~0, irqbase + SA1111_INTSTATCLR1);
for (i = IRQ_GPAIN0; i <= SSPROR; i++) {
irq = sachip->irq_base + i;
irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
irq_set_chip_data(irq, sachip);
irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
sachip->irqdomain = irq_domain_add_linear(NULL, SA1111_IRQ_NR,
&sa1111_irqdomain_ops,
sachip);
if (!sachip->irqdomain) {
irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
return -ENOMEM;
}
for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) {
irq = sachip->irq_base + i;
irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
irq_set_chip_data(irq, sachip);
irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}
irq_domain_associate_many(sachip->irqdomain,
sachip->irq_base + IRQ_GPAIN0,
IRQ_GPAIN0, SSPROR + 1 - IRQ_GPAIN0);
irq_domain_associate_many(sachip->irqdomain,
sachip->irq_base + AUDXMTDMADONEA,
AUDXMTDMADONEA,
IRQ_S1_BVD1_STSCHG + 1 - AUDXMTDMADONEA);
/*
* Register SA1111 interrupt
@ -420,20 +452,22 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
static void sa1111_remove_irq(struct sa1111 *sachip)
{
struct irq_domain *domain = sachip->irqdomain;
void __iomem *irqbase = sachip->base + SA1111_INTC;
int i;
/* disable all IRQs */
sa1111_writel(0, irqbase + SA1111_INTEN0);
sa1111_writel(0, irqbase + SA1111_INTEN1);
sa1111_writel(0, irqbase + SA1111_WAKEEN0);
sa1111_writel(0, irqbase + SA1111_WAKEEN1);
writel_relaxed(0, irqbase + SA1111_INTEN0);
writel_relaxed(0, irqbase + SA1111_INTEN1);
writel_relaxed(0, irqbase + SA1111_WAKEEN0);
writel_relaxed(0, irqbase + SA1111_WAKEEN1);
if (sachip->irq != NO_IRQ) {
irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
for (i = 0; i < SA1111_IRQ_NR; i++)
irq_dispose_mapping(irq_find_mapping(domain, i));
irq_domain_remove(domain);
release_mem_region(sachip->phys + SA1111_INTC, 512);
}
release_mem_region(sachip->phys + SA1111_INTC, 512);
}
enum {
@ -572,7 +606,7 @@ static int sa1111_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct sa1111 *sachip = gc_to_sa1111(gc);
return sachip->irq_base + offset;
return sa1111_map_irq(sachip, offset);
}
static int sa1111_setup_gpios(struct sa1111 *sachip)
@ -618,11 +652,11 @@ static void sa1111_wake(struct sa1111 *sachip)
/*
* Turn VCO on, and disable PLL Bypass.
*/
r = sa1111_readl(sachip->base + SA1111_SKCR);
r = readl_relaxed(sachip->base + SA1111_SKCR);
r &= ~SKCR_VCO_OFF;
sa1111_writel(r, sachip->base + SA1111_SKCR);
writel_relaxed(r, sachip->base + SA1111_SKCR);
r |= SKCR_PLL_BYPASS | SKCR_OE_EN;
sa1111_writel(r, sachip->base + SA1111_SKCR);
writel_relaxed(r, sachip->base + SA1111_SKCR);
/*
* Wait lock time. SA1111 manual _doesn't_
@ -634,7 +668,7 @@ static void sa1111_wake(struct sa1111 *sachip)
* Enable RCLK. We also ensure that RDYEN is set.
*/
r |= SKCR_RCLKEN | SKCR_RDYEN;
sa1111_writel(r, sachip->base + SA1111_SKCR);
writel_relaxed(r, sachip->base + SA1111_SKCR);
/*
* Wait 14 RCLK cycles for the chip to finish coming out
@ -645,7 +679,7 @@ static void sa1111_wake(struct sa1111 *sachip)
/*
* Ensure all clocks are initially off.
*/
sa1111_writel(0, sachip->base + SA1111_SKPCR);
writel_relaxed(0, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
@ -675,7 +709,7 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
if (cas_latency == 3)
smcr |= SMCR_CLAT;
sa1111_writel(smcr, sachip->base + SA1111_SMCR);
writel_relaxed(smcr, sachip->base + SA1111_SMCR);
/*
* Now clear the bits in the DMA mask to work around the SA1111
@ -723,8 +757,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
dev->mapbase = sachip->base + info->offset;
dev->skpcr_mask = info->skpcr_mask;
for (i = 0; i < ARRAY_SIZE(info->irq); i++)
dev->irq[i] = sachip->irq_base + info->irq[i];
for (i = 0; i < ARRAY_SIZE(info->hwirq); i++)
dev->hwirq[i] = info->hwirq[i];
/*
* If the parent device has a DMA mask associated with it, and
@ -814,7 +848,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
/*
* Probe for the chip. Only touch the SBI registers.
*/
id = sa1111_readl(sachip->base + SA1111_SKID);
id = readl_relaxed(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id);
ret = -ENODEV;
@ -833,11 +867,9 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
* The interrupt controller must be initialised before any
* other device to ensure that the interrupts are available.
*/
if (sachip->irq != NO_IRQ) {
ret = sa1111_setup_irq(sachip, pd->irq_base);
if (ret)
goto err_clk;
}
ret = sa1111_setup_irq(sachip, pd->irq_base);
if (ret)
goto err_clk;
/* Setup the GPIOs - should really be done after the IRQ setup */
ret = sa1111_setup_gpios(sachip);
@ -864,8 +896,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
* DMA. It can otherwise be held firmly in the off position.
* (currently, we always enable it.)
*/
val = sa1111_readl(sachip->base + SA1111_SKPCR);
sa1111_writel(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR);
val = readl_relaxed(sachip->base + SA1111_SKPCR);
writel_relaxed(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR);
/*
* Enable the SA1110 memory bus request and grant signals.
@ -962,31 +994,31 @@ static int sa1111_suspend_noirq(struct device *dev)
* Save state.
*/
base = sachip->base;
save->skcr = sa1111_readl(base + SA1111_SKCR);
save->skpcr = sa1111_readl(base + SA1111_SKPCR);
save->skcdr = sa1111_readl(base + SA1111_SKCDR);
save->skaud = sa1111_readl(base + SA1111_SKAUD);
save->skpwm0 = sa1111_readl(base + SA1111_SKPWM0);
save->skpwm1 = sa1111_readl(base + SA1111_SKPWM1);
save->skcr = readl_relaxed(base + SA1111_SKCR);
save->skpcr = readl_relaxed(base + SA1111_SKPCR);
save->skcdr = readl_relaxed(base + SA1111_SKCDR);
save->skaud = readl_relaxed(base + SA1111_SKAUD);
save->skpwm0 = readl_relaxed(base + SA1111_SKPWM0);
save->skpwm1 = readl_relaxed(base + SA1111_SKPWM1);
sa1111_writel(0, sachip->base + SA1111_SKPWM0);
sa1111_writel(0, sachip->base + SA1111_SKPWM1);
writel_relaxed(0, sachip->base + SA1111_SKPWM0);
writel_relaxed(0, sachip->base + SA1111_SKPWM1);
base = sachip->base + SA1111_INTC;
save->intpol0 = sa1111_readl(base + SA1111_INTPOL0);
save->intpol1 = sa1111_readl(base + SA1111_INTPOL1);
save->inten0 = sa1111_readl(base + SA1111_INTEN0);
save->inten1 = sa1111_readl(base + SA1111_INTEN1);
save->wakepol0 = sa1111_readl(base + SA1111_WAKEPOL0);
save->wakepol1 = sa1111_readl(base + SA1111_WAKEPOL1);
save->wakeen0 = sa1111_readl(base + SA1111_WAKEEN0);
save->wakeen1 = sa1111_readl(base + SA1111_WAKEEN1);
save->intpol0 = readl_relaxed(base + SA1111_INTPOL0);
save->intpol1 = readl_relaxed(base + SA1111_INTPOL1);
save->inten0 = readl_relaxed(base + SA1111_INTEN0);
save->inten1 = readl_relaxed(base + SA1111_INTEN1);
save->wakepol0 = readl_relaxed(base + SA1111_WAKEPOL0);
save->wakepol1 = readl_relaxed(base + SA1111_WAKEPOL1);
save->wakeen0 = readl_relaxed(base + SA1111_WAKEEN0);
save->wakeen1 = readl_relaxed(base + SA1111_WAKEEN1);
/*
* Disable.
*/
val = sa1111_readl(sachip->base + SA1111_SKCR);
sa1111_writel(val | SKCR_SLEEP, sachip->base + SA1111_SKCR);
val = readl_relaxed(sachip->base + SA1111_SKCR);
writel_relaxed(val | SKCR_SLEEP, sachip->base + SA1111_SKCR);
clk_disable(sachip->clk);
@ -1023,7 +1055,7 @@ static int sa1111_resume_noirq(struct device *dev)
* Ensure that the SA1111 is still here.
* FIXME: shouldn't do this here.
*/
id = sa1111_readl(sachip->base + SA1111_SKID);
id = readl_relaxed(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
__sa1111_remove(sachip);
dev_set_drvdata(dev, NULL);
@ -1047,26 +1079,26 @@ static int sa1111_resume_noirq(struct device *dev)
*/
spin_lock_irqsave(&sachip->lock, flags);
sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
writel_relaxed(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
writel_relaxed(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
base = sachip->base;
sa1111_writel(save->skcr, base + SA1111_SKCR);
sa1111_writel(save->skpcr, base + SA1111_SKPCR);
sa1111_writel(save->skcdr, base + SA1111_SKCDR);
sa1111_writel(save->skaud, base + SA1111_SKAUD);
sa1111_writel(save->skpwm0, base + SA1111_SKPWM0);
sa1111_writel(save->skpwm1, base + SA1111_SKPWM1);
writel_relaxed(save->skcr, base + SA1111_SKCR);
writel_relaxed(save->skpcr, base + SA1111_SKPCR);
writel_relaxed(save->skcdr, base + SA1111_SKCDR);
writel_relaxed(save->skaud, base + SA1111_SKAUD);
writel_relaxed(save->skpwm0, base + SA1111_SKPWM0);
writel_relaxed(save->skpwm1, base + SA1111_SKPWM1);
base = sachip->base + SA1111_INTC;
sa1111_writel(save->intpol0, base + SA1111_INTPOL0);
sa1111_writel(save->intpol1, base + SA1111_INTPOL1);
sa1111_writel(save->inten0, base + SA1111_INTEN0);
sa1111_writel(save->inten1, base + SA1111_INTEN1);
sa1111_writel(save->wakepol0, base + SA1111_WAKEPOL0);
sa1111_writel(save->wakepol1, base + SA1111_WAKEPOL1);
sa1111_writel(save->wakeen0, base + SA1111_WAKEEN0);
sa1111_writel(save->wakeen1, base + SA1111_WAKEEN1);
writel_relaxed(save->intpol0, base + SA1111_INTPOL0);
writel_relaxed(save->intpol1, base + SA1111_INTPOL1);
writel_relaxed(save->inten0, base + SA1111_INTEN0);
writel_relaxed(save->inten1, base + SA1111_INTEN1);
writel_relaxed(save->wakepol0, base + SA1111_WAKEPOL0);
writel_relaxed(save->wakepol1, base + SA1111_WAKEPOL1);
writel_relaxed(save->wakeen0, base + SA1111_WAKEEN0);
writel_relaxed(save->wakeen1, base + SA1111_WAKEEN1);
spin_unlock_irqrestore(&sachip->lock, flags);
@ -1153,7 +1185,7 @@ static unsigned int __sa1111_pll_clock(struct sa1111 *sachip)
{
unsigned int skcdr, fbdiv, ipdiv, opdiv;
skcdr = sa1111_readl(sachip->base + SA1111_SKCDR);
skcdr = readl_relaxed(sachip->base + SA1111_SKCDR);
fbdiv = (skcdr & 0x007f) + 2;
ipdiv = ((skcdr & 0x0f80) >> 7) + 2;
@ -1195,13 +1227,13 @@ void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode)
spin_lock_irqsave(&sachip->lock, flags);
val = sa1111_readl(sachip->base + SA1111_SKCR);
val = readl_relaxed(sachip->base + SA1111_SKCR);
if (mode == SA1111_AUDIO_I2S) {
val &= ~SKCR_SELAC;
} else {
val |= SKCR_SELAC;
}
sa1111_writel(val, sachip->base + SA1111_SKCR);
writel_relaxed(val, sachip->base + SA1111_SKCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
@ -1226,7 +1258,7 @@ int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate)
if (div > 128)
div = 128;
sa1111_writel(div - 1, sachip->base + SA1111_SKAUD);
writel_relaxed(div - 1, sachip->base + SA1111_SKAUD);
return 0;
}
@ -1244,7 +1276,7 @@ int sa1111_get_audio_rate(struct sa1111_dev *sadev)
if (sadev->devid != SA1111_DEVID_SAC)
return -EINVAL;
div = sa1111_readl(sachip->base + SA1111_SKAUD) + 1;
div = readl_relaxed(sachip->base + SA1111_SKAUD) + 1;
return __sa1111_pll_clock(sachip) / (256 * div);
}
@ -1261,10 +1293,10 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev,
#define MODIFY_BITS(port, mask, dir) \
if (mask) { \
val = sa1111_readl(port); \
val = readl_relaxed(port); \
val &= ~(mask); \
val |= (dir) & (mask); \
sa1111_writel(val, port); \
writel_relaxed(val, port); \
}
spin_lock_irqsave(&sachip->lock, flags);
@ -1329,8 +1361,8 @@ int sa1111_enable_device(struct sa1111_dev *sadev)
if (ret == 0) {
spin_lock_irqsave(&sachip->lock, flags);
val = sa1111_readl(sachip->base + SA1111_SKPCR);
sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
val = readl_relaxed(sachip->base + SA1111_SKPCR);
writel_relaxed(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
return ret;
@ -1348,8 +1380,8 @@ void sa1111_disable_device(struct sa1111_dev *sadev)
unsigned int val;
spin_lock_irqsave(&sachip->lock, flags);
val = sa1111_readl(sachip->base + SA1111_SKPCR);
sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
val = readl_relaxed(sachip->base + SA1111_SKPCR);
writel_relaxed(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
if (sachip->pdata && sachip->pdata->disable)
@ -1359,9 +1391,10 @@ EXPORT_SYMBOL(sa1111_disable_device);
int sa1111_get_irq(struct sa1111_dev *sadev, unsigned num)
{
if (num >= ARRAY_SIZE(sadev->irq))
struct sa1111 *sachip = sa1111_chip_driver(sadev);
if (num >= ARRAY_SIZE(sadev->hwirq))
return -EINVAL;
return sadev->irq[num];
return sa1111_map_irq(sachip, sadev->hwirq[num]);
}
EXPORT_SYMBOL_GPL(sa1111_get_irq);
@ -1379,36 +1412,6 @@ static int sa1111_match(struct device *_dev, struct device_driver *_drv)
return !!(dev->devid & drv->devid);
}
static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
{
struct sa1111_dev *sadev = to_sa1111_device(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = 0;
if (drv && drv->suspend)
ret = drv->suspend(sadev, state);
return ret;
}
static int sa1111_bus_resume(struct device *dev)
{
struct sa1111_dev *sadev = to_sa1111_device(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = 0;
if (drv && drv->resume)
ret = drv->resume(sadev);
return ret;
}
static void sa1111_bus_shutdown(struct device *dev)
{
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
if (drv && drv->shutdown)
drv->shutdown(to_sa1111_device(dev));
}
static int sa1111_bus_probe(struct device *dev)
{
struct sa1111_dev *sadev = to_sa1111_device(dev);
@ -1436,9 +1439,6 @@ struct bus_type sa1111_bus_type = {
.match = sa1111_match,
.probe = sa1111_bus_probe,
.remove = sa1111_bus_remove,
.suspend = sa1111_bus_suspend,
.resume = sa1111_bus_resume,
.shutdown = sa1111_bus_shutdown,
};
EXPORT_SYMBOL(sa1111_bus_type);
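
With the conversion to an irq_domain, SA1111 child drivers no longer compute irq_base +
offset themselves; a Linux interrupt number is resolved from the hardware IRQ at request
time. A minimal sketch of the consumer side using the existing sa1111_get_irq() helper
(the handler and names below are made up for illustration):

#include <linux/interrupt.h>
#include <asm/hardware/sa1111.h>

static irqreturn_t example_sa1111_isr(int irq, void *dev_id)
{
        /* acknowledge and handle the device interrupt here */
        return IRQ_HANDLED;
}

static int example_sa1111_probe(struct sa1111_dev *sadev)
{
        int irq = sa1111_get_irq(sadev, 0);     /* maps hwirq[0] to a virtual IRQ */

        if (irq <= 0)
                return irq ? irq : -ENXIO;

        return request_irq(irq, example_sa1111_isr, 0, "example-sa1111", sadev);
}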


@ -10,11 +10,10 @@
#include <linux/interrupt.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __exception
#define __exception_irq_entry
#endif
#endif /* __ASM_ARM_EXCEPTION_H */


@ -117,6 +117,10 @@
# endif
#endif
#if defined(CONFIG_CACHE_B15_RAC)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V7M)
# define MULTI_CACHE 1
#endif


@ -0,0 +1,10 @@
#ifndef __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
#define __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
#ifndef __ASSEMBLY__
void b15_flush_kern_cache_all(void);
#endif
#endif


@ -15,33 +15,6 @@
#include <mach/bitfield.h>
/*
* The SA1111 is always located at virtual 0xf4000000, and is always
* "native" endian.
*/
#define SA1111_VBASE 0xf4000000
/* Don't use these! */
#define SA1111_p2v( x ) ((x) - SA1111_BASE + SA1111_VBASE)
#define SA1111_v2p( x ) ((x) - SA1111_VBASE + SA1111_BASE)
#ifndef __ASSEMBLY__
#define _SA1111(x) ((x) + sa1111->resource.start)
#endif
#define sa1111_writel(val,addr) __raw_writel(val, addr)
#define sa1111_readl(addr) __raw_readl(addr)
/*
* 26 bits of the SA-1110 address bus are available to the SA-1111.
* Use these when feeding target addresses to the DMA engines.
*/
#define SA1111_ADDR_WIDTH (26)
#define SA1111_ADDR_MASK ((1<<SA1111_ADDR_WIDTH)-1)
#define SA1111_DMA_ADDR(x) ((x)&SA1111_ADDR_MASK)
/*
* Don't ask the (SAC) DMA engines to move less than this amount.
*/
@ -417,7 +390,7 @@ struct sa1111_dev {
struct resource res;
void __iomem *mapbase;
unsigned int skpcr_mask;
unsigned int irq[6];
unsigned int hwirq[6];
u64 dma_mask;
};
@ -431,9 +404,6 @@ struct sa1111_driver {
unsigned int devid;
int (*probe)(struct sa1111_dev *);
int (*remove)(struct sa1111_dev *);
int (*suspend)(struct sa1111_dev *, pm_message_t);
int (*resume)(struct sa1111_dev *);
void (*shutdown)(struct sa1111_dev *);
};
#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)


@ -88,6 +88,7 @@
#else /* CONFIG_MMU */
#ifndef __ASSEMBLY__
extern unsigned long setup_vectors_base(void);
extern unsigned long vectors_base;
#define VECTORS_BASE vectors_base
#endif


@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2014 ARM Ltd. */
#ifndef __ASM_PTDUMP_H
#define __ASM_PTDUMP_H
#ifdef CONFIG_ARM_PTDUMP_CORE
#include <linux/mm_types.h>
#include <linux/seq_file.h>
struct addr_marker {
unsigned long start_address;
char *name;
};
struct ptdump_info {
struct mm_struct *mm;
const struct addr_marker *markers;
unsigned long base_addr;
};
void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_ARM_PTDUMP_DEBUGFS
int ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
static inline int ptdump_debugfs_register(struct ptdump_info *info,
const char *name)
{
return 0;
}
#endif /* CONFIG_ARM_PTDUMP_DEBUGFS */
void ptdump_check_wx(void);
#endif /* CONFIG_ARM_PTDUMP_CORE */
#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_check_wx()
#else
#define debug_checkwx() do { } while (0)
#endif
#endif /* __ASM_PTDUMP_H */
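
A consumer of this interface fills in a ptdump_info and registers it; a minimal sketch
using the API declared above (the marker table and debugfs name are illustrative, not
necessarily what arch/arm/mm/dump.c uses):

#include <linux/init.h>
#include <asm/memory.h>
#include <asm/ptdump.h>

static const struct addr_marker example_markers[] = {
        { PAGE_OFFSET,  "Kernel Mapping" },
        { -1UL,         NULL },
};

static struct ptdump_info example_ptdump_info = {
        .mm             = &init_mm,
        .markers        = example_markers,
        .base_addr      = 0,
};

static int __init example_ptdump_init(void)
{
        /* Creates the debugfs file when CONFIG_ARM_PTDUMP_DEBUGFS=y,
         * returns 0 and does nothing otherwise. */
        return ptdump_debugfs_register(&example_ptdump_info, "example_page_tables");
}
device_initcall(example_ptdump_init);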


@ -6,4 +6,25 @@
extern char _exiprom[];
extern char __idmap_text_start[];
extern char __idmap_text_end[];
extern char __entry_text_start[];
extern char __entry_text_end[];
extern char __hyp_idmap_text_start[];
extern char __hyp_idmap_text_end[];
static inline bool in_entry_text(unsigned long addr)
{
return memory_contains(__entry_text_start, __entry_text_end,
(void *)addr, 1);
}
static inline bool in_idmap_text(unsigned long addr)
{
void *a = (void *)addr;
return memory_contains(__idmap_text_start, __idmap_text_end, a, 1) ||
memory_contains(__hyp_idmap_text_start, __hyp_idmap_text_end,
a, 1);
}
#endif /* _ASM_ARM_SECTIONS_H */


@ -39,18 +39,4 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
return __memset64(p, v, n * 8, v >> 32);
}
extern void __memzero(void *ptr, __kernel_size_t n);
#define memset(p,v,n) \
({ \
void *__p = (p); size_t __n = n; \
if ((__n) != 0) { \
if (__builtin_constant_p((v)) && (v) == 0) \
__memzero((__p),(__n)); \
else \
memset((__p),(v),(__n)); \
} \
(__p); \
})
#endif


@ -28,18 +28,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
ptr < (unsigned long)&__irqentry_text_end;
}
static inline int in_exception_text(unsigned long ptr)
{
extern char __exception_text_start[];
extern char __exception_text_end[];
int in;
in = ptr >= (unsigned long)&__exception_text_start &&
ptr < (unsigned long)&__exception_text_end;
return in ? : __in_irqentry_text(ptr);
}
extern void __init early_trap_init(void *);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);


@ -20,8 +20,10 @@
#ifndef __ASM_UNIFIED_H
#define __ASM_UNIFIED_H
#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
#if defined(__ASSEMBLY__)
.syntax unified
#else
__asm__(".syntax unified");
#endif
#ifdef CONFIG_CPU_V7M
@ -64,77 +66,4 @@
#endif /* CONFIG_THUMB2_KERNEL */
#ifndef CONFIG_ARM_ASM_UNIFIED
/*
* If the unified assembly syntax isn't used (in ARM mode), these
* macros expand to an empty string
*/
#ifdef __ASSEMBLY__
.macro it, cond
.endm
.macro itt, cond
.endm
.macro ite, cond
.endm
.macro ittt, cond
.endm
.macro itte, cond
.endm
.macro itet, cond
.endm
.macro itee, cond
.endm
.macro itttt, cond
.endm
.macro ittte, cond
.endm
.macro ittet, cond
.endm
.macro ittee, cond
.endm
.macro itett, cond
.endm
.macro itete, cond
.endm
.macro iteet, cond
.endm
.macro iteee, cond
.endm
#else /* !__ASSEMBLY__ */
__asm__(
" .macro it, cond\n"
" .endm\n"
" .macro itt, cond\n"
" .endm\n"
" .macro ite, cond\n"
" .endm\n"
" .macro ittt, cond\n"
" .endm\n"
" .macro itte, cond\n"
" .endm\n"
" .macro itet, cond\n"
" .endm\n"
" .macro itee, cond\n"
" .endm\n"
" .macro itttt, cond\n"
" .endm\n"
" .macro ittte, cond\n"
" .endm\n"
" .macro ittet, cond\n"
" .endm\n"
" .macro ittee, cond\n"
" .endm\n"
" .macro itett, cond\n"
" .endm\n"
" .macro itete, cond\n"
" .endm\n"
" .macro iteet, cond\n"
" .endm\n"
" .macro iteee, cond\n"
" .endm\n");
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_ARM_ASM_UNIFIED */
#endif /* !__ASM_UNIFIED_H */

View File

@ -92,7 +92,6 @@ EXPORT_SYMBOL(__memset64);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
EXPORT_SYMBOL(mmioset);
EXPORT_SYMBOL(mmiocpy);


@ -82,11 +82,7 @@
#endif
.endm
#ifdef CONFIG_KPROBES
.section .kprobes.text,"ax",%progbits
#else
.text
#endif
.section .entry.text,"ax",%progbits
/*
* Invalid mode handlers


@ -37,6 +37,7 @@ saved_pc .req lr
#define TRACE(x...)
#endif
.section .entry.text,"ax",%progbits
.align 5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*


@ -105,8 +105,9 @@ __mmap_switched:
ARM( ldmia r4!, {r0, r1, sp} )
THUMB( ldmia r4!, {r0, r1, r3} )
THUMB( mov sp, r3 )
sub r1, r1, r0
bl __memzero @ clear .bss
sub r2, r1, r0
mov r1, #0
bl memset @ clear .bss
ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID


@ -44,17 +44,17 @@ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;
static int core_num_brps __ro_after_init;
static int core_num_wrps __ro_after_init;
/* Debug architecture version. */
static u8 debug_arch;
static u8 debug_arch __ro_after_init;
/* Does debug architecture support OS Save and Restore? */
static bool has_ossr;
static bool has_ossr __ro_after_init;
/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;
static u8 max_watchpoint_len __ro_after_init;
#define READ_WB_REG_CASE(OP2, M, VAL) \
case ((OP2 << 4) + M): \

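The __ro_after_init annotation used in the hunk above means the variable stays writable
during boot-time probing but is made read-only once init completes. A minimal sketch of
the pattern (names are illustrative):

#include <linux/cache.h>
#include <linux/init.h>

static int example_feature_count __ro_after_init;

static int __init example_detect_features(void)
{
        example_feature_count = 4;      /* still writable during __init */
        return 0;
}
early_initcall(example_detect_features);

/* After init, a write to example_feature_count would fault on kernels
 * that enforce rodata protections. */
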

@ -379,6 +379,9 @@ asmlinkage void secondary_start_kernel(void)
cpu_init();
#ifndef CONFIG_MMU
setup_vectors_base();
#endif
pr_debug("CPU%u: Booted secondary processor\n", cpu);
preempt_disable();


@ -3,6 +3,7 @@
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
@ -63,7 +64,6 @@ EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
struct stack_trace *trace;
unsigned long last_pc;
unsigned int no_sched_functions;
unsigned int skip;
};
@ -87,16 +87,7 @@ static int save_trace(struct stackframe *frame, void *d)
if (trace->nr_entries >= trace->max_entries)
return 1;
/*
* in_exception_text() is designed to test if the PC is one of
* the functions which has an exception stack above it, but
* unfortunately what is in frame->pc is the return LR value,
* not the saved PC value. So, we need to track the previous
* frame PC value when doing this.
*/
addr = data->last_pc;
data->last_pc = frame->pc;
if (!in_exception_text(addr))
if (!in_entry_text(frame->pc))
return 0;
regs = (struct pt_regs *)frame->sp;
@ -114,7 +105,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
struct stackframe frame;
data.trace = trace;
data.last_pc = ULONG_MAX;
data.skip = trace->skip;
data.no_sched_functions = nosched;


@ -72,7 +72,7 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
if (in_exception_text(where))
if (in_entry_text(from))
dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
@ -433,7 +433,7 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
return fn ? fn(regs, instr) : 1;
}
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
unsigned int instr;
siginfo_t info;


@ -96,9 +96,9 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
IDMAP_TEXT
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
__entry_text_start = .;
*(.entry.text)
__entry_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT


@ -105,9 +105,9 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
IDMAP_TEXT
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
__entry_text_start = .;
*(.entry.text)
__entry_text_end = .;
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
TEXT_TEXT


@ -8,7 +8,7 @@
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
delay.o delay-loop.o findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
memmove.o memset.o setbit.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \


@ -1,137 +0,0 @@
/*
* linux/arch/arm/lib/memzero.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
.text
.align 5
.word 0
/*
* Align the pointer in r0. r3 contains the number of bytes that we are
* mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we
* don't bother; we use byte stores instead.
*/
UNWIND( .fnstart )
1: subs r1, r1, #4 @ 1 do we have enough
blt 5f @ 1 bytes to align with?
cmp r3, #2 @ 1
strltb r2, [r0], #1 @ 1
strleb r2, [r0], #1 @ 1
strb r2, [r0], #1 @ 1
add r1, r1, r3 @ 1 (r1 = r1 - (4 - r3))
/*
* The pointer is now aligned and the length is adjusted. Try doing the
* memzero again.
*/
ENTRY(__memzero)
mov r2, #0 @ 1
ands r3, r0, #3 @ 1 unaligned?
bne 1b @ 1
/*
* r3 = 0, and we know that the pointer in r0 is aligned to a word boundary.
*/
cmp r1, #16 @ 1 we can skip this chunk if we
blt 4f @ 1 have < 16 bytes
#if ! CALGN(1)+0
/*
* We need an extra register for this loop - save the return address and
* use the LR
*/
str lr, [sp, #-4]! @ 1
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {lr} )
mov ip, r2 @ 1
mov lr, r2 @ 1
3: subs r1, r1, #64 @ 1 write 32 bytes out per loop
stmgeia r0!, {r2, r3, ip, lr} @ 4
stmgeia r0!, {r2, r3, ip, lr} @ 4
stmgeia r0!, {r2, r3, ip, lr} @ 4
stmgeia r0!, {r2, r3, ip, lr} @ 4
bgt 3b @ 1
ldmeqfd sp!, {pc} @ 1/2 quick exit
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r1, #32 @ 1
stmneia r0!, {r2, r3, ip, lr} @ 4
stmneia r0!, {r2, r3, ip, lr} @ 4
tst r1, #16 @ 1 16 bytes or more?
stmneia r0!, {r2, r3, ip, lr} @ 4
ldr lr, [sp], #4 @ 1
UNWIND( .fnend )
#else
/*
* This version aligns the destination pointer in order to write
* whole cache lines at once.
*/
stmfd sp!, {r4-r7, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r4-r7, lr} )
mov r4, r2
mov r5, r2
mov r6, r2
mov r7, r2
mov ip, r2
mov lr, r2
cmp r1, #96
andgts ip, r0, #31
ble 3f
rsb ip, ip, #32
sub r1, r1, ip
movs ip, ip, lsl #(32 - 4)
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r4, r5}
movs ip, ip, lsl #2
strcs r2, [r0], #4
3: subs r1, r1, #64
stmgeia r0!, {r2-r7, ip, lr}
stmgeia r0!, {r2-r7, ip, lr}
bgt 3b
ldmeqfd sp!, {r4-r7, pc}
tst r1, #32
stmneia r0!, {r2-r7, ip, lr}
tst r1, #16
stmneia r0!, {r4-r7}
ldmfd sp!, {r4-r7, lr}
UNWIND( .fnend )
#endif
UNWIND( .fnstart )
4: tst r1, #8 @ 1 8 bytes or more?
stmneia r0!, {r2, r3} @ 2
tst r1, #4 @ 1 4 bytes or more?
strne r2, [r0], #4 @ 1
/*
* When we get here, we've got less than 4 bytes to zero. We
* may have an unaligned pointer as well.
*/
5: tst r1, #2 @ 1 2 bytes or more?
strneb r2, [r0], #1 @ 1
strneb r2, [r0], #1 @ 1
tst r1, #1 @ 1 a byte left over
strneb r2, [r0], #1 @ 1
ret lr @ 1
UNWIND( .fnend )
ENDPROC(__memzero)


@ -5,6 +5,7 @@ menu "SA11x0 Implementations"
config SA1100_ASSABET
bool "Assabet"
select ARM_SA1110_CPUFREQ
select GPIO_REG
help
Say Y here if you are using the Intel(R) StrongARM(R) SA-1110
Microprocessor Development Board (also known as the Assabet).


@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/gpio/gpio-reg.h>
#include <linux/ioport.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/serial_core.h>
@ -61,20 +62,45 @@
unsigned long SCR_value = ASSABET_SCR_INIT;
EXPORT_SYMBOL(SCR_value);
static unsigned long BCR_value = ASSABET_BCR_DB1110;
static struct gpio_chip *assabet_bcr_gc;
static const char *assabet_names[] = {
"cf_pwr", "cf_gfx_reset", "nsoft_reset", "irda_fsel",
"irda_md0", "irda_md1", "stereo_loopback", "ncf_bus_on",
"audio_pwr_on", "light_pwr_on", "lcd16data", "lcd_pwr_on",
"rs232_on", "nred_led", "ngreen_led", "vib_on",
"com_dtr", "com_rts", "radio_wake_mod", "i2c_enab",
"tvir_enab", "qmute", "radio_pwr_on", "spkr_off",
"rs232_valid", "com_dcd", "com_cts", "com_dsr",
"radio_cts", "radio_dsr", "radio_dcd", "radio_ri",
};
/* The old deprecated interface */
void ASSABET_BCR_frob(unsigned int mask, unsigned int val)
{
unsigned long flags;
unsigned long m = mask, v = val;
local_irq_save(flags);
BCR_value = (BCR_value & ~mask) | val;
ASSABET_BCR = BCR_value;
local_irq_restore(flags);
assabet_bcr_gc->set_multiple(assabet_bcr_gc, &m, &v);
}
EXPORT_SYMBOL(ASSABET_BCR_frob);
static int __init assabet_init_gpio(void __iomem *reg, u32 def_val)
{
struct gpio_chip *gc;
writel_relaxed(def_val, reg);
gc = gpio_reg_init(NULL, reg, -1, 32, "assabet", 0xff000000, def_val,
assabet_names, NULL, NULL);
if (IS_ERR(gc))
return PTR_ERR(gc);
assabet_bcr_gc = gc;
return gc->base;
}
/*
* The codec reset goes to three devices, so we need to release
* the rest when any one of these requests it. However, that
@ -146,7 +172,7 @@ static void adv7171_write(unsigned reg, unsigned val)
unsigned gpdr = GPDR;
unsigned gplr = GPLR;
ASSABET_BCR = BCR_value | ASSABET_BCR_AUDIO_ON;
ASSABET_BCR_frob(ASSABET_BCR_AUDIO_ON, ASSABET_BCR_AUDIO_ON);
udelay(100);
GPCR = SDA | SCK | MOD; /* clear L3 mode to ensure UDA1341 doesn't respond */
@ -457,14 +483,6 @@ static void __init assabet_init(void)
sa11x0_ppc_configure_mcp();
if (machine_has_neponset()) {
/*
* Angel sets this, but other bootloaders may not.
*
* This must precede any driver calls to BCR_set()
* or BCR_clear().
*/
ASSABET_BCR = BCR_value = ASSABET_BCR_DB1111;
#ifndef CONFIG_ASSABET_NEPONSET
printk( "Warning: Neponset detected but full support "
"hasn't been configured in the kernel\n" );
@ -748,12 +766,31 @@ static int __init assabet_leds_init(void)
fs_initcall(assabet_leds_init);
#endif
void __init assabet_init_irq(void)
{
u32 def_val;
sa1100_init_irq();
if (machine_has_neponset())
def_val = ASSABET_BCR_DB1111;
else
def_val = ASSABET_BCR_DB1110;
/*
* Angel sets this, but other bootloaders may not.
*
* This must precede any driver calls to BCR_set() or BCR_clear().
*/
assabet_init_gpio((void *)&ASSABET_BCR, def_val);
}
MACHINE_START(ASSABET, "Intel-Assabet")
.atag_offset = 0x100,
.fixup = fixup_assabet,
.map_io = assabet_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
.init_irq = assabet_init_irq,
.init_time = sa1100_timer_init,
.init_machine = assabet_init,
.init_late = sa11x0_init_late,


@ -3,6 +3,8 @@
* linux/arch/arm/mach-sa1100/neponset.c
*/
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/gpio-reg.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/irq.h>
@ -45,10 +47,13 @@
#define IRR_USAR (1 << 1)
#define IRR_SA1111 (1 << 2)
#define NCR_NGPIO 7
#define MDM_CTL0_RTS1 (1 << 0)
#define MDM_CTL0_DTR1 (1 << 1)
#define MDM_CTL0_RTS2 (1 << 2)
#define MDM_CTL0_DTR2 (1 << 3)
#define MDM_CTL0_NGPIO 4
#define MDM_CTL1_CTS1 (1 << 0)
#define MDM_CTL1_DSR1 (1 << 1)
@ -56,80 +61,87 @@
#define MDM_CTL1_CTS2 (1 << 3)
#define MDM_CTL1_DSR2 (1 << 4)
#define MDM_CTL1_DCD2 (1 << 5)
#define MDM_CTL1_NGPIO 6
#define AUD_SEL_1341 (1 << 0)
#define AUD_MUTE_1341 (1 << 1)
#define AUD_NGPIO 2
extern void sa1110_mb_disable(void);
#define to_neponset_gpio_chip(x) container_of(x, struct neponset_gpio_chip, gc)
static const char *neponset_ncr_names[] = {
"gp01_off", "tp_power", "ms_power", "enet_osc",
"spi_kb_wk_up", "a0vpp", "a1vpp"
};
static const char *neponset_mdmctl0_names[] = {
"rts3", "dtr3", "rts1", "dtr1",
};
static const char *neponset_mdmctl1_names[] = {
"cts3", "dsr3", "dcd3", "cts1", "dsr1", "dcd1"
};
static const char *neponset_aud_names[] = {
"sel_1341", "mute_1341",
};
struct neponset_drvdata {
void __iomem *base;
struct platform_device *sa1111;
struct platform_device *smc91x;
unsigned irq_base;
#ifdef CONFIG_PM_SLEEP
u32 ncr0;
u32 mdm_ctl_0;
#endif
struct gpio_chip *gpio[4];
};
static void __iomem *nep_base;
static struct neponset_drvdata *nep;
void neponset_ncr_frob(unsigned int mask, unsigned int val)
{
void __iomem *base = nep_base;
struct neponset_drvdata *n = nep;
unsigned long m = mask, v = val;
if (base) {
unsigned long flags;
unsigned v;
local_irq_save(flags);
v = readb_relaxed(base + NCR_0);
writeb_relaxed((v & ~mask) | val, base + NCR_0);
local_irq_restore(flags);
} else {
WARN(1, "nep_base unset\n");
}
if (nep)
n->gpio[0]->set_multiple(n->gpio[0], &m, &v);
else
WARN(1, "nep unset\n");
}
EXPORT_SYMBOL(neponset_ncr_frob);
static void neponset_set_mctrl(struct uart_port *port, u_int mctrl)
{
void __iomem *base = nep_base;
u_int mdm_ctl0;
struct neponset_drvdata *n = nep;
unsigned long mask, val = 0;
if (!base)
if (!n)
return;
mdm_ctl0 = readb_relaxed(base + MDM_CTL_0);
if (port->mapbase == _Ser1UTCR0) {
if (mctrl & TIOCM_RTS)
mdm_ctl0 &= ~MDM_CTL0_RTS2;
else
mdm_ctl0 |= MDM_CTL0_RTS2;
mask = MDM_CTL0_RTS2 | MDM_CTL0_DTR2;
if (mctrl & TIOCM_DTR)
mdm_ctl0 &= ~MDM_CTL0_DTR2;
else
mdm_ctl0 |= MDM_CTL0_DTR2;
if (!(mctrl & TIOCM_RTS))
val |= MDM_CTL0_RTS2;
if (!(mctrl & TIOCM_DTR))
val |= MDM_CTL0_DTR2;
} else if (port->mapbase == _Ser3UTCR0) {
if (mctrl & TIOCM_RTS)
mdm_ctl0 &= ~MDM_CTL0_RTS1;
else
mdm_ctl0 |= MDM_CTL0_RTS1;
mask = MDM_CTL0_RTS1 | MDM_CTL0_DTR1;
if (mctrl & TIOCM_DTR)
mdm_ctl0 &= ~MDM_CTL0_DTR1;
else
mdm_ctl0 |= MDM_CTL0_DTR1;
if (!(mctrl & TIOCM_RTS))
val |= MDM_CTL0_RTS1;
if (!(mctrl & TIOCM_DTR))
val |= MDM_CTL0_DTR1;
}
writeb_relaxed(mdm_ctl0, base + MDM_CTL_0);
n->gpio[1]->set_multiple(n->gpio[1], &mask, &val);
}
static u_int neponset_get_mctrl(struct uart_port *port)
{
void __iomem *base = nep_base;
void __iomem *base = nep->base;
u_int ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR;
u_int mdm_ctl1;
@ -231,6 +243,22 @@ static struct irq_chip nochip = {
.irq_unmask = nochip_noop,
};
static int neponset_init_gpio(struct gpio_chip **gcp,
struct device *dev, const char *label, void __iomem *reg,
unsigned num, bool in, const char *const * names)
{
struct gpio_chip *gc;
gc = gpio_reg_init(dev, reg, -1, num, label, in ? 0xffffffff : 0,
readl_relaxed(reg), names, NULL, NULL);
if (IS_ERR(gc))
return PTR_ERR(gc);
*gcp = gc;
return 0;
}
static struct sa1111_platform_data sa1111_info = {
.disable_devs = SA1111_DEVID_PS2_MSE,
};
@ -274,7 +302,7 @@ static int neponset_probe(struct platform_device *dev)
};
int ret, irq;
if (nep_base)
if (nep)
return -EBUSY;
irq = ret = platform_get_irq(dev, 0);
@ -330,6 +358,22 @@ static int neponset_probe(struct platform_device *dev)
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
irq_set_chained_handler_and_data(irq, neponset_irq_handler, d);
/* Disable GPIO 0/1 drivers so the buttons work on the Assabet */
writeb_relaxed(NCR_GP01_OFF, d->base + NCR_0);
neponset_init_gpio(&d->gpio[0], &dev->dev, "neponset-ncr",
d->base + NCR_0, NCR_NGPIO, false,
neponset_ncr_names);
neponset_init_gpio(&d->gpio[1], &dev->dev, "neponset-mdm-ctl0",
d->base + MDM_CTL_0, MDM_CTL0_NGPIO, false,
neponset_mdmctl0_names);
neponset_init_gpio(&d->gpio[2], &dev->dev, "neponset-mdm-ctl1",
d->base + MDM_CTL_1, MDM_CTL1_NGPIO, true,
neponset_mdmctl1_names);
neponset_init_gpio(&d->gpio[3], &dev->dev, "neponset-aud-ctl",
d->base + AUD_CTL, AUD_NGPIO, false,
neponset_aud_names);
/*
* We would set IRQ_GPIO25 to be a wake-up IRQ, but unfortunately
* something on the Neponset activates this IRQ on sleep (eth?)
@ -340,16 +384,13 @@ static int neponset_probe(struct platform_device *dev)
dev_info(&dev->dev, "Neponset daughter board, providing IRQ%u-%u\n",
d->irq_base, d->irq_base + NEP_IRQ_NR - 1);
nep_base = d->base;
nep = d;
sa1100_register_uart_fns(&neponset_port_fns);
/* Ensure that the memory bus request/grant signals are setup */
sa1110_mb_disable();
/* Disable GPIO 0/1 drivers so the buttons work on the Assabet */
writeb_relaxed(NCR_GP01_OFF, d->base + NCR_0);
sa1111_resources[0].parent = sa1111_res;
sa1111_resources[1].start = d->irq_base + NEP_IRQ_SA1111;
sa1111_resources[1].end = d->irq_base + NEP_IRQ_SA1111;
@ -385,7 +426,7 @@ static int neponset_remove(struct platform_device *dev)
platform_device_unregister(d->smc91x);
irq_set_chained_handler(irq, NULL);
irq_free_descs(d->irq_base, NEP_IRQ_NR);
nep_base = NULL;
nep = NULL;
iounmap(d->base);
kfree(d);
@ -393,30 +434,22 @@ static int neponset_remove(struct platform_device *dev)
}
#ifdef CONFIG_PM_SLEEP
static int neponset_suspend(struct device *dev)
{
struct neponset_drvdata *d = dev_get_drvdata(dev);
d->ncr0 = readb_relaxed(d->base + NCR_0);
d->mdm_ctl_0 = readb_relaxed(d->base + MDM_CTL_0);
return 0;
}
static int neponset_resume(struct device *dev)
{
struct neponset_drvdata *d = dev_get_drvdata(dev);
int i, ret = 0;
writeb_relaxed(d->ncr0, d->base + NCR_0);
writeb_relaxed(d->mdm_ctl_0, d->base + MDM_CTL_0);
for (i = 0; i < ARRAY_SIZE(d->gpio); i++) {
ret = gpio_reg_resume(d->gpio[i]);
if (ret)
break;
}
return 0;
return ret;
}
static const struct dev_pm_ops neponset_pm_ops = {
.suspend_noirq = neponset_suspend,
.resume_noirq = neponset_resume,
.freeze_noirq = neponset_suspend,
.restore_noirq = neponset_resume,
};
#define PM_OPS &neponset_pm_ops


@ -909,6 +909,14 @@ config OUTER_CACHE_SYNC
The outer cache has an outer_cache_fns.sync function pointer
that can be used to drain the write buffer of the outer cache.
config CACHE_B15_RAC
bool "Enable the Broadcom Brahma-B15 read-ahead cache controller"
depends on ARCH_BRCMSTB
default y
help
This option enables the Broadcom Brahma-B15 read-ahead cache
controller. If disabled, the read-ahead cache remains off.
config CACHE_FEROCEON_L2
bool "Enable the Feroceon L2 cache controller"
depends on ARCH_MV78XX0 || ARCH_MVEBU


@ -13,7 +13,8 @@ obj-y += nommu.o
obj-$(CONFIG_ARM_MPU) += pmsa-v7.o
endif
obj-$(CONFIG_ARM_PTDUMP) += dump.o
obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o
obj-$(CONFIG_ARM_PTDUMP_DEBUGFS) += ptdump_debugfs.o
obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
@ -103,6 +104,7 @@ AFLAGS_proc-v6.o :=-Wa,-march=armv6
AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
obj-$(CONFIG_CACHE_B15_RAC) += cache-b15-rac.o
obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
obj-$(CONFIG_CACHE_L2X0_PMU) += cache-l2x0-pmu.o


@ -0,0 +1,356 @@
/*
* Broadcom Brahma-B15 CPU read-ahead cache management functions
*
* Copyright (C) 2015-2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/of_address.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-b15-rac.h>
extern void v7_flush_kern_cache_all(void);
/* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
#define RAC_CONFIG0_REG (0x78)
#define RACENPREF_MASK (0x3)
#define RACPREFINST_SHIFT (0)
#define RACENINST_SHIFT (2)
#define RACPREFDATA_SHIFT (4)
#define RACENDATA_SHIFT (6)
#define RAC_CPU_SHIFT (8)
#define RACCFG_MASK (0xff)
#define RAC_CONFIG1_REG (0x7c)
#define RAC_FLUSH_REG (0x80)
#define FLUSH_RAC (1 << 0)
/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
#define RAC_DATA_INST_EN_MASK (1 << RACPREFINST_SHIFT | \
RACENPREF_MASK << RACENINST_SHIFT | \
1 << RACPREFDATA_SHIFT | \
RACENPREF_MASK << RACENDATA_SHIFT)
#define RAC_ENABLED 0
/* Special state where we want to bypass the spinlock and call directly
* into the v7 cache maintenance operations during suspend/resume
*/
#define RAC_SUSPENDED 1
static void __iomem *b15_rac_base;
static DEFINE_SPINLOCK(rac_lock);
static u32 rac_config0_reg;
/* Initialization flag to avoid checking for b15_rac_base, and to prevent
* multi-platform kernels from crashing here as well.
*/
static unsigned long b15_rac_flags;
static inline u32 __b15_rac_disable(void)
{
u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
__raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
dmb();
return val;
}
static inline void __b15_rac_flush(void)
{
u32 reg;
__raw_writel(FLUSH_RAC, b15_rac_base + RAC_FLUSH_REG);
do {
/* This dmb() is required to force the Bus Interface Unit
* to clean outstanding writes, and forces an idle cycle
* to be inserted.
*/
dmb();
reg = __raw_readl(b15_rac_base + RAC_FLUSH_REG);
} while (reg & FLUSH_RAC);
}
static inline u32 b15_rac_disable_and_flush(void)
{
u32 reg;
reg = __b15_rac_disable();
__b15_rac_flush();
return reg;
}
static inline void __b15_rac_enable(u32 val)
{
__raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
/* dsb() is required here to be consistent with __flush_icache_all() */
dsb();
}
#define BUILD_RAC_CACHE_OP(name, bar) \
void b15_flush_##name(void) \
{ \
unsigned int do_flush; \
u32 val = 0; \
\
if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) { \
v7_flush_##name(); \
bar; \
return; \
} \
\
spin_lock(&rac_lock); \
do_flush = test_bit(RAC_ENABLED, &b15_rac_flags); \
if (do_flush) \
val = b15_rac_disable_and_flush(); \
v7_flush_##name(); \
if (!do_flush) \
bar; \
else \
__b15_rac_enable(val); \
spin_unlock(&rac_lock); \
}
#define nobarrier
/* The readahead cache present in the Brahma-B15 CPU is a special piece of
* hardware after the integrated L2 cache of the B15 CPU complex whose purpose
* is to prefetch instruction and/or data with a line size of either 64 bytes
* or 256 bytes. The rationale is that the data-bus of the CPU interface is
* optimized for 256-byte transactions, and enabling the readahead cache
* provides a significant performance boost, so we want it enabled (typically
* twice the performance for a memcpy benchmark application).
*
* The readahead cache is transparent for Modified Virtual Addresses
* cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
* DCCIMVAC.
*
* It is however not transparent for the following cache maintenance
* operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU which is precisely
* what we are patching here with our BUILD_RAC_CACHE_OP here.
*/
BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
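/* Illustrative note, not part of the original source: with name=kern_cache_all
* and bar=nobarrier (which expands to nothing), the invocation above roughly
* expands to:
*
*	void b15_flush_kern_cache_all(void)
*	{
*		unsigned int do_flush;
*		u32 val = 0;
*
*		if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) {
*			v7_flush_kern_cache_all();
*			return;
*		}
*
*		spin_lock(&rac_lock);
*		do_flush = test_bit(RAC_ENABLED, &b15_rac_flags);
*		if (do_flush)
*			val = b15_rac_disable_and_flush();
*		v7_flush_kern_cache_all();
*		if (do_flush)
*			__b15_rac_enable(val);
*		spin_unlock(&rac_lock);
*	}
*
* i.e. the RAC is disabled and flushed around the regular v7 flush while it
* is enabled, and bypassed entirely while suspended.
*/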
static void b15_rac_enable(void)
{
unsigned int cpu;
u32 enable = 0;
for_each_possible_cpu(cpu)
enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));
b15_rac_disable_and_flush();
__b15_rac_enable(enable);
}
static int b15_rac_reboot_notifier(struct notifier_block *nb,
unsigned long action,
void *data)
{
/* During kexec, we are not yet migrated to the boot CPU, so we need to
* make sure we are SMP-safe here. Once the RAC is disabled, flag it as
* suspended such that the hotplug notifier returns early.
*/
if (action == SYS_RESTART) {
spin_lock(&rac_lock);
b15_rac_disable_and_flush();
clear_bit(RAC_ENABLED, &b15_rac_flags);
set_bit(RAC_SUSPENDED, &b15_rac_flags);
spin_unlock(&rac_lock);
}
return NOTIFY_DONE;
}
static struct notifier_block b15_rac_reboot_nb = {
.notifier_call = b15_rac_reboot_notifier,
};
/* The CPU hotplug case is the most interesting one: we basically need to make
* sure that the RAC is disabled for the entire system prior to having a CPU
* die, in particular prior to this dying CPU having exited the coherency
* domain.
*
* Once this CPU is marked dead, we can safely re-enable the RAC for the
* remaining CPUs in the system which are still online.
*
* Offlining a CPU is the problematic case; onlining a CPU is not much of an
* issue since the CPU and its cache-level hierarchy will start filling with
* the RAC disabled, so L1 and L2 only.
*
* In this function, we should NOT have to verify any unsafe setting/condition:
*
* b15_rac_base:
*
* It is protected by the RAC_ENABLED flag, which is clear by default and
* only set once the initialization procedure is done; b15_rac_base has
* been set by that time.
*
* RAC_ENABLED:
* There is a small timing window, in b15_rac_init(), between
* cpuhp_setup_state_*()
* ...
* set RAC_ENABLED
* However, there is no hotplug activity at that point of the Linux boot
* procedure.
*
* Since we have to disable the RAC for all cores, we keep the RAC on as
* long as possible (disable it as late as possible) to gain the cache
* benefit.
*
* Thus, the dying/dead states are chosen here.
*
* We are choosing not to disable the RAC on a per-CPU basis; if we did,
* we would want to consider disabling it as early as possible to benefit
* the other active CPUs.
*/
/* Running on the dying CPU */
static int b15_rac_dying_cpu(unsigned int cpu)
{
/* During kexec/reboot, the RAC is disabled via the reboot notifier;
* return early here.
*/
if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
return 0;
spin_lock(&rac_lock);
/* Indicate that we are starting a hotplug procedure */
__clear_bit(RAC_ENABLED, &b15_rac_flags);
/* Disable the readahead cache and save its value to a global */
rac_config0_reg = b15_rac_disable_and_flush();
spin_unlock(&rac_lock);
return 0;
}
/* Running on a non-dying CPU */
static int b15_rac_dead_cpu(unsigned int cpu)
{
/* During kexec/reboot, the RAC is disabled via the reboot notifier;
* return early here.
*/
if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
return 0;
spin_lock(&rac_lock);
/* And enable it */
__b15_rac_enable(rac_config0_reg);
__set_bit(RAC_ENABLED, &b15_rac_flags);
spin_unlock(&rac_lock);
return 0;
}
static int b15_rac_suspend(void)
{
/* Suspend the read-ahead cache operations, forcing our cache
* implementation to fall back to the regular ARMv7 calls.
*
* We are guaranteed to be running on the boot CPU at this point and
* with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
* here.
*/
rac_config0_reg = b15_rac_disable_and_flush();
set_bit(RAC_SUSPENDED, &b15_rac_flags);
return 0;
}
static void b15_rac_resume(void)
{
/* Coming out of an S3 suspend/resume cycle, the read-ahead cache
* register RAC_CONFIG0_REG will have been restored to its default value;
* make sure we re-enable it and set the enable flag. We are also
* guaranteed to run on the boot CPU, so this is not racy.
*/
__b15_rac_enable(rac_config0_reg);
clear_bit(RAC_SUSPENDED, &b15_rac_flags);
}
static struct syscore_ops b15_rac_syscore_ops = {
.suspend = b15_rac_suspend,
.resume = b15_rac_resume,
};
static int __init b15_rac_init(void)
{
struct device_node *dn;
int ret = 0, cpu;
u32 reg, en_mask = 0;
dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
if (!dn)
return -ENODEV;
if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
goto out;
b15_rac_base = of_iomap(dn, 0);
if (!b15_rac_base) {
pr_err("failed to remap BIU control base\n");
ret = -ENOMEM;
goto out;
}
ret = register_reboot_notifier(&b15_rac_reboot_nb);
if (ret) {
pr_err("failed to register reboot notifier\n");
iounmap(b15_rac_base);
goto out;
}
if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
"arm/cache-b15-rac:dead",
NULL, b15_rac_dead_cpu);
if (ret)
goto out_unmap;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
"arm/cache-b15-rac:dying",
NULL, b15_rac_dying_cpu);
if (ret)
goto out_cpu_dead;
}
if (IS_ENABLED(CONFIG_PM_SLEEP))
register_syscore_ops(&b15_rac_syscore_ops);
spin_lock(&rac_lock);
reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
for_each_possible_cpu(cpu)
en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");
b15_rac_enable();
set_bit(RAC_ENABLED, &b15_rac_flags);
spin_unlock(&rac_lock);
pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n",
b15_rac_base + RAC_CONFIG0_REG);
goto out;
out_cpu_dead:
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING);
out_unmap:
unregister_reboot_notifier(&b15_rac_reboot_nb);
iounmap(b15_rac_base);
out:
of_node_put(dn);
return ret;
}
arch_initcall(b15_rac_init);

View File

@ -15,6 +15,7 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/hardware/cache-b15-rac.h>
#include "proc-macros.S"
@ -446,3 +447,23 @@ ENDPROC(v7_dma_unmap_area)
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v7
/* The Broadcom Brahma-B15 read-ahead cache requires some modifications
* to the v7_cache_fns; we only override the ones we need
*/
#ifndef CONFIG_CACHE_B15_RAC
globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all
#endif
globl_equ b15_flush_icache_all, v7_flush_icache_all
globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis
globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all
globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range
globl_equ b15_coherent_kern_range, v7_coherent_kern_range
globl_equ b15_coherent_user_range, v7_coherent_user_range
globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area
globl_equ b15_dma_map_area, v7_dma_map_area
globl_equ b15_dma_unmap_area, v7_dma_unmap_area
globl_equ b15_dma_flush_range, v7_dma_flush_range
define_cache_functions b15

View File

@ -21,11 +21,7 @@
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
struct addr_marker {
unsigned long start_address;
const char *name;
};
#include <asm/ptdump.h>
static struct addr_marker address_markers[] = {
{ MODULES_VADDR, "Modules" },
@ -38,12 +34,26 @@ static struct addr_marker address_markers[] = {
{ -1, NULL },
};
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
if (m) \
seq_printf(m, fmt, ##args); \
})
#define pt_dump_seq_puts(m, fmt) \
({ \
if (m) \
seq_printf(m, fmt); \
})
struct pg_state {
struct seq_file *seq;
const struct addr_marker *marker;
unsigned long start_address;
unsigned level;
u64 current_prot;
bool check_wx;
unsigned long wx_pages;
const char *current_domain;
};
@ -52,6 +62,8 @@ struct prot_bits {
u64 val;
const char *set;
const char *clear;
bool ro_bit;
bool nx_bit;
};
static const struct prot_bits pte_bits[] = {
@ -65,11 +77,13 @@ static const struct prot_bits pte_bits[] = {
.val = L_PTE_RDONLY,
.set = "ro",
.clear = "RW",
.ro_bit = true,
}, {
.mask = L_PTE_XN,
.val = L_PTE_XN,
.set = "NX",
.clear = "x ",
.nx_bit = true,
}, {
.mask = L_PTE_SHARED,
.val = L_PTE_SHARED,
@ -133,11 +147,13 @@ static const struct prot_bits section_bits[] = {
.val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
.set = "ro",
.clear = "RW",
.ro_bit = true,
#elif __LINUX_ARM_ARCH__ >= 6
{
.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
.set = " ro",
.ro_bit = true,
}, {
.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
@ -156,6 +172,7 @@ static const struct prot_bits section_bits[] = {
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = 0,
.set = " ro",
.ro_bit = true,
}, {
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
@ -174,6 +191,7 @@ static const struct prot_bits section_bits[] = {
.val = PMD_SECT_XN,
.set = "NX",
.clear = "x ",
.nx_bit = true,
}, {
.mask = PMD_SECT_S,
.val = PMD_SECT_S,
@ -186,6 +204,8 @@ struct pg_level {
const struct prot_bits *bits;
size_t num;
u64 mask;
const struct prot_bits *ro_bit;
const struct prot_bits *nx_bit;
};
static struct pg_level pg_level[] = {
@ -214,10 +234,27 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t
s = bits->clear;
if (s)
seq_printf(st->seq, " %s", s);
pt_dump_seq_printf(st->seq, " %s", s);
}
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
if (!st->check_wx)
return;
if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
pg_level[st->level].ro_bit->val)
return;
if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
pg_level[st->level].nx_bit->val)
return;
WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
(void *)st->start_address);
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val, const char *domain)
{
@ -228,7 +265,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->level = level;
st->current_prot = prot;
st->current_domain = domain;
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
} else if (prot != st->current_prot || level != st->level ||
domain != st->current_domain ||
addr >= st->marker[1].start_address) {
@ -236,7 +273,8 @@ static void note_page(struct pg_state *st, unsigned long addr,
unsigned long delta;
if (st->current_prot) {
seq_printf(st->seq, "0x%08lx-0x%08lx ",
note_prot_wx(st, addr);
pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx ",
st->start_address, addr);
delta = (addr - st->start_address) >> 10;
@ -244,17 +282,19 @@ static void note_page(struct pg_state *st, unsigned long addr,
delta >>= 10;
unit++;
}
seq_printf(st->seq, "%9lu%c", delta, *unit);
pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
if (st->current_domain)
seq_printf(st->seq, " %s", st->current_domain);
pt_dump_seq_printf(st->seq, " %s",
st->current_domain);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
seq_printf(st->seq, "\n");
pt_dump_seq_printf(st->seq, "\n");
}
if (addr >= st->marker[1].start_address) {
st->marker++;
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
st->marker->name);
}
st->start_address = addr;
st->current_prot = prot;
@ -335,61 +375,82 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
}
}
static void walk_pgd(struct seq_file *m)
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
unsigned long start)
{
pgd_t *pgd = swapper_pg_dir;
struct pg_state st;
unsigned long addr;
pgd_t *pgd = pgd_offset(mm, 0UL);
unsigned i;
memset(&st, 0, sizeof(st));
st.seq = m;
st.marker = address_markers;
unsigned long addr;
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = i * PGDIR_SIZE;
addr = start + i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
walk_pud(&st, pgd, addr);
walk_pud(st, pgd, addr);
} else {
note_page(&st, addr, 1, pgd_val(*pgd), NULL);
note_page(st, addr, 1, pgd_val(*pgd), NULL);
}
}
}
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
struct pg_state st = {
.seq = m,
.marker = info->markers,
.check_wx = false,
};
walk_pgd(&st, info->mm, info->base_addr);
note_page(&st, 0, 0, 0, NULL);
}
static int ptdump_show(struct seq_file *m, void *v)
static void ptdump_initialize(void)
{
walk_pgd(m);
return 0;
}
static int ptdump_open(struct inode *inode, struct file *file)
{
return single_open(file, ptdump_show, NULL);
}
static const struct file_operations ptdump_fops = {
.open = ptdump_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int ptdump_init(void)
{
struct dentry *pe;
unsigned i, j;
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
if (pg_level[i].bits)
for (j = 0; j < pg_level[i].num; j++)
for (j = 0; j < pg_level[i].num; j++) {
pg_level[i].mask |= pg_level[i].bits[j].mask;
if (pg_level[i].bits[j].ro_bit)
pg_level[i].ro_bit = &pg_level[i].bits[j];
if (pg_level[i].bits[j].nx_bit)
pg_level[i].nx_bit = &pg_level[i].bits[j];
}
address_markers[2].start_address = VMALLOC_START;
}
pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
&ptdump_fops);
return pe ? 0 : -ENOMEM;
static struct ptdump_info kernel_ptdump_info = {
.mm = &init_mm,
.markers = address_markers,
.base_addr = 0,
};
void ptdump_check_wx(void)
{
struct pg_state st = {
.seq = NULL,
.marker = (struct addr_marker[]) {
{ 0, NULL},
{ -1, NULL},
},
.check_wx = true,
};
walk_pgd(&st, &init_mm, 0);
note_page(&st, 0, 0, 0, NULL);
if (st.wx_pages)
pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
st.wx_pages);
else
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
static int ptdump_init(void)
{
ptdump_initialize();
return ptdump_debugfs_register(&kernel_ptdump_info,
"kernel_page_tables");
}
__initcall(ptdump_init);

View File

@ -21,7 +21,6 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
@ -545,7 +544,7 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void __exception
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
@ -578,7 +577,7 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
ifsr_info[nr].name = name;
}
asmlinkage void __exception
asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);

View File

@ -16,8 +16,8 @@
* are not supported on any CPU using the idmap tables as its current
* page tables.
*/
pgd_t *idmap_pgd;
long long arch_phys_to_idmap_offset;
pgd_t *idmap_pgd __ro_after_init;
long long arch_phys_to_idmap_offset __ro_after_init;
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,

View File

@ -36,6 +36,7 @@
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@ -738,6 +739,7 @@ static int __mark_rodata_ro(void *unused)
void mark_rodata_ro(void)
{
stop_machine(__mark_rodata_ro, NULL, NULL);
debug_checkwx();
}
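/* Sketch, not part of this hunk: debug_checkwx() is presumably a thin wrapper
* provided by <asm/ptdump.h>, along the lines of:
*
*	#ifdef CONFIG_DEBUG_WX
*	#define debug_checkwx()	ptdump_check_wx()
*	#else
*	#define debug_checkwx()	do { } while (0)
*	#endif
*
* so the W+X scan added above only runs when CONFIG_DEBUG_WX is enabled.
*/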
void set_kernel_text_rw(void)

View File

@ -31,7 +31,7 @@ struct mpu_rgn_info mpu_rgn_info;
#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
static unsigned long __init setup_vectors_base(void)
unsigned long setup_vectors_base(void)
{
unsigned long reg = get_cr();
@ -57,7 +57,7 @@ static inline bool security_extensions_enabled(void)
return 0;
}
static unsigned long __init setup_vectors_base(void)
unsigned long setup_vectors_base(void)
{
unsigned long base = 0, reg = get_cr();

View File

@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
@ -296,6 +297,7 @@ void __init adjust_lowmem_bounds_mpu(void)
}
}
memset(mem, 0, sizeof(mem));
num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);
for (i = 0; i < num; i++) {
@ -433,7 +435,7 @@ void __init mpu_setup(void)
/* Background */
err |= mpu_setup_region(region++, 0, 32,
MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA,
MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW,
0, false);
#ifdef CONFIG_XIP_KERNEL

View File

@ -567,7 +567,7 @@ __v7_setup_stack:
/*
* Standard v7 proc info content
*/
.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions, cache_fns = v7_cache_fns
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@ -583,7 +583,7 @@ __v7_setup_stack:
.long \proc_fns
.long v7wbi_tlb_fns
.long v6_user_fns
.long v7_cache_fns
.long \cache_fns
.endm
#ifndef CONFIG_ARM_LPAE
@ -678,7 +678,7 @@ __v7_ca15mp_proc_info:
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, cache_fns = b15_cache_fns
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*

View File

@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/ptdump.h>
static int ptdump_show(struct seq_file *m, void *v)
{
struct ptdump_info *info = m->private;
ptdump_walk_pgd(m, info);
return 0;
}
static int ptdump_open(struct inode *inode, struct file *file)
{
return single_open(file, ptdump_show, inode->i_private);
}
static const struct file_operations ptdump_fops = {
.open = ptdump_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
struct dentry *pe;
pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
return pe ? 0 : -ENOMEM;
}
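/* Illustrative sketch, not part of this file: a hypothetical additional user
* of the reusable dumper would describe its tables with a ptdump_info and
* register it, along the same lines as kernel_ptdump_info in dump.c:
*
*	static struct addr_marker my_markers[] = {
*		{ MODULES_VADDR, "Modules" },
*		{ -1, NULL },
*	};
*
*	static struct ptdump_info my_ptdump_info = {
*		.mm		= &init_mm,
*		.markers	= my_markers,
*		.base_addr	= 0,
*	};
*
*	ret = ptdump_debugfs_register(&my_ptdump_info, "my_page_tables");
*/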

View File

@ -32,6 +32,7 @@
#include <linux/percpu.h>
#include <linux/bug.h>
#include <asm/patch.h>
#include <asm/sections.h>
#include "../decode-arm.h"
#include "../decode-thumb.h"
@ -64,9 +65,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
int is;
const struct decode_checker **checkers;
if (in_exception_text(addr))
return -EINVAL;
#ifdef CONFIG_THUMB2_KERNEL
thumb = true;
addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
@ -680,3 +678,13 @@ int __init arch_init_kprobes()
#endif
return 0;
}
bool arch_within_kprobe_blacklist(unsigned long addr)
{
void *a = (void *)addr;
return __in_irqentry_text(addr) ||
in_entry_text(addr) ||
in_idmap_text(addr) ||
memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1);
}

View File

@ -765,7 +765,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
break;
__memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
dma_async_tx_descriptor_init(&desc->desc, chan);
desc->desc.tx_submit = imxdma_tx_submit;
/* txd.flags will be overwritten in prep funcs */

View File

@ -59,6 +59,7 @@ enum cpuhp_state {
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@ -138,6 +139,7 @@ enum cpuhp_state {
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
CPUHP_AP_ONLINE_IDLE,