
Merge branch 'linus' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2016-11-11 08:25:07 +01:00
commit 4c8ee71620
746 changed files with 8694 additions and 5839 deletions


@ -1864,10 +1864,11 @@ S: The Netherlands
N: Martin Kepplinger
E: martink@posteo.de
E: martin.kepplinger@theobroma-systems.com
E: martin.kepplinger@ginzinger.com
W: http://www.martinkepplinger.com
D: mma8452 accelerators iio driver
D: Kernel cleanups
D: pegasus_notetaker input driver
D: Kernel fixes and cleanups
S: Garnisonstraße 26
S: 4020 Linz
S: Austria


@ -309,3 +309,4 @@ Version History
with a reshape in progress.
1.9.0 Add support for RAID level takeover/reshape/region size
and set size reduction.
1.9.1 Fix activation of existing RAID 4/10 mapped devices


@ -49,6 +49,7 @@ Optional port properties:
and
- phy-handle: See ethernet.txt file in the same directory.
- phy-mode: See ethernet.txt file in the same directory.
or


@ -6,25 +6,25 @@ System reset
Required properties:
- compatible: should be one of the following:
"socionext,uniphier-sld3-reset" - for PH1-sLD3 SoC.
"socionext,uniphier-ld4-reset" - for PH1-LD4 SoC.
"socionext,uniphier-pro4-reset" - for PH1-Pro4 SoC.
"socionext,uniphier-sld8-reset" - for PH1-sLD8 SoC.
"socionext,uniphier-pro5-reset" - for PH1-Pro5 SoC.
"socionext,uniphier-pxs2-reset" - for ProXstream2/PH1-LD6b SoC.
"socionext,uniphier-ld11-reset" - for PH1-LD11 SoC.
"socionext,uniphier-ld20-reset" - for PH1-LD20 SoC.
"socionext,uniphier-sld3-reset" - for sLD3 SoC.
"socionext,uniphier-ld4-reset" - for LD4 SoC.
"socionext,uniphier-pro4-reset" - for Pro4 SoC.
"socionext,uniphier-sld8-reset" - for sLD8 SoC.
"socionext,uniphier-pro5-reset" - for Pro5 SoC.
"socionext,uniphier-pxs2-reset" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-reset" - for LD11 SoC.
"socionext,uniphier-ld20-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
sysctrl@61840000 {
compatible = "socionext,uniphier-ld20-sysctrl",
compatible = "socionext,uniphier-ld11-sysctrl",
"simple-mfd", "syscon";
reg = <0x61840000 0x4000>;
reset {
compatible = "socionext,uniphier-ld20-reset";
compatible = "socionext,uniphier-ld11-reset";
#reset-cells = <1>;
};
@ -32,30 +32,30 @@ Example:
};
Media I/O (MIO) reset
---------------------
Media I/O (MIO) reset, SD reset
-------------------------------
Required properties:
- compatible: should be one of the following:
"socionext,uniphier-sld3-mio-reset" - for PH1-sLD3 SoC.
"socionext,uniphier-ld4-mio-reset" - for PH1-LD4 SoC.
"socionext,uniphier-pro4-mio-reset" - for PH1-Pro4 SoC.
"socionext,uniphier-sld8-mio-reset" - for PH1-sLD8 SoC.
"socionext,uniphier-pro5-mio-reset" - for PH1-Pro5 SoC.
"socionext,uniphier-pxs2-mio-reset" - for ProXstream2/PH1-LD6b SoC.
"socionext,uniphier-ld11-mio-reset" - for PH1-LD11 SoC.
"socionext,uniphier-ld20-mio-reset" - for PH1-LD20 SoC.
"socionext,uniphier-sld3-mio-reset" - for sLD3 SoC.
"socionext,uniphier-ld4-mio-reset" - for LD4 SoC.
"socionext,uniphier-pro4-mio-reset" - for Pro4 SoC.
"socionext,uniphier-sld8-mio-reset" - for sLD8 SoC.
"socionext,uniphier-pro5-sd-reset" - for Pro5 SoC.
"socionext,uniphier-pxs2-sd-reset" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-mio-reset" - for LD11 SoC.
"socionext,uniphier-ld20-sd-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
mioctrl@59810000 {
compatible = "socionext,uniphier-ld20-mioctrl",
compatible = "socionext,uniphier-ld11-mioctrl",
"simple-mfd", "syscon";
reg = <0x59810000 0x800>;
reset {
compatible = "socionext,uniphier-ld20-mio-reset";
compatible = "socionext,uniphier-ld11-mio-reset";
#reset-cells = <1>;
};
@ -68,24 +68,24 @@ Peripheral reset
Required properties:
- compatible: should be one of the following:
"socionext,uniphier-ld4-peri-reset" - for PH1-LD4 SoC.
"socionext,uniphier-pro4-peri-reset" - for PH1-Pro4 SoC.
"socionext,uniphier-sld8-peri-reset" - for PH1-sLD8 SoC.
"socionext,uniphier-pro5-peri-reset" - for PH1-Pro5 SoC.
"socionext,uniphier-pxs2-peri-reset" - for ProXstream2/PH1-LD6b SoC.
"socionext,uniphier-ld11-peri-reset" - for PH1-LD11 SoC.
"socionext,uniphier-ld20-peri-reset" - for PH1-LD20 SoC.
"socionext,uniphier-ld4-peri-reset" - for LD4 SoC.
"socionext,uniphier-pro4-peri-reset" - for Pro4 SoC.
"socionext,uniphier-sld8-peri-reset" - for sLD8 SoC.
"socionext,uniphier-pro5-peri-reset" - for Pro5 SoC.
"socionext,uniphier-pxs2-peri-reset" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-peri-reset" - for LD11 SoC.
"socionext,uniphier-ld20-peri-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
perictrl@59820000 {
compatible = "socionext,uniphier-ld20-perictrl",
compatible = "socionext,uniphier-ld11-perictrl",
"simple-mfd", "syscon";
reg = <0x59820000 0x200>;
reset {
compatible = "socionext,uniphier-ld20-peri-reset";
compatible = "socionext,uniphier-ld11-peri-reset";
#reset-cells = <1>;
};


@ -1,7 +1,9 @@
Binding for Cadence UART Controller
Required properties:
- compatible : should be "cdns,uart-r1p8", or "xlnx,xuartps"
- compatible :
Use "xlnx,xuartps","cdns,uart-r1p8" for Zynq-7xxx SoC.
Use "xlnx,zynqmp-uart","cdns,uart-r1p12" for Zynq Ultrascale+ MPSoC.
- reg: Should contain UART controller registers location and length.
- interrupts: Should contain UART controller interrupts.
- clocks: Must contain phandles to the UART clocks

View File

@ -9,6 +9,14 @@ Required properties:
- "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
- "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
- "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
- "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART.
- "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART.
- "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART.
- "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART.
- "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART.
- "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
- "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
- "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
- "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
- "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
- "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.


@ -28,10 +28,7 @@ Refer to phy/phy-bindings.txt for generic phy consumer properties
- g-use-dma: enable dma usage in gadget driver.
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
Deprecated properties:
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0)
in gadget mode.
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
Example:


@ -29,8 +29,8 @@ A: There are always two trees (git repositories) in play. Both are driven
Linus, and net-next is where the new code goes for the future release.
You can find the trees here:
http://git.kernel.org/?p=linux/kernel/git/davem/net.git
http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
Q: How often do changes from these trees make it to the mainline Linus tree?
@ -76,7 +76,7 @@ Q: So where are we now in this cycle?
A: Load the mainline (Linus) page here:
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
and note the top of the "tags" section. If it is rc1, it is early
in the dev cycle. If it was tagged rc7 a week ago, then a release
@ -123,7 +123,7 @@ A: Normally Greg Kroah-Hartman collects stable commits himself, but
It contains the patches which Dave has selected, but not yet handed
off to Greg. If Greg already has the patch, then it will be here:
http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
A quick way to find whether the patch is in this stable-queue is
to simply clone the repo, and then git grep the mainline commit ID, e.g.


@ -33,24 +33,6 @@ nf_conntrack_events - BOOLEAN
If this option is enabled, the connection tracking code will
provide userspace with connection tracking events via ctnetlink.
nf_conntrack_events_retry_timeout - INTEGER (seconds)
default 15
This option is only relevant when "reliable connection tracking
events" are used. Normally, ctnetlink is "lossy", that is,
events are normally dropped when userspace listeners can't keep up.
Userspace can request "reliable event mode". When this mode is
active, the conntrack will only be destroyed after the event was
delivered. If event delivery fails, the kernel periodically
re-tries to send the event to userspace.
This is the maximum interval the kernel should use when re-trying
to deliver the destroy event.
A higher number means there will be fewer delivery retries and it
will take longer for a backlog to be processed.
nf_conntrack_expect_max - INTEGER
Maximum size of expectation table. Default value is
nf_conntrack_buckets / 256. Minimum is 1.


@ -4,7 +4,17 @@ KVM Lock Overview
1. Acquisition Orders
---------------------
(to be written)
The acquisition orders for mutexes are as follows:
- kvm->lock is taken outside vcpu->mutex
- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.
For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
else is a leaf: no other lock is taken inside the critical sections.
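
[Editor's note: the acquisition order just listed can be made concrete with a short illustrative sketch. This is not part of the patch; the field names are taken from the text above.]

#include <linux/kvm_host.h>

/*
 * Illustrative only -- nests the three mutexes in the documented order:
 * kvm->lock outermost, then kvm->slots_lock, then kvm->irq_lock.
 */
static void example_touch_slots_and_irqs(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);		/* outermost mutex */
	mutex_lock(&kvm->slots_lock);	/* taken inside kvm->lock */
	mutex_lock(&kvm->irq_lock);	/* innermost of the three */

	/* ... update memslots / irq routing here ... */

	mutex_unlock(&kvm->irq_lock);
	mutex_unlock(&kvm->slots_lock);
	mutex_unlock(&kvm->lock);
}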
2: Exception
------------


@ -1442,6 +1442,7 @@ F: drivers/cpufreq/mvebu-cpufreq.c
F: arch/arm/configs/mvebu_*_defconfig
ARM/Marvell Berlin SoC support
M: Jisheng Zhang <jszhang@marvell.com>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@ -2551,15 +2552,18 @@ S: Supported
F: drivers/net/ethernet/broadcom/genet/
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
M: Sony Chacko <sony.chacko@qlogic.com>
M: Dept-HSGLinuxNICDev@qlogic.com
M: Rasesh Mody <rasesh.mody@cavium.com>
M: Harish Patil <harish.patil@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2.*
F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M: Ariel Elior <ariel.elior@qlogic.com>
M: Yuval Mintz <Yuval.Mintz@cavium.com>
M: Ariel Elior <ariel.elior@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
@ -2766,7 +2770,9 @@ S: Supported
F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
M: Rasesh Mody <rasesh.mody@qlogic.com>
M: Rasesh Mody <rasesh.mody@cavium.com>
M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/brocade/bna/
@ -5287,6 +5293,12 @@ M: Joe Perches <joe@perches.com>
S: Maintained
F: scripts/get_maintainer.pl
GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp <haver@linux.vnet.ibm.com>
M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
S: Supported
F: drivers/misc/genwqe/
GFS2 FILE SYSTEM
M: Steven Whitehouse <swhiteho@redhat.com>
M: Bob Peterson <rpeterso@redhat.com>
@ -7913,6 +7925,10 @@ F: mm/
MEMORY TECHNOLOGY DEVICES (MTD)
M: David Woodhouse <dwmw2@infradead.org>
M: Brian Norris <computersforpeace@gmail.com>
M: Boris Brezillon <boris.brezillon@free-electrons.com>
M: Marek Vasut <marek.vasut@gmail.com>
M: Richard Weinberger <richard@nod.at>
M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
@ -8100,6 +8116,7 @@ S: Maintained
F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT
M: Jessica Yu <jeyu@redhat.com>
M: Rusty Russell <rusty@rustcorp.com.au>
S: Maintained
F: include/linux/module.h
@ -8509,11 +8526,10 @@ F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
M: Rajesh Borundia <rajesh.borundia@qlogic.com>
M: Manish Chopra <manish.chopra@cavium.com>
M: Rahul Verma <rahul.verma@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
W: http://www.qlogic.com
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
@ -9889,33 +9905,32 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Ron Mercer <ron.mercer@qlogic.com>
M: linux-driver@qlogic.com
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
M: Dept-GELinuxNICDev@qlogic.com
M: Harish Patil <harish.patil@cavium.com>
M: Manish Chopra <manish.chopra@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
M: Harish Patil <harish.patil@qlogic.com>
M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
M: Dept-GELinuxNICDev@qlogic.com
M: linux-driver@qlogic.com
M: Harish Patil <harish.patil@cavium.com>
M: Manish Chopra <manish.chopra@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
QLOGIC QL4xxx ETHERNET DRIVER
M: Yuval Mintz <Yuval.Mintz@qlogic.com>
M: Ariel Elior <Ariel.Elior@qlogic.com>
M: everest-linux-l2@qlogic.com
M: Yuval Mintz <Yuval.Mintz@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
@ -11393,6 +11408,17 @@ W: http://www.st.com/spear
S: Maintained
F: drivers/clk/spear/
SPI NOR SUBSYSTEM
M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
M: Marek Vasut <marek.vasut@gmail.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://github.com/spi-nor/linux.git
S: Maintained
F: drivers/mtd/spi-nor/
F: include/linux/mtd/spi-nor.h
SPI SUBSYSTEM
M: Mark Brown <broonie@kernel.org>
L: linux-spi@vger.kernel.org
@ -12772,6 +12798,7 @@ F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: Documentation/devicetree/bindings/virtio/
@ -12802,6 +12829,7 @@ F: include/uapi/linux/virtio_gpu.h
VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
L: kvm@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc4
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*


@ -41,6 +41,8 @@ config ARC
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
config MIGHT_HAVE_PCI
bool
@ -186,14 +188,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
help
This IP block enables SMP in ARC-HS38 cores.
It provides for cross-core interrupts, multi-core debug
hardware semaphores, shared memory,....
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET
endif #SMP
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
default y if SMP
help
This IP block enables SMP in ARC-HS38 cores.
It provides for cross-core interrupts, multi-core debug
hardware semaphores, shared memory,....
menuconfig ARC_CACHE
bool "Enable Cache Support"
default y
@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
default n
config ARC_DBG_TLB_MISS_COUNT
bool "Profile TLB Misses"
default n
select DEBUG_FS
help
Counts number of I and D TLB Misses and exports them via Debugfs
The counters can be cleared via Debugfs as well
endif
config ARC_UBOOT_SUPPORT


@ -50,9 +50,6 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64


@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_LZMA) := lzma
targets += uImage uImage.bin uImage.gz
extra-y += vmlinux.bin vmlinux.bin.gz
targets += uImage
targets += uImage.bin
targets += uImage.gz
targets += uImage.lzma
extra-y += vmlinux.bin
extra-y += vmlinux.bin.gz
extra-y += vmlinux.bin.lzma
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzma)
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
@ln -sf $(notdir $<) $@
@echo ' Image $@ is ready'


@ -349,10 +349,11 @@ struct cpuinfo_arc {
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa isa;
const char *details, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, pad2:6,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;


@ -53,7 +53,7 @@ extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
extern int ioc_exists;
extern int ioc_enable;
extern unsigned long perip_base, perip_end;
#endif /* !__ASSEMBLY__ */


@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
/*
* When the program starts, a1 contains a pointer to a function to be


@ -55,6 +55,22 @@ struct mcip_cmd {
#define IDU_M_DISTRI_DEST 0x2
};
struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8,
idu:1, llm:1, num_cores:6,
iocoh:1, gfrc:1, dbg:1, pad2:1,
msg:1, sem:1, ipi:1, pad:1,
ver:8;
#else
unsigned int ver:8,
pad:1, ipi:1, sem:1, msg:1,
pad2:1, dbg:1, gfrc:1, iocoh:1,
num_cores:6, llm:1, idu:1,
pad3:8;
#endif
};
/*
* MCIP programming model
*


@ -18,6 +18,7 @@
struct mod_arch_specific {
void *unw_info;
int unw_sec_idx;
const char *secstr;
};
#endif


@ -27,11 +27,6 @@ struct id_to_str {
const char *str;
};
struct cpuinfo_data {
struct id_to_str info;
int up_range;
};
extern int root_mountflags, end_mem;
void setup_processor(void);
@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
#endif /* __ASMARC_SETUP_H */


@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
int sys_arc_usr_cmpxchg(int *, int, int);
#include <asm-generic/syscalls.h>


@ -27,18 +27,19 @@
#define NR_syscalls __NR_syscalls
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
/* ARC specific syscall */
#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
__SYSCALL(__NR_arc_settls, sys_arc_settls)
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
__SYSCALL(__NR_sysfs, sys_sysfs)
#undef __SYSCALL


@ -15,11 +15,12 @@
#include <asm/mcip.h>
#include <asm/setup.h>
static char smp_cpuinfo_buf[128];
static int idu_detected;
static DEFINE_RAW_SPINLOCK(mcip_lock);
#ifdef CONFIG_SMP
static char smp_cpuinfo_buf[128];
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
static void mcip_probe_n_setup(void)
{
struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8,
idu:1, llm:1, num_cores:6,
iocoh:1, gfrc:1, dbg:1, pad2:1,
msg:1, sem:1, ipi:1, pad:1,
ver:8;
#else
unsigned int ver:8,
pad:1, ipi:1, sem:1, msg:1,
pad2:1, dbg:1, gfrc:1, iocoh:1,
num_cores:6, llm:1, idu:1,
pad3:8;
#endif
} mp;
struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
idu_detected = mp.idu;
if (mp.dbg) {
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
.ipi_clear = mcip_ipi_clear,
};
#endif
/***************************************************************************
* ARCv2 Interrupt Distribution Unit (IDU)
*
@ -295,8 +283,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
/* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc);
int i, irq;
struct mcip_bcr mp;
if (!idu_detected)
READ_BCR(ARC_REG_MCIP_BCR, mp);
if (!mp.idu)
panic("IDU not detected, but DeviceTree using it");
pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);


@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
int i;
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
mod->arch.unw_sec_idx = i;
break;
}
}
mod->arch.secstr = secstr;
#endif
return 0;
}
@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned int relsec, /* sec index for relo sec */
struct module *module)
{
int i, n;
int i, n, relo_type;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec;
Elf32_Addr relocation;
Elf32_Addr location;
Elf32_Addr sec_to_patch;
int relo_type;
Elf32_Addr relocation, location, tgt_addr;
unsigned int tgtsec;
sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
/*
* @relsec has relocations e.g. .rela.init.text
* @tgtsec is section to patch e.g. .init.text
*/
tgtsec = sechdrs[relsec].sh_info;
tgt_addr = sechdrs[tgtsec].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
pr_debug("\n========== Module Sym reloc ===========================\n");
pr_debug("Section to fixup %x\n", sec_to_patch);
pr_debug("\nSection to fixup %s @%x\n",
module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
pr_debug("=========================================================\n");
pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */
for (i = 0; i < n; i++) {
const char *s;
/* This is where to make the change */
location = sec_to_patch + rel_entry[i].r_offset;
location = tgt_addr + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
relocation = sym_entry->st_value + rel_entry[i].r_addend;
pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
rel_entry[i].r_offset, rel_entry[i].r_addend,
sym_entry->st_value, location, relocation,
strtab + sym_entry->st_name);
if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
} else {
s = strtab + sym_entry->st_name;
}
pr_debug(" %x\t%x\t%x %x %x [%s]\n",
rel_entry[i].r_offset, rel_entry[i].r_addend,
sym_entry->st_value, location, relocation, s);
/* This assumes modules are built with -mlong-calls
* so any branches/jumps are absolute 32 bit jmps
@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
goto relo_err;
}
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
module->arch.unw_sec_idx = tgtsec;
return 0;
relo_err:


@ -41,6 +41,39 @@ SYSCALL_DEFINE0(arc_gettls)
return task_thread_info(current)->thr_ptr;
}
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
int uval;
int ret;
/*
* This is only for old cores lacking LLOCK/SCOND, which by definition
* can't possibly be SMP. Thus doesn't need to be SMP safe.
* And this also helps reduce the overhead for serializing in
* the UP case
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
preempt_disable();
ret = __get_user(uval, uaddr);
if (ret)
goto done;
if (uval != expected)
ret = -EAGAIN;
else
ret = __put_user(new, uaddr);
done:
preempt_enable();
return ret;
}
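
[Editor's sketch, not part of the patch: for orientation, userspace would reach this handler through the raw syscall interface, assuming the __NR_arc_usr_cmpxchg number added in unistd.h above is visible via the uapi headers on the target.]

#include <unistd.h>
#include <sys/syscall.h>
#include <errno.h>

/* Hypothetical wrapper around the new ARC-specific syscall. */
static int arc_usr_cmpxchg(int *addr, int expected, int newval)
{
	/*
	 * The kernel returns 0 on success and -EAGAIN on a value mismatch;
	 * syscall() surfaces the latter as -1 with errno set to EAGAIN.
	 */
	return syscall(__NR_arc_usr_cmpxchg, addr, expected, newval);
}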
void arch_cpu_idle(void)
{
/* sleep, but enable all interrupts before committing */


@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
static const struct id_to_str arc_cpu_rel[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ 0x34, "R4.10"},
{ 0x35, "R4.11"},
#else
{ 0x51, "R2.0" },
{ 0x52, "R2.1" },
{ 0x53, "R3.0" },
#endif
{ 0x00, NULL }
};
static const struct id_to_str arc_cpu_nm[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ 0x20, "ARC 600" },
{ 0x30, "ARC 770" }, /* 750 identified separately */
#else
{ 0x40, "ARC EM" },
{ 0x50, "ARC HS38" },
#endif
{ 0x00, "Unknown" }
};
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{
if (is_isa_arcompact()) {
@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
const struct id_to_str *tbl;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
if (cpu->core.family == tbl->id) {
cpu->details = tbl->str;
break;
}
}
for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
if ((cpu->core.family & 0xF0) == tbl->id)
break;
}
cpu->name = tbl->str;
READ_BCR(ARC_REG_TIMERS_BCR, timer);
cpu->extn.timer0 = timer.t0;
cpu->extn.timer1 = timer.t1;
@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.rtt = bcr.ver ? 1 : 0;
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
/* some hacks for lack of feature BCR info in old ARC700 cores */
if (is_isa_arcompact()) {
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
else
cpu->isa.atomic = cpu->isa.atomic1;
cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
/* there's no direct way to distinguish 750 vs. 770 */
if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
cpu->name = "ARC750";
}
}
static const struct cpuinfo_data arc_cpu_tbl[] = {
#ifdef CONFIG_ISA_ARCOMPACT
{ {0x20, "ARC 600" }, 0x2F},
{ {0x30, "ARC 700" }, 0x33},
{ {0x34, "ARC 700 R4.10"}, 0x34},
{ {0x35, "ARC 700 R4.11"}, 0x35},
#else
{ {0x50, "ARC HS38 R2.0"}, 0x51},
{ {0x52, "ARC HS38 R2.1"}, 0x52},
{ {0x53, "ARC HS38 R3.0"}, 0x53},
#endif
{ {0x00, NULL } }
};
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
const struct cpuinfo_data *tbl;
char *isa_nm;
int i, be, atomic;
int n = 0;
int i, n = 0;
FIX_PTR(cpu);
if (is_isa_arcompact()) {
isa_nm = "ARCompact";
be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
atomic = cpu->isa.atomic1;
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
} else {
isa_nm = "ARCv2";
be = cpu->isa.be;
atomic = cpu->isa.atomic;
}
n += scnprintf(buf + n, len - n,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id);
for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
if ((core->family >= tbl->info.id) &&
(core->family <= tbl->up_range)) {
n += scnprintf(buf + n, len - n,
"processor [%d]\t: %s (%s ISA) %s\n",
cpu_id, tbl->info.str, isa_nm,
IS_AVAIL1(be, "[Big-Endian]"));
break;
}
}
if (tbl->info.id == 0)
n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
cpu_id, cpu->name, cpu->details,
is_isa_arcompact() ? "ARCompact" : "ARCv2",
IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
CONFIG_ARC_HAS_RTC));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL1(cpu->extn.swap, "swap "),
IS_AVAIL1(cpu->extn.minmax, "minmax "),
IS_AVAIL1(cpu->extn.crc, "crc "),
IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
FIX_PTR(cpu);
n += scnprintf(buf + n, len - n,
"Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
cpu->vec_base, perip_base, perip_end);
n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
* way to pass it w/o having to kmalloc/free a 2 byte string.
* Encode cpu-id as 0xFFcccc, which is decoded by show routine.
*/
return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)


@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
if (!user_mode(regs))
show_stacktrace(current, regs);
}
#ifdef CONFIG_DEBUG_FS
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/debugfs.h>
static struct dentry *test_dentry;
static struct dentry *test_dir;
static struct dentry *test_u32_dentry;
static u32 clr_on_read = 1;
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
u32 numitlb, numdtlb, num_pte_not_present;
static int fill_display_data(char *kbuf)
{
size_t num = 0;
num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
if (clr_on_read)
numitlb = numdtlb = num_pte_not_present = 0;
return num;
}
static int tlb_stats_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)__get_free_page(GFP_KERNEL);
return 0;
}
/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
loff_t *offset) /* offset in the file */
{
size_t num;
char *kbuf = (char *)file->private_data;
/* All of the data can be shoved in one iteration */
if (*offset != 0)
return 0;
num = fill_display_data(kbuf);
/* simple_read_from_buffer() is helper for copy to user space
It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
@3 (offset) into the user space address starting at @1 (user_buf).
@5 (len) is max size of user buffer
*/
return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
}
/* called on user write : clears the counters */
static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
size_t length, loff_t *offset)
{
numitlb = numdtlb = num_pte_not_present = 0;
return length;
}
static int tlb_stats_close(struct inode *inode, struct file *file)
{
free_page((unsigned long)(file->private_data));
return 0;
}
static const struct file_operations tlb_stats_file_ops = {
.read = tlb_stats_output,
.write = tlb_stats_clear,
.open = tlb_stats_open,
.release = tlb_stats_close
};
#endif
static int __init arc_debugfs_init(void)
{
test_dir = debugfs_create_dir("arc", NULL);
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
&tlb_stats_file_ops);
#endif
test_u32_dentry =
debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
return 0;
}
module_init(arc_debugfs_init);
static void __exit arc_debugfs_exit(void)
{
debugfs_remove(test_u32_dentry);
debugfs_remove(test_dentry);
debugfs_remove(test_dir);
}
module_exit(arc_debugfs_exit);
#endif


@ -22,8 +22,8 @@
#include <asm/setup.h>
static int l2_line_sz;
int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
if (!is_isa_arcv2())
return buf;
p = &cpuinfo_arc700[c].slc;
if (p->ver)
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
if (ioc_exists)
n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
IS_DISABLED_RUN(ioc_enable));
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
perip_base,
IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
return buf;
}
@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
if (cbcr.c && ioc_enable)
if (cbcr.c)
ioc_exists = 1;
else
ioc_enable = 0;
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
@ -1002,7 +1001,7 @@ void arc_cache_init(void)
read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
}
if (is_isa_arcv2() && ioc_exists) {
if (is_isa_arcv2() && ioc_enable) {
/* IO coherency base - 0x8z */
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/* IO coherency aperture size - 512Mb: 0x8z-0xAz */


@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster)
*/
if ((is_isa_arcv2() && ioc_exists) ||
if ((is_isa_arcv2() && ioc_enable) ||
(attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
int is_non_coh = 1;
is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
(is_isa_arcv2() && ioc_exists);
(is_isa_arcv2() && ioc_enable);
if (PageHighMem(page) || !is_non_coh)
iounmap((void __force __iomem *)vaddr);


@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
char super_pg[64] = "";
if (p_mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "%dM Super Page%s, ",
scnprintf(super_pg, 64, "%dM Super Page %s",
p_mmu->s_pg_sz_m,
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
n += scnprintf(buf + n, len - n,
"MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
"MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg,
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
}


@ -237,15 +237,6 @@ ex_saved_reg1:
2:
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
and.f 0, r0, _PAGE_PRESENT
bz 1f
ld r3, [num_pte_not_present]
add r3, r3, 1
st r3, [num_pte_not_present]
1:
#endif
.endm
;-----------------------------------------------------------------
@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
ld r0, [@numitlb]
add r0, r0, 1
st r0, [@numitlb]
#endif
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
LOAD_FAULT_PTE
@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
ld r0, [@numdtlb]
add r0, r0, 1
st r0, [@numdtlb]
#endif
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA


@ -239,14 +239,25 @@
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
cap-sd-highspeed;
cap-mmc-highspeed;
sd-uhs-sdr12;
sd-uhs-sdr25;
/* All direction control is used */
st,sig-dir-cmd;
st,sig-dir-dat0;
st,sig-dir-dat2;
st,sig-dir-dat31;
st,sig-pin-fbclk;
full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
vqmmc-supply = <&vmmci>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&sdi0_default_mode>;
pinctrl-1 = <&sdi0_sleep_mode>;
cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
/* GPIO218 MMC_CD */
cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
status = "okay";
};
@ -549,7 +560,7 @@
/* VMMCI level-shifter enable */
snowball_cfg3 {
pins = "GPIO217_AH12";
ste,config = <&gpio_out_lo>;
ste,config = <&gpio_out_hi>;
};
/* VMMCI level-shifter voltage select */
snowball_cfg4 {


@ -184,11 +184,11 @@
};
&mio_clk {
compatible = "socionext,uniphier-pro5-mio-clock";
compatible = "socionext,uniphier-pro5-sd-clock";
};
&mio_rst {
compatible = "socionext,uniphier-pro5-mio-reset";
compatible = "socionext,uniphier-pro5-sd-reset";
};
&peri_clk {


@ -197,11 +197,11 @@
};
&mio_clk {
compatible = "socionext,uniphier-pxs2-mio-clock";
compatible = "socionext,uniphier-pxs2-sd-clock";
};
&mio_rst {
compatible = "socionext,uniphier-pxs2-mio-reset";
compatible = "socionext,uniphier-pxs2-sd-reset";
};
&peri_clk {


@ -70,7 +70,7 @@
global_timer: timer@40002200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x40002200 0x20>;
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
interrupt-parent = <&intc>;
clocks = <&clks VF610_CLK_PLATFORM_BUS>;
};


@ -850,6 +850,7 @@ CONFIG_PWM_SUN4I=y
CONFIG_PWM_TEGRA=y
CONFIG_PWM_VT8500=y
CONFIG_PHY_HIX5HD2_SATA=y
CONFIG_E1000E=y
CONFIG_PWM_STI=y
CONFIG_PWM_BCM2835=y
CONFIG_PWM_BRCMSTB=m


@ -19,7 +19,7 @@
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
#define __NR_syscalls (396)
#define __NR_syscalls (400)
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME


@ -420,6 +420,9 @@
#define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
#define __NR_preadv2 (__NR_SYSCALL_BASE+392)
#define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
#define __NR_pkey_mprotect (__NR_SYSCALL_BASE+394)
#define __NR_pkey_alloc (__NR_SYSCALL_BASE+395)
#define __NR_pkey_free (__NR_SYSCALL_BASE+396)
/*
* The following SWIs are ARM private.


@ -403,6 +403,9 @@
CALL(sys_copy_file_range)
CALL(sys_preadv2)
CALL(sys_pwritev2)
CALL(sys_pkey_mprotect)
/* 395 */ CALL(sys_pkey_alloc)
CALL(sys_pkey_free)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted


@ -408,7 +408,7 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
{
struct clk *clk;
int i;
int i, ret;
imx6q_pu_domain.reg = pu_reg;
@ -430,13 +430,22 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
return 0;
pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
return of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
pm_genpd_init(imx_gpc_domains[i], NULL, false);
ret = of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
if (ret)
goto power_off;
return 0;
power_off:
imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
clk_err:
while (i--)
clk_put(imx6q_pu_domain.clk[i]);
imx6q_pu_domain.reg = NULL;
return -EINVAL;
}


@ -173,7 +173,7 @@ static void __init imx6q_enet_phy_init(void)
ksz9021rn_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK,
ksz9031rn_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
ar8031_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
ar8035_phy_fixup);


@ -23,6 +23,7 @@ config MACH_MVEBU_V7
select CACHE_L2X0
select ARM_CPU_SUSPEND
select MACH_MVEBU_ANY
select MVEBU_CLK_COREDIV
config MACH_ARMADA_370
bool "Marvell Armada 370 boards"
@ -32,7 +33,6 @@ config MACH_ARMADA_370
select CPU_PJ4B
select MACH_MVEBU_V7
select PINCTRL_ARMADA_370
select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 370 SoC with device tree.
@ -50,7 +50,6 @@ config MACH_ARMADA_375
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_375
select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 375 SoC with device tree.
@ -68,7 +67,6 @@ config MACH_ARMADA_38X
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_38X
select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 380/385 SoC with device tree.


@ -1,6 +1,7 @@
config ARCH_UNIPHIER
bool "Socionext UniPhier SoCs"
depends on ARCH_MULTI_V7
select ARCH_HAS_RESET_CONTROLLER
select ARM_AMBA
select ARM_GLOBAL_TIMER
select ARM_GIC


@ -7,7 +7,7 @@
* : r4 = aborted context pc
* : r5 = aborted context psr
*
* Returns : r4-r5, r10-r11, r13 preserved
* Returns : r4-r5, r9-r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
/* f */
/* f */ b .data_unknown
.data_unknown_r9:
ldr r9, [sp], #4
.data_unknown: @ Part of jumptable
mov r0, r4
mov r1, r8
@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
beq do_DataAbort @ no writeback -> no fixup
str r9, [sp, #-4]!
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrhpre:
tst r8, #1 << 21 @ Check writeback bit
beq do_DataAbort @ No writeback -> no fixup
.data_arm_lateldrhpost:
str r9, [sp, #-4]!
and r9, r8, #0x00f @ get Rm / low nibble of immediate value
tst r8, #1 << 22 @ if (immediate offset)
andne r6, r8, #0xf00 @ { immediate high nibble
@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6 @ Undo increment
addeq r7, r7, r6 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrpreconst:
@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostconst:
movs r6, r8, lsl #20 @ Get offset
beq do_DataAbort @ zero -> no fixup
str r9, [sp, #-4]!
and r9, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsr #20 @ Undo increment
addeq r7, r7, r6, lsr #20 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrprereg:
@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
str r9, [sp, #-4]!
mov r9, r8, lsr #7 @ get shift count
ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
b .data_unknown @ 2: MUL?
b .data_unknown_r9 @ 2: MUL?
nop
b .data_unknown @ 3: MUL?
b .data_unknown_r9 @ 3: MUL?
nop
mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
b .data_unknown @ 6: MUL?
b .data_unknown_r9 @ 6: MUL?
nop
b .data_unknown @ 7: MUL?
b .data_unknown_r9 @ 7: MUL?
nop
mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
b .data_unknown @ A: MUL?
b .data_unknown_r9 @ A: MUL?
nop
b .data_unknown @ B: MUL?
b .data_unknown_r9 @ B: MUL?
nop
mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
b .data_unknown @ E: MUL?
b .data_unknown_r9 @ E: MUL?
nop
b .data_unknown @ F: MUL?
b .data_unknown_r9 @ F: MUL?
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8) + R bit
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
str r7, [r2, #13 << 2]
ldr r9, [sp], #4
b do_DataAbort
.data_thumb_ldmstm:
str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8)
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
str r7, [r2, r9, lsr #6]
ldr r9, [sp], #4
b do_DataAbort


@ -190,6 +190,7 @@ config ARCH_THUNDER
config ARCH_UNIPHIER
bool "Socionext UniPhier SoC Family"
select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
help
This enables support for Socionext UniPhier SoC family.


@ -164,6 +164,8 @@
nand-ecc-mode = "hw";
nand-ecc-strength = <8>;
nand-ecc-step-size = <512>;
nand-bus-width = <16>;
brcm,nand-oob-sector-size = <16>;
#address-cells = <1>;
#size-cells = <1>;
};


@ -123,6 +123,7 @@
<1 14 0xf08>, /* Physical Non-Secure PPI */
<1 11 0xf08>, /* Virtual PPI */
<1 10 0xf08>; /* Hypervisor PPI */
fsl,erratum-a008585;
};
pmu {


@ -195,6 +195,7 @@
<1 14 4>, /* Physical Non-Secure PPI, active-low */
<1 11 4>, /* Virtual PPI, active-low */
<1 10 4>; /* Hypervisor PPI, active-low */
fsl,erratum-a008585;
};
pmu {


@ -131,7 +131,7 @@
#address-cells = <0x1>;
#size-cells = <0x0>;
cell-index = <1>;
clocks = <&cpm_syscon0 0 3>;
clocks = <&cpm_syscon0 1 21>;
status = "disabled";
};


@ -116,7 +116,6 @@
cap-mmc-highspeed;
clock-frequency = <150000000>;
disable-wp;
keep-power-in-suspend;
non-removable;
num-slots = <1>;
vmmc-supply = <&vcc_io>;
@ -258,8 +257,6 @@
};
vcc_sd: SWITCH_REG1 {
regulator-always-on;
regulator-boot-on;
regulator-name = "vcc_sd";
};


@ -152,8 +152,6 @@
gpio = <&gpio3 11 GPIO_ACTIVE_LOW>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
vin-supply = <&vcc_io>;
};
@ -201,7 +199,6 @@
bus-width = <8>;
cap-mmc-highspeed;
disable-wp;
keep-power-in-suspend;
mmc-pwrseq = <&emmc_pwrseq>;
mmc-hs200-1_2v;
mmc-hs200-1_8v;
@ -350,7 +347,6 @@
clock-freq-min-max = <400000 50000000>;
cap-sd-highspeed;
card-detect-delay = <200>;
keep-power-in-suspend;
num-slots = <1>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;


@ -257,18 +257,18 @@
reg = <0x59801000 0x400>;
};
mioctrl@59810000 {
compatible = "socionext,uniphier-mioctrl",
sdctrl@59810000 {
compatible = "socionext,uniphier-ld20-sdctrl",
"simple-mfd", "syscon";
reg = <0x59810000 0x800>;
mio_clk: clock {
compatible = "socionext,uniphier-ld20-mio-clock";
sd_clk: clock {
compatible = "socionext,uniphier-ld20-sd-clock";
#clock-cells = <1>;
};
mio_rst: reset {
compatible = "socionext,uniphier-ld20-mio-reset";
sd_rst: reset {
compatible = "socionext,uniphier-ld20-sd-reset";
#reset-cells = <1>;
};
};


@ -1,7 +1,7 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
#include <asm/cpufeature.h>
#include <asm/cpucaps.h>
#include <asm/insn.h>
#ifndef __ASSEMBLY__


@ -0,0 +1,40 @@
/*
* arch/arm64/include/asm/cpucaps.h
*
* Copyright (C) 2016 ARM Ltd.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_CPUCAPS_H
#define __ASM_CPUCAPS_H
#define ARM64_WORKAROUND_CLEAN_CACHE 0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM64_WORKAROUND_845719 2
#define ARM64_HAS_SYSREG_GIC_CPUIF 3
#define ARM64_HAS_PAN 4
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
#define ARM64_HAS_NO_HW_PREFETCH 8
#define ARM64_HAS_UAO 9
#define ARM64_ALT_PAN_NOT_UAO 10
#define ARM64_HAS_VIRT_HOST_EXTN 11
#define ARM64_WORKAROUND_CAVIUM_27456 12
#define ARM64_HAS_32BIT_EL0 13
#define ARM64_HYP_OFFSET_LOW 14
#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
#define ARM64_NCAPS 16
#endif /* __ASM_CPUCAPS_H */


@ -11,6 +11,7 @@
#include <linux/jump_label.h>
#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@ -24,25 +25,6 @@
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
#define ARM64_WORKAROUND_CLEAN_CACHE 0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM64_WORKAROUND_845719 2
#define ARM64_HAS_SYSREG_GIC_CPUIF 3
#define ARM64_HAS_PAN 4
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
#define ARM64_HAS_NO_HW_PREFETCH 8
#define ARM64_HAS_UAO 9
#define ARM64_ALT_PAN_NOT_UAO 10
#define ARM64_HAS_VIRT_HOST_EXTN 11
#define ARM64_WORKAROUND_CAVIUM_27456 12
#define ARM64_HAS_32BIT_EL0 13
#define ARM64_HYP_OFFSET_LOW 14
#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
#define ARM64_NCAPS 16
#ifndef __ASSEMBLY__
#include <linux/kernel.h>


@ -5,7 +5,6 @@
#include <linux/stringify.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#ifdef __ASSEMBLER__


@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))


@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
return node_distance(from, to);
return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
if (start_pfn < end_pfn)
pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
else
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);


@ -3149,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
printk("print_dma_descriptors start\n");
printk("iop:\n");
printk("\tsid: 0x%lld\n", iop->sid);
printk("\tsid: 0x%llx\n", iop->sid);
printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);


@ -31,7 +31,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit;
struct restart_block restart_block;
};
/*
@ -44,9 +43,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
}
#define init_thread_info (init_thread_union.thread_info)


@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
current->restart_block.fn = do_no_restart_syscall;
/* restore passed registers */
#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)


@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
VMLINUX_ENTRY_ADDRESS=$(entry-y) \
PLATFORM=$(platform-y)
PLATFORM="$(platform-y)"
ifdef CONFIG_32BIT
bootvars-y += ADDR_BITS=32
endif


@ -84,12 +84,13 @@
fpga_regs: system-controller@1f000000 {
compatible = "mti,malta-fpga", "syscon", "simple-mfd";
reg = <0x1f000000 0x1000>;
native-endian;
reboot {
compatible = "syscon-reboot";
regmap = <&fpga_regs>;
offset = <0x500>;
mask = <0x4d>;
mask = <0x42>;
};
};


@ -29,10 +29,20 @@ static __initdata const struct mips_machine *mach;
static __initdata const void *mach_match_data;
void __init prom_init(void)
{
plat_get_fdt();
BUG_ON(!fdt);
}
void __init *plat_get_fdt(void)
{
const struct mips_machine *check_mach;
const struct of_device_id *match;
if (fdt)
/* Already set up */
return (void *)fdt;
if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
/*
* We booted using the UHI boot protocol, so we have been
@ -75,12 +85,6 @@ void __init prom_init(void)
/* Retrieve the machine's FDT */
fdt = mach->fdt;
}
BUG_ON(!fdt);
}
void __init *plat_get_fdt(void)
{
return (void *)fdt;
}


@ -63,6 +63,8 @@ do { \
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
struct task_struct *tsk);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
/*
* Mask the FCSR Cause bits according to the Enable bits, observing
* that Unimplemented is always enabled.
*/
static inline unsigned long mask_fcr31_x(unsigned long fcr31)
{
return fcr31 & (FPU_CSR_UNI_X |
((fcr31 & FPU_CSR_ALL_E) <<
(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
}
#endif /* _ASM_FPU_EMULATOR_H */


@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
/* Host KSEG0 address of the EI/DI offset */
void *kseg0_commpage;
u32 io_gpr; /* GPR used as IO source/target */
/* Resume PC after MMIO completion */
unsigned long io_pc;
/* GPR used as IO source/target */
u32 io_gpr;
struct hrtimer comparecount_timer;
/* Count timer control KVM register */
@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
u32 pending_load_cause;
/* Save/Restore the entryhi register when are are preempted/scheduled back in */
unsigned long preempt_entryhi;


@ -75,6 +75,22 @@ do { if (cpu_has_rw_llb) { \
} \
} while (0)
/*
* Check FCSR for any unmasked exceptions pending set with `ptrace',
* clear them and send a signal.
*/
#define __sanitize_fcr31(next) \
do { \
unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
void __user *pc; \
\
if (unlikely(fcr31)) { \
pc = (void __user *)task_pt_regs(next)->cp0_epc; \
next->thread.fpu.fcr31 &= ~fcr31; \
force_fcr31_sig(fcr31, pc, next); \
} \
} while (0)
/*
* For newly created kernel threads switch_to() will return to
* ret_from_kernel_thread, newly created user threads to ret_from_fork.
@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
do { \
__mips_mt_fpaff_switch_to(prev); \
lose_fpu_inatomic(1, prev); \
if (tsk_used_math(next)) \
__sanitize_fcr31(next); \
if (cpu_has_dsp) { \
__save_dsp(prev); \
__restore_dsp(next); \


@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
phys_addr_t __weak mips_cpc_default_phys_base(void)
{
return 0;
}
/**
* mips_cpc_phys_base - retrieve the physical base address of the CPC
*
@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
/* Otherwise, give it the default address & enable it */
/* Otherwise, use the default address */
cpc_base = mips_cpc_default_phys_base();
if (!cpc_base)
return cpc_base;
/* Enable the CPC, mapped at the default address */
write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
return cpc_base;
}
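
If a platform wants the CPC mapped somewhere specific, it can now override the weak hook rather than relying on firmware to have enabled it. A hypothetical override might look like the sketch below; the base address is invented for illustration, and the usual kernel declarations (phys_addr_t and the hook's prototype, assumed to come from <asm/mips-cpc.h>) are taken to be in scope.

/* Hypothetical platform code, for illustration only. */
phys_addr_t mips_cpc_default_phys_base(void)
{
        /* invented example address; return 0 to leave the CPC unmapped */
        return 0x1bde0000;
}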


@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
* @fcr31: Floating Point Control and Status Register returned
* @fcr31: Floating Point Control and Status Register Cause bits returned
*/
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
@ -1172,13 +1172,13 @@ fpu_emul:
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
*fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave any of
* the cause bits set in $fcr31.
* We can't allow the emulated instruction to leave any
* enabled Cause bits set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
*fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~res;
/*
* this is a tricky issue - lose_fpu() uses LL/SC atomics


@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
}
/*
* Poke at FCSR according to its mask. Don't set the cause bits as
* this is currently not handled correctly in FP context restoration
* and will cause an oops if a corresponding enable bit is set.
* Poke at FCSR according to its mask. Set the Cause bits even
* if a corresponding Enable bit is set. This will be noticed at
* the time the thread is switched to and SIGFPE thrown accordingly.
*/
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
value &= ~FPU_CSR_ALL_X;
fcr31 = child->thread.fpu.fcr31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
#endif
case FPC_CSR:
init_fp_ctx(child);
ptrace_setfcr31(child, data);
break;
case DSP_BASE ... DSP_BASE + 5: {


@ -19,108 +19,86 @@
#include <asm/regdef.h>
#define EX(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
PTR 9b,fault; \
.previous
#define EX2(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
PTR 9b,bad_stack; \
PTR 9b+4,bad_stack; \
.previous
.set noreorder
.set mips1
/* Save floating point context */
/**
* _save_fp_context() - save FP context from the FPU
* @a0 - pointer to fpregs field of sigcontext
* @a1 - pointer to fpc_csr field of sigcontext
*
* Save FP context, including the 32 FP data registers and the FP
* control & status register, from the FPU to signal context.
*/
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
cfc1 t1,fcr31
EX(swc1 $f0,(SC_FPREGS+0)(a0))
EX(swc1 $f1,(SC_FPREGS+8)(a0))
EX(swc1 $f2,(SC_FPREGS+16)(a0))
EX(swc1 $f3,(SC_FPREGS+24)(a0))
EX(swc1 $f4,(SC_FPREGS+32)(a0))
EX(swc1 $f5,(SC_FPREGS+40)(a0))
EX(swc1 $f6,(SC_FPREGS+48)(a0))
EX(swc1 $f7,(SC_FPREGS+56)(a0))
EX(swc1 $f8,(SC_FPREGS+64)(a0))
EX(swc1 $f9,(SC_FPREGS+72)(a0))
EX(swc1 $f10,(SC_FPREGS+80)(a0))
EX(swc1 $f11,(SC_FPREGS+88)(a0))
EX(swc1 $f12,(SC_FPREGS+96)(a0))
EX(swc1 $f13,(SC_FPREGS+104)(a0))
EX(swc1 $f14,(SC_FPREGS+112)(a0))
EX(swc1 $f15,(SC_FPREGS+120)(a0))
EX(swc1 $f16,(SC_FPREGS+128)(a0))
EX(swc1 $f17,(SC_FPREGS+136)(a0))
EX(swc1 $f18,(SC_FPREGS+144)(a0))
EX(swc1 $f19,(SC_FPREGS+152)(a0))
EX(swc1 $f20,(SC_FPREGS+160)(a0))
EX(swc1 $f21,(SC_FPREGS+168)(a0))
EX(swc1 $f22,(SC_FPREGS+176)(a0))
EX(swc1 $f23,(SC_FPREGS+184)(a0))
EX(swc1 $f24,(SC_FPREGS+192)(a0))
EX(swc1 $f25,(SC_FPREGS+200)(a0))
EX(swc1 $f26,(SC_FPREGS+208)(a0))
EX(swc1 $f27,(SC_FPREGS+216)(a0))
EX(swc1 $f28,(SC_FPREGS+224)(a0))
EX(swc1 $f29,(SC_FPREGS+232)(a0))
EX(swc1 $f30,(SC_FPREGS+240)(a0))
EX(swc1 $f31,(SC_FPREGS+248)(a0))
EX(sw t1,(SC_FPC_CSR)(a0))
cfc1 t0,$0 # implementation/version
cfc1 t1, fcr31
EX2(s.d $f0, 0(a0))
EX2(s.d $f2, 16(a0))
EX2(s.d $f4, 32(a0))
EX2(s.d $f6, 48(a0))
EX2(s.d $f8, 64(a0))
EX2(s.d $f10, 80(a0))
EX2(s.d $f12, 96(a0))
EX2(s.d $f14, 112(a0))
EX2(s.d $f16, 128(a0))
EX2(s.d $f18, 144(a0))
EX2(s.d $f20, 160(a0))
EX2(s.d $f22, 176(a0))
EX2(s.d $f24, 192(a0))
EX2(s.d $f26, 208(a0))
EX2(s.d $f28, 224(a0))
EX2(s.d $f30, 240(a0))
jr ra
EX(sw t1, (a1))
.set pop
.set nomacro
EX(sw t0,(SC_FPC_EIR)(a0))
.set macro
END(_save_fp_context)
/*
* Restore FPU state:
* - fp gp registers
* - cp1 status/control register
/**
* _restore_fp_context() - restore FP context to the FPU
* @a0 - pointer to fpregs field of sigcontext
* @a1 - pointer to fpc_csr field of sigcontext
*
* We base the decision which registers to restore from the signal stack
* frame on the current content of c0_status, not on the content of the
* stack frame which might have been changed by the user.
* Restore FP context, including the 32 FP data registers and the FP
* control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
EX(lw t0,(SC_FPC_CSR)(a0))
EX(lwc1 $f0,(SC_FPREGS+0)(a0))
EX(lwc1 $f1,(SC_FPREGS+8)(a0))
EX(lwc1 $f2,(SC_FPREGS+16)(a0))
EX(lwc1 $f3,(SC_FPREGS+24)(a0))
EX(lwc1 $f4,(SC_FPREGS+32)(a0))
EX(lwc1 $f5,(SC_FPREGS+40)(a0))
EX(lwc1 $f6,(SC_FPREGS+48)(a0))
EX(lwc1 $f7,(SC_FPREGS+56)(a0))
EX(lwc1 $f8,(SC_FPREGS+64)(a0))
EX(lwc1 $f9,(SC_FPREGS+72)(a0))
EX(lwc1 $f10,(SC_FPREGS+80)(a0))
EX(lwc1 $f11,(SC_FPREGS+88)(a0))
EX(lwc1 $f12,(SC_FPREGS+96)(a0))
EX(lwc1 $f13,(SC_FPREGS+104)(a0))
EX(lwc1 $f14,(SC_FPREGS+112)(a0))
EX(lwc1 $f15,(SC_FPREGS+120)(a0))
EX(lwc1 $f16,(SC_FPREGS+128)(a0))
EX(lwc1 $f17,(SC_FPREGS+136)(a0))
EX(lwc1 $f18,(SC_FPREGS+144)(a0))
EX(lwc1 $f19,(SC_FPREGS+152)(a0))
EX(lwc1 $f20,(SC_FPREGS+160)(a0))
EX(lwc1 $f21,(SC_FPREGS+168)(a0))
EX(lwc1 $f22,(SC_FPREGS+176)(a0))
EX(lwc1 $f23,(SC_FPREGS+184)(a0))
EX(lwc1 $f24,(SC_FPREGS+192)(a0))
EX(lwc1 $f25,(SC_FPREGS+200)(a0))
EX(lwc1 $f26,(SC_FPREGS+208)(a0))
EX(lwc1 $f27,(SC_FPREGS+216)(a0))
EX(lwc1 $f28,(SC_FPREGS+224)(a0))
EX(lwc1 $f29,(SC_FPREGS+232)(a0))
EX(lwc1 $f30,(SC_FPREGS+240)(a0))
EX(lwc1 $f31,(SC_FPREGS+248)(a0))
EX(lw t0, (a1))
EX2(l.d $f0, 0(a0))
EX2(l.d $f2, 16(a0))
EX2(l.d $f4, 32(a0))
EX2(l.d $f6, 48(a0))
EX2(l.d $f8, 64(a0))
EX2(l.d $f10, 80(a0))
EX2(l.d $f12, 96(a0))
EX2(l.d $f14, 112(a0))
EX2(l.d $f16, 128(a0))
EX2(l.d $f18, 144(a0))
EX2(l.d $f20, 160(a0))
EX2(l.d $f22, 176(a0))
EX2(l.d $f24, 192(a0))
EX2(l.d $f26, 208(a0))
EX2(l.d $f28, 224(a0))
EX2(l.d $f30, 240(a0))
jr ra
ctc1 t0,fcr31
ctc1 t0, fcr31
.set pop
END(_restore_fp_context)
.set reorder


@ -21,7 +21,14 @@
.set push
SET_HARDFLOAT
/* Save floating point context */
/**
* _save_fp_context() - save FP context from the FPU
* @a0 - pointer to fpregs field of sigcontext
* @a1 - pointer to fpc_csr field of sigcontext
*
* Save FP context, including the 32 FP data registers and the FP
* control & status register, from the FPU to signal context.
*/
LEAF(_save_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
@ -30,59 +37,59 @@
cfc1 t1,fcr31
/* Store the 16 double precision registers */
sdc1 $f0,(SC_FPREGS+0)(a0)
sdc1 $f2,(SC_FPREGS+16)(a0)
sdc1 $f4,(SC_FPREGS+32)(a0)
sdc1 $f6,(SC_FPREGS+48)(a0)
sdc1 $f8,(SC_FPREGS+64)(a0)
sdc1 $f10,(SC_FPREGS+80)(a0)
sdc1 $f12,(SC_FPREGS+96)(a0)
sdc1 $f14,(SC_FPREGS+112)(a0)
sdc1 $f16,(SC_FPREGS+128)(a0)
sdc1 $f18,(SC_FPREGS+144)(a0)
sdc1 $f20,(SC_FPREGS+160)(a0)
sdc1 $f22,(SC_FPREGS+176)(a0)
sdc1 $f24,(SC_FPREGS+192)(a0)
sdc1 $f26,(SC_FPREGS+208)(a0)
sdc1 $f28,(SC_FPREGS+224)(a0)
sdc1 $f30,(SC_FPREGS+240)(a0)
sdc1 $f0,0(a0)
sdc1 $f2,16(a0)
sdc1 $f4,32(a0)
sdc1 $f6,48(a0)
sdc1 $f8,64(a0)
sdc1 $f10,80(a0)
sdc1 $f12,96(a0)
sdc1 $f14,112(a0)
sdc1 $f16,128(a0)
sdc1 $f18,144(a0)
sdc1 $f20,160(a0)
sdc1 $f22,176(a0)
sdc1 $f24,192(a0)
sdc1 $f26,208(a0)
sdc1 $f28,224(a0)
sdc1 $f30,240(a0)
jr ra
sw t0,SC_FPC_CSR(a0)
sw t0,(a1)
1: jr ra
nop
END(_save_fp_context)
/* Restore FPU state:
* - fp gp registers
* - cp1 status/control register
/**
* _restore_fp_context() - restore FP context to the FPU
* @a0 - pointer to fpregs field of sigcontext
* @a1 - pointer to fpc_csr field of sigcontext
*
* We base the decision which registers to restore from the signal stack
* frame on the current content of c0_status, not on the content of the
* stack frame which might have been changed by the user.
* Restore FP context, including the 32 FP data registers and the FP
* control & status register, from signal context to the FPU.
*/
LEAF(_restore_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
bgez t0,1f
lw t0,SC_FPC_CSR(a0)
lw t0,(a1)
/* Restore the 16 double precision registers */
ldc1 $f0,(SC_FPREGS+0)(a0)
ldc1 $f2,(SC_FPREGS+16)(a0)
ldc1 $f4,(SC_FPREGS+32)(a0)
ldc1 $f6,(SC_FPREGS+48)(a0)
ldc1 $f8,(SC_FPREGS+64)(a0)
ldc1 $f10,(SC_FPREGS+80)(a0)
ldc1 $f12,(SC_FPREGS+96)(a0)
ldc1 $f14,(SC_FPREGS+112)(a0)
ldc1 $f16,(SC_FPREGS+128)(a0)
ldc1 $f18,(SC_FPREGS+144)(a0)
ldc1 $f20,(SC_FPREGS+160)(a0)
ldc1 $f22,(SC_FPREGS+176)(a0)
ldc1 $f24,(SC_FPREGS+192)(a0)
ldc1 $f26,(SC_FPREGS+208)(a0)
ldc1 $f28,(SC_FPREGS+224)(a0)
ldc1 $f30,(SC_FPREGS+240)(a0)
ldc1 $f0,0(a0)
ldc1 $f2,16(a0)
ldc1 $f4,32(a0)
ldc1 $f6,48(a0)
ldc1 $f8,64(a0)
ldc1 $f10,80(a0)
ldc1 $f12,96(a0)
ldc1 $f14,112(a0)
ldc1 $f16,128(a0)
ldc1 $f18,144(a0)
ldc1 $f20,160(a0)
ldc1 $f22,176(a0)
ldc1 $f24,192(a0)
ldc1 $f26,208(a0)
ldc1 $f28,224(a0)
ldc1 $f30,240(a0)
jr ra
ctc1 t0,fcr31
1: jr ra


@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
#if defined(CONFIG_USE_OF)
/* Get any additional entropy passed in device tree */
{
if (initial_boot_params) {
int node, len;
u64 *prop;


@ -368,6 +368,19 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
#ifndef CONFIG_HIGHMEM
/*
* Skip highmem here so we get an accurate max_low_pfn if low
* memory stops short of high memory.
* If the region overlaps HIGHMEM_START, end is clipped so
* max_pfn excludes the highmem portion.
*/
if (start >= PFN_DOWN(HIGHMEM_START))
continue;
if (end > PFN_DOWN(HIGHMEM_START))
end = PFN_DOWN(HIGHMEM_START);
#endif
if (end > max_low_pfn)
max_low_pfn = end;
if (start < min_low_pfn)


@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
print_ip_sym(pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
printk("\n");
pr_cont("\n");
}
/*
@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
printk("Stack :");
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0))
printk("\n ");
if (i && ((i % (64 / field)) == 0)) {
pr_cont("\n");
printk(" ");
}
if (i > 39) {
printk(" ...");
pr_cont(" ...");
break;
}
if (__get_user(stackdata, sp++)) {
printk(" (Bad stack address)");
pr_cont(" (Bad stack address)");
break;
}
printk(" %0*lx", field, stackdata);
pr_cont(" %0*lx", field, stackdata);
i++;
}
printk("\n");
pr_cont("\n");
show_backtrace(task, regs);
}
@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
long i;
unsigned short __user *pc16 = NULL;
printk("\nCode:");
printk("Code:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
printk(" (Bad address in epc)\n");
pr_cont(" (Bad address in epc)\n");
break;
}
printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
pr_cont("\n");
}
static void __show_regs(const struct pt_regs *regs)
@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
printk(" %0*lx", field, 0UL);
pr_cont(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
printk(" %*s", field, "");
pr_cont(" %*s", field, "");
else
printk(" %0*lx", field, regs->regs[i]);
pr_cont(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
printk("\n");
pr_cont("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
if (cpu_has_3kex) {
if (regs->cp0_status & ST0_KUO)
printk("KUo ");
pr_cont("KUo ");
if (regs->cp0_status & ST0_IEO)
printk("IEo ");
pr_cont("IEo ");
if (regs->cp0_status & ST0_KUP)
printk("KUp ");
pr_cont("KUp ");
if (regs->cp0_status & ST0_IEP)
printk("IEp ");
pr_cont("IEp ");
if (regs->cp0_status & ST0_KUC)
printk("KUc ");
pr_cont("KUc ");
if (regs->cp0_status & ST0_IEC)
printk("IEc ");
pr_cont("IEc ");
} else if (cpu_has_4kex) {
if (regs->cp0_status & ST0_KX)
printk("KX ");
pr_cont("KX ");
if (regs->cp0_status & ST0_SX)
printk("SX ");
pr_cont("SX ");
if (regs->cp0_status & ST0_UX)
printk("UX ");
pr_cont("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
printk("USER ");
pr_cont("USER ");
break;
case KSU_SUPERVISOR:
printk("SUPERVISOR ");
pr_cont("SUPERVISOR ");
break;
case KSU_KERNEL:
printk("KERNEL ");
pr_cont("KERNEL ");
break;
default:
printk("BAD_MODE ");
pr_cont("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
printk("ERL ");
pr_cont("ERL ");
if (regs->cp0_status & ST0_EXL)
printk("EXL ");
pr_cont("EXL ");
if (regs->cp0_status & ST0_IE)
printk("IE ");
pr_cont("IE ");
}
printk("\n");
pr_cont("\n");
exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
/*
* Send SIGFPE according to FCSR Cause bits, which must have already
* been masked against Enable bits. This is important as Inexact can
* happen together with Overflow or Underflow, and `ptrace' can set
* any bits.
*/
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
struct task_struct *tsk)
{
struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
if (fcr31 & FPU_CSR_INV_X)
si.si_code = FPE_FLTINV;
else if (fcr31 & FPU_CSR_DIV_X)
si.si_code = FPE_FLTDIV;
else if (fcr31 & FPU_CSR_OVF_X)
si.si_code = FPE_FLTOVF;
else if (fcr31 & FPU_CSR_UDF_X)
si.si_code = FPE_FLTUND;
else if (fcr31 & FPU_CSR_INE_X)
si.si_code = FPE_FLTRES;
else
si.si_code = __SI_FAULT;
force_sig_info(SIGFPE, &si, tsk);
}
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 0;
case SIGFPE:
si.si_addr = fault_addr;
si.si_signo = sig;
/*
* Inexact can happen together with Overflow or Underflow.
* Respect the mask to deliver the correct exception.
*/
fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
if (fcr31 & FPU_CSR_INV_X)
si.si_code = FPE_FLTINV;
else if (fcr31 & FPU_CSR_DIV_X)
si.si_code = FPE_FLTDIV;
else if (fcr31 & FPU_CSR_OVF_X)
si.si_code = FPE_FLTOVF;
else if (fcr31 & FPU_CSR_UDF_X)
si.si_code = FPE_FLTUND;
else if (fcr31 & FPU_CSR_INE_X)
si.si_code = FPE_FLTRES;
else
si.si_code = __SI_FAULT;
force_sig_info(sig, &si, current);
force_fcr31_sig(fcr31, fault_addr, current);
return 1;
case SIGBUS:
@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave any of
* the cause bits set in $fcr31.
* We can't allow the emulated instruction to leave any
* enabled Cause bits set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave any of
* the cause bits set in $fcr31.
* We can't allow the emulated instruction to leave any
* enabled Cause bits set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave
* any of the cause bits set in $fcr31.
* any enabled Cause bits set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
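
One consequence of routing all of these paths through force_fcr31_sig() is that the si_code follows a fixed precedence when several masked Cause bits are raised at once, so Inexact never hides a simultaneous Overflow or Underflow. A small user-space sketch of that precedence follows; the bit positions are assumptions of the sketch rather than values from the kernel headers.

#include <stdio.h>

#define CAUSE_INV (1UL << 16)   /* Invalid Operation */
#define CAUSE_DIV (1UL << 15)   /* Divide by Zero */
#define CAUSE_OVF (1UL << 14)   /* Overflow */
#define CAUSE_UDF (1UL << 13)   /* Underflow */
#define CAUSE_INE (1UL << 12)   /* Inexact */

static const char *fpe_code(unsigned long cause)
{
        /* same ordering as the if/else chain in force_fcr31_sig() */
        if (cause & CAUSE_INV) return "FPE_FLTINV";
        if (cause & CAUSE_DIV) return "FPE_FLTDIV";
        if (cause & CAUSE_OVF) return "FPE_FLTOVF";
        if (cause & CAUSE_UDF) return "FPE_FLTUND";
        if (cause & CAUSE_INE) return "FPE_FLTRES";
        return "__SI_FAULT";
}

int main(void)
{
        /* Overflow and Inexact raised together: Overflow wins */
        printf("%s\n", fpe_code(CAUSE_OVF | CAUSE_INE));
        return 0;
}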


@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
kvm_clear_c0_guest_status(cop0, ST0_ERL);
vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
kvm_read_c0_guest_epc(cop0));
kvm_clear_c0_guest_status(cop0, ST0_EXL);
vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
kvm_clear_c0_guest_status(cop0, ST0_ERL);
vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
} else {
kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
vcpu->arch.pc);
@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
unsigned long curr_pc;
u32 op, rt;
u32 bytes;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
vcpu->arch.pending_load_cause = cause;
/*
* Find the resume PC now while we have safe and easy access to the
* prior branch instruction, and save it for
* kvm_mips_complete_mmio_load() to restore later.
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
vcpu->arch.io_pc = vcpu->arch.pc;
vcpu->arch.pc = curr_pc;
vcpu->arch.io_gpr = rt;
switch (op) {
@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
goto done;
}
er = update_pc(vcpu, vcpu->arch.pending_load_cause);
if (er == EMULATE_FAIL)
return er;
/* Restore saved resume PC */
vcpu->arch.pc = vcpu->arch.io_pc;
switch (run->mmio.len) {
case 4:
@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
break;
}
if (vcpu->arch.pending_load_cause & CAUSEF_BD)
kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
vcpu->mmio_needed);
done:
return er;
}
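
The shape of the change is a save/restore of the resume PC rather than a deferred branch re-evaluation: update_pc() runs while the branch-delay context is still known, the result is parked in io_pc, and completion simply installs it. A schematic sketch of that pattern, using made-up names that are not the KVM API:

struct vcpu_sketch {
        unsigned long pc;      /* current guest PC */
        unsigned long io_pc;   /* PC to resume at once the MMIO load completes */
};

/* at emulation time, while branch-delay information is still at hand */
void start_mmio_load(struct vcpu_sketch *v, unsigned long next_pc)
{
        v->io_pc = next_pc;    /* remember where to resume */
        /* v->pc stays on the faulting load until the data arrives */
}

/* once the MMIO data is back and the destination GPR has been written */
void complete_mmio_load(struct vcpu_sketch *v)
{
        v->pc = v->io_pc;      /* nothing about the branch needs re-deriving here */
}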


@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
int cpu = smp_processor_id();
int i, cpu = smp_processor_id();
unsigned int gasid;
/*
@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
for_each_possible_cpu(i)
if (i != cpu)
vcpu->arch.guest_user_asid[cpu] = 0;
vcpu->arch.last_user_gasid = gasid;
}
}


@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
asid_version_mask(cpu)) {
u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
KVM_ENTRYHI_ASID;
kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
vcpu->arch.guest_user_asid[cpu] =
vcpu->arch.guest_user_mm.context.asid[cpu];
vcpu->arch.last_user_gasid = gasid;
newasid++;
kvm_debug("[%d]: cpu_context: %#lx\n", cpu,


@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
printk("va=%0*lx asid=%0*lx",
vwidth, (entryhi & ~0x1fffUL),
asidwidth, entryhi & asidmask);
pr_cont("va=%0*lx asid=%0*lx",
vwidth, (entryhi & ~0x1fffUL),
asidwidth, entryhi & asidmask);
if (cpu_has_guestid)
printk(" gid=%02lx",
(guestctl1 & MIPS_GCTL1_RID)
pr_cont(" gid=%02lx",
(guestctl1 & MIPS_GCTL1_RID)
>> MIPS_GCTL1_RID_SHIFT);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo0() << 30;
pa = (pa << 6) & PAGE_MASK;
printk("\n\t[");
pr_cont("\n\t[");
if (cpu_has_rixi)
printk("ri=%d xi=%d ",
(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
(entrylo0 & ENTRYLO_V) ? 1 : 0,
(entrylo0 & ENTRYLO_G) ? 1 : 0);
pr_cont("ri=%d xi=%d ",
(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
(entrylo0 & ENTRYLO_V) ? 1 : 0,
(entrylo0 & ENTRYLO_G) ? 1 : 0);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo1() << 30;
pa = (pa << 6) & PAGE_MASK;
if (cpu_has_rixi)
printk("ri=%d xi=%d ",
(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,
(entrylo1 & ENTRYLO_V) ? 1 : 0,
(entrylo1 & ENTRYLO_G) ? 1 : 0);
pr_cont("ri=%d xi=%d ",
(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,
(entrylo1 & ENTRYLO_V) ? 1 : 0,
(entrylo1 & ENTRYLO_G) ? 1 : 0);
}
printk("\n");


@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
*/
printk("Index: %2d ", i);
printk("va=%08lx asid=%08lx"
" [pa=%06lx n=%d d=%d v=%d g=%d]",
entryhi & PAGE_MASK,
entryhi & asid_mask,
entrylo0 & PAGE_MASK,
(entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
pr_cont("va=%08lx asid=%08lx"
" [pa=%06lx n=%d d=%d v=%d g=%d]",
entryhi & PAGE_MASK,
entryhi & asid_mask,
entrylo0 & PAGE_MASK,
(entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
}
}
printk("\n");


@ -23,6 +23,8 @@
* they shouldn't be hard-coded!
*/
#define __ro_after_init __read_mostly
#define L1_CACHE_BYTES 16
#define L1_CACHE_SHIFT 4


@ -368,7 +368,9 @@
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
#define __IGNORE_pkey_mprotect
#define __IGNORE_pkey_alloc
#define __IGNORE_pkey_free
#define LINUX_GATEWAY_ADDR 0x100


@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
if (dev->num_addrs) {
int k;
printk(", additional addresses: ");
pr_cont(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
printk("0x%lx ", dev->addr[k]);
pr_cont("0x%lx ", dev->addr[k]);
}
printk("\n");
pr_cont("\n");
}
/**


@ -100,14 +100,12 @@ set_thread_pointer:
.endr
/* This address must remain fixed at 0x100 for glibc's syscalls to work */
.align 256
.align LINUX_GATEWAY_ADDR
linux_gateway_entry:
gate .+8, %r0 /* become privileged */
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
mfsp %sr7,%r1 /* save user sr7 */
mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
@ -133,6 +131,14 @@ linux_gateway_entry:
depdi 0, 31, 32, %r21
1:
#endif
/* We use a rsm/ssm pair to prevent sr3 from being clobbered
* by external interrupts.
*/
mfsp %sr7,%r1 /* save user sr7 */
rsm PSW_SM_I, %r0 /* disable interrupts */
mtsp %r1,%sr3 /* and store it in sr3 */
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
@ -147,6 +153,7 @@ linux_gateway_entry:
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
@ -474,11 +481,6 @@ lws_start:
comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
/* WARNING: Trashing sr2 and sr3 */
mfsp %sr7,%r1 /* get userspace into sr3 */
mtsp %r1,%sr3
mtsp %r0,%sr2 /* get kernel space into sr2 */
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
@ -627,9 +629,9 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
1: ldw,ma 0(%sr3,%r26), %r28
1: ldw,ma 0(%r26), %r28
sub,<> %r28, %r25, %r0
2: stw,ma %r24, 0(%sr3,%r26)
2: stw,ma %r24, 0(%r26)
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
@ -706,9 +708,9 @@ lws_compare_and_swap_2:
nop
/* 8bit load */
4: ldb 0(%sr3,%r25), %r25
4: ldb 0(%r25), %r25
b cas2_lock_start
5: ldb 0(%sr3,%r24), %r24
5: ldb 0(%r24), %r24
nop
nop
nop
@ -716,9 +718,9 @@ lws_compare_and_swap_2:
nop
/* 16bit load */
6: ldh 0(%sr3,%r25), %r25
6: ldh 0(%r25), %r25
b cas2_lock_start
7: ldh 0(%sr3,%r24), %r24
7: ldh 0(%r24), %r24
nop
nop
nop
@ -726,9 +728,9 @@ lws_compare_and_swap_2:
nop
/* 32bit load */
8: ldw 0(%sr3,%r25), %r25
8: ldw 0(%r25), %r25
b cas2_lock_start
9: ldw 0(%sr3,%r24), %r24
9: ldw 0(%r24), %r24
nop
nop
nop
@ -737,14 +739,14 @@ lws_compare_and_swap_2:
/* 64bit load */
#ifdef CONFIG_64BIT
10: ldd 0(%sr3,%r25), %r25
11: ldd 0(%sr3,%r24), %r24
10: ldd 0(%r25), %r25
11: ldd 0(%r24), %r24
#else
/* Load new value into r22/r23 - high/low */
10: ldw 0(%sr3,%r25), %r22
11: ldw 4(%sr3,%r25), %r23
10: ldw 0(%r25), %r22
11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
12: flddx 0(%sr3,%r24), %fr4
12: flddx 0(%r24), %fr4
#endif
cas2_lock_start:
@ -794,30 +796,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
13: ldb,ma 0(%sr3,%r26), %r29
13: ldb,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
14: stb,ma %r24, 0(%sr3,%r26)
14: stb,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
15: ldh,ma 0(%sr3,%r26), %r29
15: ldh,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
16: sth,ma %r24, 0(%sr3,%r26)
16: sth,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
17: ldw,ma 0(%sr3,%r26), %r29
17: ldw,ma 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
18: stw,ma %r24, 0(%sr3,%r26)
18: stw,ma %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@ -825,22 +827,22 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
19: ldd,ma 0(%sr3,%r26), %r29
19: ldd,ma 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
20: std,ma %r24, 0(%sr3,%r26)
20: std,ma %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
19: ldw,ma 0(%sr3,%r26), %r29
19: ldw,ma 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
20: ldw,ma 4(%sr3,%r26), %r29
20: ldw,ma 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
21: fstdx %fr4, 0(%sr3,%r26)
21: fstdx %fr4, 0(%r26)
copy %r0, %r28
#endif


@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
unsigned long s = (__force u32)sum;
@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
__u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}


@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
1: cmp cr0,r0,r0; \
1: cmpd cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .


@ -93,6 +93,10 @@
ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
#define __LOAD_HANDLER(reg, label) \
ld reg,PACAKBASE(r13); \
ori reg,reg,(ABS_ADDR(label))@l;
/* Exception register prefixes */
#define EXC_HV H
#define EXC_STD
@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
#ifdef CONFIG_RELOCATABLE
#define BRANCH_TO_COMMON(reg, label) \
__LOAD_HANDLER(reg, label); \
mtctr reg; \
bctr
#else
#define BRANCH_TO_COMMON(reg, label) \
b label
#endif
#define __KVM_HANDLER_PROLOG(area, n) \
BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \


@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
return cpumask_subset(mm_cpumask(mm),
topology_sibling_cpumask(smp_processor_id()));
}
static inline int mm_is_thread_local(struct mm_struct *mm)
{
return cpumask_equal(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
}
#else
static inline int mm_is_core_local(struct mm_struct *mm)
{
return 1;
}
static inline int mm_is_thread_local(struct mm_struct *mm)
{
return 1;
}
#endif
#endif /* __KERNEL__ */


@ -95,19 +95,35 @@ __start_interrupts:
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x4100)
#ifdef CONFIG_PPC_P7_NAP
/*
* If running native on arch 2.06 or later, check if we are waking up
* from nap/sleep/winkle, and branch to idle handler.
*/
#define IDLETEST(n) \
BEGIN_FTR_SECTION ; \
mfspr r10,SPRN_SRR1 ; \
rlwinm. r10,r10,47-31,30,31 ; \
beq- 1f ; \
cmpwi cr3,r10,2 ; \
BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
1: \
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#else
#define IDLETEST NOTEST
#endif
EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
/* Running native on arch 2.06 or later, check if we are
* waking up from nap/sleep/winkle.
*/
mfspr r13,SPRN_SRR1
rlwinm. r13,r13,47-31,30,31
beq 9f
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
IDLETEST, 0x100)
cmpwi cr3,r13,2
GET_PACA(r13)
EXC_REAL_END(system_reset, 0x100, 0x200)
EXC_VIRT_NONE(0x4100, 0x4200)
#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
bl pnv_restore_hyp_resource
li r0,PNV_THREAD_RUNNING
@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
blt cr3,2f
b pnv_wakeup_loss
2: b pnv_wakeup_noloss
#endif
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
NOTEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x200)
EXC_VIRT_NONE(0x4100, 0x4200)
EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
#ifdef CONFIG_PPC_PSERIES
@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
#define LOAD_SYSCALL_HANDLER(reg) \
ld reg,PACAKBASE(r13); \
ori reg,reg,(ABS_ADDR(system_call_common))@l;
#define LOAD_SYSCALL_HANDLER(reg) \
__LOAD_HANDLER(reg, system_call_common)
/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 \


@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
if (!stepped) {
WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
"0x%lx will be disabled.", info->address);
perf_event_disable(bp);
perf_event_disable_inatomic(bp);
goto out;
}
/*


@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
* Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state
* r15 - used to load contents of core_idle_state
* r9 - used as a temporary variable
*/
core_idle_lock_held:
@ -99,6 +100,8 @@ core_idle_lock_held:
bne 3b
HMT_MEDIUM
lwarx r15,0,r14
andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
bne core_idle_lock_held
blr
/*
@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're entering idle */
li r4,KVM_HWTHREAD_IN_IDLE
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're entering idle */
li r4,KVM_HWTHREAD_IN_IDLE
/******************************************************/
/* N O T E W E L L ! ! ! N O T E W E L L */
/* The following store to HSTATE_HWTHREAD_STATE(r13) */
/* MUST occur in real mode, i.e. with the MMU off, */
/* and the MMU must stay off until we clear this flag */
/* and test HSTATE_HWTHREAD_REQ(r13) in the system */
/* reset interrupt vector in exceptions-64s.S. */
/* The reason is that another thread can switch the */
/* MMU to a guest context whenever this flag is set */
/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
/* that would potentially cause this thread to start */
/* executing instructions from guest memory in */
/* hypervisor mode, leading to a host crash or data */
/* corruption, or worse. */
/******************************************************/
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f
@ -250,6 +267,12 @@ enter_winkle:
* r3 - requested stop state
*/
power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're entering idle */
li r4,KVM_HWTHREAD_IN_IDLE
/* DO THIS IN REAL MODE! See comment above. */
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
/*
* Check if the requested state is a deep idle state.
*/


@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
/* Ensure that restore_math() will restore */
if (msr_diff & MSR_FP)
current->thread.load_fp = 1;
#ifdef CONFIG_ALIVEC
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
current->thread.load_vec = 1;
#endif


@ -23,6 +23,7 @@
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>
#include "book3s_xics.h"


@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
if (!mm_is_core_local(mm)) {
if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
if (!mm_is_core_local(mm)) {
if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
if (!mm_is_core_local(mm)) {
if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
{
unsigned long pid;
unsigned long addr;
int local = mm_is_core_local(mm);
int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;


@ -363,11 +363,11 @@ out:
static int diag224_get_name_table(void)
{
/* memory must be below 2GB */
diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_cpu_names)
return -ENOMEM;
if (diag224(diag224_cpu_names)) {
kfree(diag224_cpu_names);
free_page((unsigned long) diag224_cpu_names);
return -EOPNOTSUPP;
}
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
static void diag224_delete_name_table(void)
{
kfree(diag224_cpu_names);
free_page((unsigned long) diag224_cpu_names);
}
static int diag224_idx2name(int index, char *name)
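
The allocation change above follows the usual pairing rule: a buffer obtained with __get_free_page() must be released with free_page(), not kfree(), and GFP_DMA steers it into the low zone (below 2 GB on s390), which is what diag 224 requires. A minimal kernel-context sketch of that pattern, with hypothetical names and error handling trimmed:

#include <linux/gfp.h>
#include <linux/errno.h>

static char *names;

static int names_alloc(void)
{
        /* GFP_DMA: page must come from the low (below 2 GB) zone */
        names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
        return names ? 0 : -ENOMEM;
}

static void names_free(void)
{
        free_page((unsigned long) names);   /* pairs with __get_free_page() */
        names = NULL;
}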


@ -12,9 +12,7 @@
#ifndef __ASSEMBLY__
unsigned long return_address(int depth);
#define ftrace_return_address(n) return_address(n)
#define ftrace_return_address(n) __builtin_return_address(n)
void _mcount(void);
void ftrace_caller(void);


@ -192,7 +192,7 @@ struct task_struct;
struct mm_struct;
struct seq_file;
typedef int (*dump_trace_func_t)(void *data, unsigned long address);
typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);


@ -9,6 +9,9 @@
#include <uapi/asm/unistd.h>
#define __IGNORE_time
#define __IGNORE_pkey_mprotect
#define __IGNORE_pkey_alloc
#define __IGNORE_pkey_free
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM


@ -2014,12 +2014,12 @@ void show_code(struct pt_regs *regs)
*ptr++ = '\t';
ptr += print_insn(ptr, code + start, addr);
start += opsize;
printk("%s", buffer);
pr_cont("%s", buffer);
ptr = buffer;
ptr += sprintf(ptr, "\n ");
hops++;
}
printk("\n");
pr_cont("\n");
}
void print_fn_code(unsigned char *code, unsigned long len)
