
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (71 commits)
  powerpc/44x: Update ppc44x_defconfig
  powerpc/watchdog: Make default timeout for Book-E watchdog a Kconfig option
  fsl_rio: Add comments for sRIO registers.
  powerpc/fsl-booke: Add e55xx (64-bit) smp defconfig
  powerpc/fsl-booke: Add p5020 DS board support
  powerpc/fsl-booke64: Use TLB CAMs to cover linear mapping on FSL 64-bit chips
  powerpc/fsl-booke: Add support for FSL Arch v1.0 MMU in setup_page_sizes
  powerpc/fsl-booke: Add support for FSL 64-bit e5500 core
  powerpc/85xx: add cache-sram support
  powerpc/85xx: add ngPIXIS FPGA device tree node to the P1022DS board
  powerpc: Fix compile error with paca code on ppc64e
  powerpc/fsl-booke: Add p3041 DS board support
  oprofile/fsl emb: Don't set MSR[PMM] until after clearing the interrupt.
  powerpc/fsl-booke: Add PCI device ids for P2040/P3041/P5010/P5020 QoirQ chips
  powerpc/mpc8xxx_gpio: Add support for 'qoriq-gpio' controllers
  powerpc/fsl_booke: Add support to boot from core other than 0
  powerpc/p1022: Add probing for individual DMA channels
  powerpc/fsl_soc: Search all global-utilities nodes for rstccr
  powerpc: Fix invalid page flags in create TLB CAM path for PTE_64BIT
  powerpc/mpc83xx: Support for MPC8308 P1M board
  ...

Fix up conflict with the generic irq_work changes in arch/powerpc/kernel/time.c
Linus Torvalds 2010-10-21 21:19:54 -07:00
commit d4429f608a
130 changed files with 3678 additions and 685 deletions

View File

@ -20,7 +20,7 @@
#include <string.h>
/* CHRP note section */
char arch[] = "PowerPC";
static const char arch[] = "PowerPC";
#define N_DESCR 6
unsigned int descr[N_DESCR] = {
@ -33,7 +33,7 @@ unsigned int descr[N_DESCR] = {
};
/* RPA note section */
char rpaname[] = "IBM,RPA-Client-Config";
static const char rpaname[] = "IBM,RPA-Client-Config";
/*
* Note: setting ignore_my_client_config *should* mean that OF ignores

View File

@ -0,0 +1,254 @@
/*
* Device Tree for Bluestone (APM821xx) board.
*
* Copyright (c) 2010, Applied Micro Circuits Corporation
* Author: Tirumala R Marri <tmarri@apm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
*/
/dts-v1/;
/ {
#address-cells = <2>;
#size-cells = <1>;
model = "apm,bluestone";
compatible = "apm,bluestone";
dcr-parent = <&{/cpus/cpu@0}>;
aliases {
ethernet0 = &EMAC0;
serial0 = &UART0;
serial1 = &UART1;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
model = "PowerPC,apm821xx";
reg = <0x00000000>;
clock-frequency = <0>; /* Filled in by U-Boot */
timebase-frequency = <0>; /* Filled in by U-Boot */
i-cache-line-size = <32>;
d-cache-line-size = <32>;
i-cache-size = <32768>;
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
next-level-cache = <&L2C0>;
};
};
memory {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000>; /* Filled in by U-Boot */
};
UIC0: interrupt-controller0 {
compatible = "ibm,uic";
interrupt-controller;
cell-index = <0>;
dcr-reg = <0x0c0 0x009>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
};
UIC1: interrupt-controller1 {
compatible = "ibm,uic";
interrupt-controller;
cell-index = <1>;
dcr-reg = <0x0d0 0x009>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
interrupt-parent = <&UIC0>;
};
UIC2: interrupt-controller2 {
compatible = "ibm,uic";
interrupt-controller;
cell-index = <2>;
dcr-reg = <0x0e0 0x009>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
interrupts = <0xa 0x4 0xb 0x4>; /* cascade */
interrupt-parent = <&UIC0>;
};
UIC3: interrupt-controller3 {
compatible = "ibm,uic";
interrupt-controller;
cell-index = <3>;
dcr-reg = <0x0f0 0x009>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
interrupts = <0x10 0x4 0x11 0x4>; /* cascade */
interrupt-parent = <&UIC0>;
};
SDR0: sdr {
compatible = "ibm,sdr-apm821xx";
dcr-reg = <0x00e 0x002>;
};
CPR0: cpr {
compatible = "ibm,cpr-apm821xx";
dcr-reg = <0x00c 0x002>;
};
plb {
compatible = "ibm,plb4";
#address-cells = <2>;
#size-cells = <1>;
ranges;
clock-frequency = <0>; /* Filled in by U-Boot */
SDRAM0: sdram {
compatible = "ibm,sdram-apm821xx";
dcr-reg = <0x010 0x002>;
};
MAL0: mcmal {
compatible = "ibm,mcmal2";
descriptor-memory = "ocm";
dcr-reg = <0x180 0x062>;
num-tx-chans = <1>;
num-rx-chans = <1>;
#address-cells = <0>;
#size-cells = <0>;
interrupt-parent = <&UIC2>;
interrupts = < /*TXEOB*/ 0x6 0x4
/*RXEOB*/ 0x7 0x4
/*SERR*/ 0x3 0x4
/*TXDE*/ 0x4 0x4
/*RXDE*/ 0x5 0x4>;
};
POB0: opb {
compatible = "ibm,opb";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0xb0000000 0x00000004 0xb0000000 0x50000000>;
clock-frequency = <0>; /* Filled in by U-Boot */
EBC0: ebc {
compatible = "ibm,ebc";
dcr-reg = <0x012 0x002>;
#address-cells = <2>;
#size-cells = <1>;
clock-frequency = <0>; /* Filled in by U-Boot */
/* ranges property is supplied by U-Boot */
ranges = < 0x00000003 0x00000000 0xe0000000 0x8000000>;
interrupts = <0x6 0x4>;
interrupt-parent = <&UIC1>;
nor_flash@0,0 {
compatible = "amd,s29gl512n", "cfi-flash";
bank-width = <2>;
reg = <0x00000000 0x00000000 0x00400000>;
#address-cells = <1>;
#size-cells = <1>;
partition@0 {
label = "kernel";
reg = <0x00000000 0x00180000>;
};
partition@180000 {
label = "env";
reg = <0x00180000 0x00020000>;
};
partition@1a0000 {
label = "u-boot";
reg = <0x001a0000 0x00060000>;
};
};
};
UART0: serial@ef600300 {
device_type = "serial";
compatible = "ns16550";
reg = <0xef600300 0x00000008>;
virtual-reg = <0xef600300>;
clock-frequency = <0>; /* Filled in by U-Boot */
current-speed = <0>; /* Filled in by U-Boot */
interrupt-parent = <&UIC1>;
interrupts = <0x1 0x4>;
};
IIC0: i2c@ef600700 {
compatible = "ibm,iic";
reg = <0xef600700 0x00000014>;
interrupt-parent = <&UIC0>;
interrupts = <0x2 0x4>;
};
IIC1: i2c@ef600800 {
compatible = "ibm,iic";
reg = <0xef600800 0x00000014>;
interrupt-parent = <&UIC0>;
interrupts = <0x3 0x4>;
};
RGMII0: emac-rgmii@ef601500 {
compatible = "ibm,rgmii";
reg = <0xef601500 0x00000008>;
has-mdio;
};
TAH0: emac-tah@ef601350 {
compatible = "ibm,tah";
reg = <0xef601350 0x00000030>;
};
EMAC0: ethernet@ef600c00 {
device_type = "network";
compatible = "ibm,emac4sync";
interrupt-parent = <&EMAC0>;
interrupts = <0x0 0x1>;
#interrupt-cells = <1>;
#address-cells = <0>;
#size-cells = <0>;
interrupt-map = </*Status*/ 0x0 &UIC2 0x10 0x4
/*Wake*/ 0x1 &UIC2 0x14 0x4>;
reg = <0xef600c00 0x000000c4>;
local-mac-address = [000000000000]; /* Filled in by U-Boot */
mal-device = <&MAL0>;
mal-tx-channel = <0>;
mal-rx-channel = <0>;
cell-index = <0>;
max-frame-size = <9000>;
rx-fifo-size = <16384>;
tx-fifo-size = <2048>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
rgmii-channel = <0>;
tah-device = <&TAH0>;
tah-channel = <0>;
has-inverted-stacr-oc;
has-new-stacr-staopc;
};
};
};
};

View File

@ -0,0 +1,332 @@
/*
* mpc8308_p1m Device Tree Source
*
* Copyright 2010 Ilya Yanok, Emcraft Systems, yanok@emcraft.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/dts-v1/;
/ {
compatible = "denx,mpc8308_p1m";
#address-cells = <1>;
#size-cells = <1>;
aliases {
ethernet0 = &enet0;
ethernet1 = &enet1;
serial0 = &serial0;
serial1 = &serial1;
pci0 = &pci0;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
PowerPC,8308@0 {
device_type = "cpu";
reg = <0x0>;
d-cache-line-size = <32>;
i-cache-line-size = <32>;
d-cache-size = <16384>;
i-cache-size = <16384>;
timebase-frequency = <0>; // from bootloader
bus-frequency = <0>; // from bootloader
clock-frequency = <0>; // from bootloader
};
};
memory {
device_type = "memory";
reg = <0x00000000 0x08000000>; // 128MB at 0
};
localbus@e0005000 {
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,mpc8315-elbc", "fsl,elbc", "simple-bus";
reg = <0xe0005000 0x1000>;
interrupts = <77 0x8>;
interrupt-parent = <&ipic>;
ranges = <0x0 0x0 0xfc000000 0x04000000
0x1 0x0 0xfbff0000 0x00008000
0x2 0x0 0xfbff8000 0x00008000>;
flash@0,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x0 0x0 0x4000000>;
bank-width = <2>;
device-width = <1>;
u-boot@0 {
reg = <0x0 0x60000>;
read-only;
};
env@60000 {
reg = <0x60000 0x20000>;
};
env1@80000 {
reg = <0x80000 0x20000>;
};
kernel@a0000 {
reg = <0xa0000 0x200000>;
};
dtb@2a0000 {
reg = <0x2a0000 0x20000>;
};
ramdisk@2c0000 {
reg = <0x2c0000 0x640000>;
};
user@700000 {
reg = <0x700000 0x3900000>;
};
};
can@1,0 {
compatible = "nxp,sja1000";
reg = <0x1 0x0 0x80>;
interrupts = <18 0x8>;
interrupt-parent = <&ipic>;
};
cpld@2,0 {
compatible = "denx,mpc8308_p1m-cpld";
reg = <0x2 0x0 0x8>;
interrupts = <48 0x8>;
interrupt-parent = <&ipic>;
};
};
immr@e0000000 {
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
compatible = "fsl,mpc8308-immr", "simple-bus";
ranges = <0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;
i2c@3000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl-i2c";
reg = <0x3000 0x100>;
interrupts = <14 0x8>;
interrupt-parent = <&ipic>;
dfsrr;
fram@50 {
compatible = "ramtron,24c64";
reg = <0x50>;
};
};
i2c@3100 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl-i2c";
reg = <0x3100 0x100>;
interrupts = <15 0x8>;
interrupt-parent = <&ipic>;
dfsrr;
pwm@28 {
compatible = "maxim,ds1050";
reg = <0x28>;
};
sensor@48 {
compatible = "maxim,max6625";
reg = <0x48>;
};
sensor@49 {
compatible = "maxim,max6625";
reg = <0x49>;
};
sensor@4b {
compatible = "maxim,max6625";
reg = <0x4b>;
};
};
usb@23000 {
compatible = "fsl-usb2-dr";
reg = <0x23000 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <&ipic>;
interrupts = <38 0x8>;
dr_mode = "peripheral";
phy_type = "ulpi";
};
enet0: ethernet@24000 {
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x24000 0x1000>;
cell-index = <0>;
device_type = "network";
model = "eTSEC";
compatible = "gianfar";
reg = <0x24000 0x1000>;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <32 0x8 33 0x8 34 0x8>;
interrupt-parent = <&ipic>;
phy-handle = < &phy1 >;
mdio@520 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,gianfar-mdio";
reg = <0x520 0x20>;
phy1: ethernet-phy@1 {
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x1>;
device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&ipic>;
interrupts = <19 0x8>;
reg = <0x2>;
device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
};
};
};
enet1: ethernet@25000 {
#address-cells = <1>;
#size-cells = <1>;
cell-index = <1>;
device_type = "network";
model = "eTSEC";
compatible = "gianfar";
reg = <0x25000 0x1000>;
ranges = <0x0 0x25000 0x1000>;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 0x8 36 0x8 37 0x8>;
interrupt-parent = <&ipic>;
phy-handle = < &phy2 >;
mdio@520 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,gianfar-tbi";
reg = <0x520 0x20>;
tbi1: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
};
};
};
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
compatible = "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <133333333>;
interrupts = <9 0x8>;
interrupt-parent = <&ipic>;
};
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
compatible = "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <133333333>;
interrupts = <10 0x8>;
interrupt-parent = <&ipic>;
};
gpio@c00 {
#gpio-cells = <2>;
compatible = "fsl,mpc8308-gpio", "fsl,mpc8349-gpio";
reg = <0xc00 0x18>;
interrupts = <74 0x8>;
interrupt-parent = <&ipic>;
gpio-controller;
};
timer@500 {
compatible = "fsl,mpc8308-gtm", "fsl,gtm";
reg = <0x500 0x100>;
interrupts = <90 8 78 8 84 8 72 8>;
interrupt-parent = <&ipic>;
clock-frequency = <133333333>;
};
/* IPIC
* interrupts cell = <intr #, sense>
* sense values match linux IORESOURCE_IRQ_* defines:
* sense == 8: Level, low assertion
* sense == 2: Edge, high-to-low change
*/
ipic: interrupt-controller@700 {
compatible = "fsl,ipic";
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
reg = <0x700 0x100>;
device_type = "ipic";
};
ipic-msi@7c0 {
compatible = "fsl,ipic-msi";
reg = <0x7c0 0x40>;
msi-available-ranges = <0x0 0x100>;
interrupts = < 0x43 0x8
0x4 0x8
0x51 0x8
0x52 0x8
0x56 0x8
0x57 0x8
0x58 0x8
0x59 0x8 >;
interrupt-parent = < &ipic >;
};
};
pci0: pcie@e0009000 {
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
device_type = "pci";
compatible = "fsl,mpc8308-pcie", "fsl,mpc8314-pcie";
reg = <0xe0009000 0x00001000
0xb0000000 0x01000000>;
ranges = <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
0x01000000 0 0x00000000 0xb1000000 0 0x00800000>;
bus-range = <0 0>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &ipic 1 8>;
interrupts = <0x1 0x8>;
interrupt-parent = <&ipic>;
clock-frequency = <0>;
pcie@0 {
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
reg = <0 0 0 0 0>;
ranges = <0x02000000 0 0xa0000000
0x02000000 0 0xa0000000
0 0x10000000
0x01000000 0 0x00000000
0x01000000 0 0x00000000
0 0x00800000>;
};
};
};

View File

@ -148,6 +148,17 @@
label = "reserved-nand";
};
};
board-control@3,0 {
compatible = "fsl,p1022ds-pixis";
reg = <3 0 0x30>;
interrupt-parent = <&mpic>;
/*
* IRQ8 is generated if the "EVENT" switch is pressed
* and PX_CTL[EVESEL] is set to 00.
*/
interrupts = <8 8>;
};
};
soc@fffe00000 {

View File

@ -0,0 +1,68 @@
CONFIG_44x=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_PCI_QUIRKS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_BLUESTONE=y
# CONFIG_EBONY is not set
# CONFIG_KVM_GUEST is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SPARSE_IRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_IBM_NEW_EMAC=y
CONFIG_IBM_NEW_EMAC_RXB=256
CONFIG_IBM_NEW_EMAC_TXB=256
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IBM_IIC=y
CONFIG_SENSORS_AD7414=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS=y

View File

@ -0,0 +1,84 @@
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
# CONFIG_VIRT_CPU_ACCOUNTING is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EMBEDDED=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_P5020_DS=y
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_SPARSE_IRQ=y
# CONFIG_PCI is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_EEPROM_LEGACY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_I2C=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_UTF8=m
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=m
CONFIG_LIBCRC32C=m
CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_VIRQ_DEBUG=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_TALITOS=y

View File

@ -12,6 +12,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BAMBOO=y
CONFIG_BLUESTONE=y
CONFIG_SAM440EP=y
CONFIG_SEQUOIA=y
CONFIG_TAISHAN=y
@ -97,14 +98,17 @@ CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=m
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_INOTIFY=y
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=m
CONFIG_UBIFS_FS_XATTR=y
CONFIG_LOGFS=m
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZO=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
@ -116,11 +120,8 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_VIRTUALIZATION=y

View File

@ -18,6 +18,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_P5020_DS=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
@ -256,7 +257,6 @@ CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=m
@ -290,7 +290,6 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=m
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
@ -384,7 +383,6 @@ CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m

View File

@ -52,12 +52,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
extern __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err, int *dst_err);
#ifdef __powerpc64__
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
int len, __wsum sum, int *err_ptr);
#else
/*
* the same as csum_partial, but copies from src to dst while it
* checksums.
*/
#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL)
#endif
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
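
As a rough illustration of how the 64-bit helpers declared above are meant to be used (the wrapper name, buffer arguments and error convention here are invented for the sketch, not part of the patch):

/*
 * Illustrative only: copy a user buffer into kernel memory while folding
 * it into a running checksum, via the prototype declared above.
 */
static __wsum copy_and_sum_from_user(const void __user *ubuf, void *kbuf,
				     int len, __wsum sum)
{
	int err = 0;

	sum = csum_and_copy_from_user(ubuf, kbuf, len, sum, &err);
	if (err)
		return 0;	/* this sketch treats 0 as "copy faulted" */
	return sum;
}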

View File

@ -143,7 +143,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
* We can't access below the stack pointer in the 32bit ABI and
* can access 288 bytes in the 64bit ABI
*/
if (!(test_thread_flag(TIF_32BIT)))
if (!is_32bit_task())
usp -= 288;
return (void __user *) (usp - len);
@ -213,7 +213,7 @@ struct compat_shmid64_ds {
static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
return is_32bit_task();
}
#endif /* __KERNEL__ */
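
The is_32bit_task() helper used here (and in the elf.h, page_64.h, processor.h and ptrace hunks below) is assumed to reduce to the same TIF_32BIT test it replaces; a sketch of that assumption:

/* Assumed definition, mirroring the open-coded flag test it replaces. */
#ifdef CONFIG_PPC64
#define is_32bit_task()	(test_thread_flag(TIF_32BIT))
#else
#define is_32bit_task()	(1)
#endif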

View File

@ -198,6 +198,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
#ifndef __ASSEMBLY__
@ -392,28 +393,31 @@ extern const char *powerpc_base_platform;
CPU_FTR_MMCRA | CPU_FTR_CTRL)
#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ)
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
CPU_FTR_CP_USE_DCBTZ)
CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR)
CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD)
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT)
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \

View File

@ -127,19 +127,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
return dma_ops->dma_supported(dev, mask);
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (unlikely(dma_ops == NULL))
return -EIO;
if (dma_ops->set_dma_mask != NULL)
return dma_ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
return 0;
}
extern int dma_set_mask(struct device *dev, u64 dma_mask);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)

View File

@ -250,7 +250,7 @@ do { \
* the 64bit ABI has never had these issues, don't enable the workaround
* even if we have an executable stack.
*/
# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
(exec_stk == EXSTACK_DEFAULT) : 0)
#else
# define SET_PERSONALITY(ex) \

View File

@ -137,7 +137,8 @@
li r10,0; \
ld r11,exception_marker@toc(r2); \
std r10,RESULT(r1); /* clear regs->result */ \
std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
ACCOUNT_STOLEN_TIME
/*
* Exception vectors.

View File

@ -0,0 +1,48 @@
/*
* Copyright 2009 Freescale Semiconductor, Inc.
*
* Cache SRAM handling for QorIQ platform
*
* Author: Vivek Mahajan <vivek.mahajan@freescale.com>
* This file is derived from the original work done
* by Sylvain Munaut for the Bestcomm SRAM allocator.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
#define __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
#include <asm/rheap.h>
#include <linux/spinlock.h>
/*
* Cache-SRAM
*/
struct mpc85xx_cache_sram {
phys_addr_t base_phys;
void *base_virt;
unsigned int size;
rh_info_t *rh;
spinlock_t lock;
};
extern void mpc85xx_cache_sram_free(void *ptr);
extern void *mpc85xx_cache_sram_alloc(unsigned int size,
phys_addr_t *phys, unsigned int align);
#endif /* __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ */
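
A minimal, hypothetical consumer of the two calls declared above (the function and variable names, the sizes, and the NULL-on-failure check are assumptions made for the example):

#include <linux/errno.h>
#include <asm/fsl_85xx_cache_sram.h>

static void *sram_buf;
static phys_addr_t sram_phys;

static int example_sram_init(void)
{
	/* carve a 4 KiB, 4 KiB-aligned region out of the cache-SRAM pool */
	sram_buf = mpc85xx_cache_sram_alloc(4096, &sram_phys, 4096);
	if (!sram_buf)
		return -ENOMEM;
	return 0;
}

static void example_sram_exit(void)
{
	mpc85xx_cache_sram_free(sram_buf);
}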

View File

@ -91,6 +91,7 @@ extern void machine_kexec_simple(struct kimage *image);
extern void crash_kexec_secondary(struct pt_regs *regs);
extern int overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
#else /* !CONFIG_KEXEC */
static inline int kexec_sr_activated(int cpu) { return 0; }

View File

@ -82,7 +82,7 @@ FPD_THREE_IN(fmadd)
FPD_THREE_IN(fnmsub)
FPD_THREE_IN(fnmadd)
extern void kvm_cvt_fd(u32 *from, u64 *to, u64 *fpscr);
extern void kvm_cvt_df(u64 *from, u32 *to, u64 *fpscr);
extern void kvm_cvt_fd(u32 *from, u64 *to);
extern void kvm_cvt_df(u64 *from, u32 *to);
#endif

View File

@ -153,6 +153,8 @@ struct lppaca {
extern struct lppaca lppaca[];
#define lppaca_of(cpu) (*paca[cpu].lppaca_ptr)
/*
* SLB shadow buffer structure as defined in the PAPR. The save_area
* contains adjacent ESID and VSID pairs for each shadowed SLB. The
@ -170,6 +172,33 @@ struct slb_shadow {
extern struct slb_shadow slb_shadow[];
/*
* Layout of entries in the hypervisor's dispatch trace log buffer.
*/
struct dtl_entry {
u8 dispatch_reason;
u8 preempt_reason;
u16 processor_id;
u32 enqueue_to_dispatch_time;
u32 ready_to_enqueue_time;
u32 waiting_to_ready_time;
u64 timebase;
u64 fault_addr;
u64 srr0;
u64 srr1;
};
#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
/*
* When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls
* reading from the dispatch trace log. If other code wants to consume
* DTL entries, it can set this pointer to a function that will get
* called once for each DTL entry that gets processed.
*/
extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */
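
Per the comment above, dtl_consumer is the hook other code sets to see each dispatch-trace-log entry as it is processed; a hypothetical consumer (only the hook pointer and the struct fields come from the header above, the rest is invented) would look roughly like:

/* Hypothetical consumer: tally dispatch latencies as entries are processed. */
static u64 total_dispatch_latency;

static void example_dtl_consumer(struct dtl_entry *entry, u64 index)
{
	total_dispatch_latency += entry->enqueue_to_dispatch_time;
}

static void example_enable_dtl_consumer(void)
{
	dtl_consumer = example_dtl_consumer;
}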

View File

@ -102,6 +102,9 @@ struct machdep_calls {
void (*pci_dma_dev_setup)(struct pci_dev *dev);
void (*pci_dma_bus_setup)(struct pci_bus *bus);
/* Platform set_dma_mask override */
int (*dma_set_mask)(struct device *dev, u64 dma_mask);
int (*probe)(void);
void (*setup_arch)(void); /* Optional, may be NULL */
void (*init_early)(void);

View File

@ -114,6 +114,17 @@
#define MAS7_RPN 0xFFFFFFFF
/* Bit definitions for MMUCFG */
#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */
#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */
#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */
#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */
#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */
#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */
#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */
#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */
/* Bit definitions for MMUCSR0 */
#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */
#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */
@ -133,6 +144,10 @@
#define TLBnCFG_GTWE 0x00010000 /* Guest can write */
#define TLBnCFG_IND 0x00020000 /* IND entries supported */
#define TLBnCFG_PT 0x00040000 /* Can load from page table */
#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */
#define TLBnCFG_MINSIZE_SHIFT 20
#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */
#define TLBnCFG_MAXSIZE_SHIFT 16
#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
/* TLBnPS encoding */

View File

@ -85,6 +85,8 @@ struct paca_struct {
u8 kexec_state; /* set when kexec down has irqs off */
#ifdef CONFIG_PPC_STD_MMU_64
struct slb_shadow *slb_shadow_ptr;
struct dtl_entry *dispatch_log;
struct dtl_entry *dispatch_log_end;
/*
* Now, starting in cacheline 2, the exception save areas
@ -134,8 +136,14 @@ struct paca_struct {
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
u64 system_time; /* accumulated system TB ticks */
u64 startpurr; /* PURR/TB value snapshot */
u64 user_time_scaled; /* accumulated usermode SPURR ticks */
u64 starttime; /* TB value snapshot */
u64 starttime_user; /* TB value on exit to usermode */
u64 startspurr; /* SPURR value snapshot */
u64 utime_sspurr; /* ->user_time when ->startspurr set */
u64 stolen_time; /* TB ticks taken by hypervisor */
u64 dtl_ridx; /* read index in dispatch log */
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
/* We use this to store guest state in */

View File

@ -163,7 +163,7 @@ do { \
#endif /* !CONFIG_HUGETLB_PAGE */
#define VM_DATA_DEFAULT_FLAGS \
(test_thread_flag(TIF_32BIT) ? \
(is_32bit_task() ? \
VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
/*
@ -179,7 +179,7 @@ do { \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_STACK_DEFAULT_FLAGS \
(test_thread_flag(TIF_32BIT) ? \
(is_32bit_task() ? \
VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
#include <asm-generic/getorder.h>

View File

@ -28,8 +28,8 @@ extern void find_and_init_phbs(void);
extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
#define BUID_HI(buid) ((buid) >> 32)
#define BUID_LO(buid) ((buid) & 0xffffffff)
#define BUID_HI(buid) upper_32_bits(buid)
#define BUID_LO(buid) lower_32_bits(buid)
/* PCI device_node operations */
struct device_node;

View File

@ -9,6 +9,7 @@
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>
#ifndef __ASSEMBLY__
#error __FILE__ should only be used in assembler files
@ -26,17 +27,13 @@
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
beq 2f; /* if from kernel mode */ \
BEGIN_FTR_SECTION; \
mfspr ra,SPRN_PURR; /* get processor util. reg */ \
END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
BEGIN_FTR_SECTION; \
MFTB(ra); /* or get TB if no PURR */ \
END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
ld rb,PACA_STARTPURR(r13); \
std ra,PACA_STARTPURR(r13); \
MFTB(ra); /* get timebase */ \
ld rb,PACA_STARTTIME_USER(r13); \
std ra,PACA_STARTTIME(r13); \
subf rb,rb,ra; /* subtract start value */ \
ld ra,PACA_USER_TIME(r13); \
add ra,ra,rb; /* add on to user time */ \
@ -44,19 +41,34 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
2:
#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
BEGIN_FTR_SECTION; \
mfspr ra,SPRN_PURR; /* get processor util. reg */ \
END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
BEGIN_FTR_SECTION; \
MFTB(ra); /* or get TB if no PURR */ \
END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
ld rb,PACA_STARTPURR(r13); \
std ra,PACA_STARTPURR(r13); \
MFTB(ra); /* get timebase */ \
ld rb,PACA_STARTTIME(r13); \
std ra,PACA_STARTTIME_USER(r13); \
subf rb,rb,ra; /* subtract start value */ \
ld ra,PACA_SYSTEM_TIME(r13); \
add ra,ra,rb; /* add on to user time */ \
std ra,PACA_SYSTEM_TIME(r13);
#endif
add ra,ra,rb; /* add on to system time */ \
std ra,PACA_SYSTEM_TIME(r13)
#ifdef CONFIG_PPC_SPLPAR
#define ACCOUNT_STOLEN_TIME \
BEGIN_FW_FTR_SECTION; \
beq 33f; \
/* from user - see if there are any DTL entries to process */ \
ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \
ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \
ld r10,LPPACA_DTLIDX(r10); /* get log write index */ \
cmpd cr1,r11,r10; \
beq+ cr1,33f; \
bl .accumulate_stolen_time; \
33: \
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#else /* CONFIG_PPC_SPLPAR */
#define ACCOUNT_STOLEN_TIME
#endif /* CONFIG_PPC_SPLPAR */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
/*
* Macros for storing registers into and loading registers from

View File

@ -118,7 +118,7 @@ extern struct task_struct *last_task_used_spe;
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
#endif
@ -128,7 +128,7 @@ extern struct task_struct *last_task_used_spe;
#define STACK_TOP_USER64 TASK_SIZE_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32
#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
#define STACK_TOP (is_32bit_task() ? \
STACK_TOP_USER32 : STACK_TOP_USER64)
#define STACK_TOP_MAX STACK_TOP_USER64

View File

@ -171,6 +171,13 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
/* Make modules code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
/*
* Don't just check for any non zero bits in __PAGE_USER, since for book3e
* and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
* _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
*/
#define pte_user(val) ((val & _PAGE_USER) == _PAGE_USER)
/* Advertise special mapping type for AGP */
#define PAGE_AGP (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

View File

@ -187,6 +187,7 @@ extern void rtas_progress(char *s, unsigned short hex);
extern void rtas_initialize(void);
extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
extern int rtas_ibm_suspend_me(struct rtas_args *);
struct rtc_time;
extern unsigned long rtas_get_boot_time(void);

View File

@ -329,3 +329,22 @@ COMPAT_SYS(rt_tgsigqueueinfo)
SYSCALL(fanotify_init)
COMPAT_SYS(fanotify_mark)
SYSCALL_SPU(prlimit64)
SYSCALL_SPU(socket)
SYSCALL_SPU(bind)
SYSCALL_SPU(connect)
SYSCALL_SPU(listen)
SYSCALL_SPU(accept)
SYSCALL_SPU(getsockname)
SYSCALL_SPU(getpeername)
SYSCALL_SPU(socketpair)
SYSCALL_SPU(send)
SYSCALL_SPU(sendto)
COMPAT_SYS_SPU(recv)
COMPAT_SYS_SPU(recvfrom)
SYSCALL_SPU(shutdown)
COMPAT_SYS_SPU(setsockopt)
COMPAT_SYS_SPU(getsockopt)
COMPAT_SYS_SPU(sendmsg)
COMPAT_SYS_SPU(recvmsg)
COMPAT_SYS_SPU(recvmmsg)
SYSCALL_SPU(accept4)

View File

@ -154,8 +154,8 @@ extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);

View File

@ -34,7 +34,6 @@ extern void to_tm(int tim, struct rtc_time * tm);
extern void GregorianDay(struct rtc_time *tm);
extern void generic_calibrate_decr(void);
extern void snapshot_timebase(void);
extern void set_dec_cpu6(unsigned int val);
@ -212,12 +211,8 @@ struct cpu_usage {
DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
#if defined(CONFIG_VIRT_CPU_ACCOUNTING)
extern void calculate_steal_time(void);
extern void snapshot_timebases(void);
#define account_process_vtime(tsk) account_process_tick(tsk, 0)
#else
#define calculate_steal_time() do { } while (0)
#define snapshot_timebases() do { } while (0)
#define account_process_vtime(tsk) do { } while (0)
#endif

View File

@ -348,10 +348,29 @@
#define __NR_fanotify_init 323
#define __NR_fanotify_mark 324
#define __NR_prlimit64 325
#define __NR_socket 326
#define __NR_bind 327
#define __NR_connect 328
#define __NR_listen 329
#define __NR_accept 330
#define __NR_getsockname 331
#define __NR_getpeername 332
#define __NR_socketpair 333
#define __NR_send 334
#define __NR_sendto 335
#define __NR_recv 336
#define __NR_recvfrom 337
#define __NR_shutdown 338
#define __NR_setsockopt 339
#define __NR_getsockopt 340
#define __NR_sendmsg 341
#define __NR_recvmsg 342
#define __NR_recvmmsg 343
#define __NR_accept4 344
#ifdef __KERNEL__
#define __NR_syscalls 326
#define __NR_syscalls 345
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
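
These entries give powerpc direct socket syscalls instead of routing everything through sys_socketcall. A userspace illustration of invoking one of the new numbers, assuming the updated headers export __NR_socket (on older kernels the call simply fails with ENOSYS):

#include <unistd.h>
#include <sys/syscall.h>
#include <sys/socket.h>

/* Illustration only: open a TCP socket via the new direct syscall number. */
int open_socket_directly(void)
{
	return syscall(__NR_socket, AF_INET, SOCK_STREAM, 0);
}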

View File

@ -55,7 +55,9 @@ obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_E500) += idle_e500.o
endif
obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
@ -67,7 +69,7 @@ endif
obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_44x) += cpu_setup_44x.o
obj-$(CONFIG_FSL_BOOKE) += cpu_setup_fsl_booke.o dbell.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o
obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
extra-y := head_$(CONFIG_WORD_SIZE).o

View File

@ -889,7 +889,7 @@ int fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
cvt_df(&data.dd, (float *)&data.v[4]);
preempt_enable();
#else
return 0;
@ -933,7 +933,7 @@ int fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_fd((float *)&data.v[4], &data.dd, &current->thread);
cvt_fd((float *)&data.v[4], &data.dd);
preempt_enable();
#else
return 0;

View File

@ -61,7 +61,7 @@
#endif
#endif
#if defined(CONFIG_FSL_BOOKE)
#if defined(CONFIG_PPC_FSL_BOOK3E)
#include "../mm/mmu_decl.h"
#endif
@ -181,17 +181,19 @@ int main(void)
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
DEFINE(SLBSHADOW_STACKESID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
@ -468,7 +470,7 @@ int main(void)
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
#ifdef CONFIG_FSL_BOOKE
#ifdef CONFIG_PPC_FSL_BOOK3E
DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));

View File

@ -35,6 +35,7 @@ _GLOBAL(__setup_cpu_440grx)
_GLOBAL(__setup_cpu_460ex)
_GLOBAL(__setup_cpu_460gt)
_GLOBAL(__setup_cpu_460sx)
_GLOBAL(__setup_cpu_apm821xx)
mflr r4
bl __init_fpu_44x
bl __fixup_440A_mcheck

View File

@ -51,6 +51,7 @@ _GLOBAL(__e500_dcache_setup)
isync
blr
#ifdef CONFIG_PPC32
_GLOBAL(__setup_cpu_e200)
/* enable dedicated debug exception handling resources (Debug APU) */
mfspr r3,SPRN_HID0
@ -72,3 +73,17 @@ _GLOBAL(__setup_cpu_e500mc)
bl __setup_e500mc_ivors
mtlr r4
blr
#endif
/* Right now, restore and setup are the same thing */
_GLOBAL(__restore_cpu_e5500)
_GLOBAL(__setup_cpu_e5500)
mflr r4
bl __e500_icache_setup
bl __e500_dcache_setup
#ifdef CONFIG_PPC_BOOK3E_64
bl .__setup_base_ivors
#else
bl __setup_e500mc_ivors
#endif
mtlr r4
blr

View File

@ -48,6 +48,7 @@ extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec);
extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec);
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@ -66,6 +67,10 @@ extern void __restore_cpu_ppc970(void);
extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power7(void);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_e5500(void);
#endif /* CONFIG_E500 */
/* This table only contains "desktop" CPUs, it needs to be filled with embedded
* ones as well...
@ -1805,6 +1810,20 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 464 in APM821xx */
.pvr_mask = 0xffffff00,
.pvr_value = 0x12C41C80,
.cpu_name = "APM821XX",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_FPU,
.mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_apm821xx,
.machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 476 core */
.pvr_mask = 0xffff0000,
.pvr_value = 0x11a50000,
@ -1891,7 +1910,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.platform = "ppc5554",
}
#endif /* CONFIG_E200 */
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_E500
#ifdef CONFIG_PPC32
{ /* e500 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x80200000,
@ -1946,6 +1967,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
},
#endif /* CONFIG_PPC32 */
{ /* e5500 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x80240000,
.cpu_name = "e5500",
.cpu_features = CPU_FTRS_E500MC,
.cpu_user_features = COMMON_USER_BOOKE,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500mc",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
.cpu_restore = __restore_cpu_e5500,
.machine_check = machine_check_e500mc,
.platform = "ppce5500",
},
#ifdef CONFIG_PPC32
{ /* default match */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
@ -1960,8 +2001,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_e500,
.platform = "powerpc",
}
#endif /* CONFIG_E500 */
#endif /* CONFIG_PPC32 */
#endif /* CONFIG_E500 */
#ifdef CONFIG_PPC_BOOK3E_64
{ /* This is a default entry to get going, to be replaced by

View File

@ -414,18 +414,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crash_kexec_wait_realmode(crashing_cpu);
#endif
for_each_irq(i) {
struct irq_desc *desc = irq_to_desc(i);
if (!desc || !desc->chip || !desc->chip->eoi)
continue;
if (desc->status & IRQ_INPROGRESS)
desc->chip->eoi(i);
if (!(desc->status & IRQ_DISABLED))
desc->chip->shutdown(i);
}
machine_kexec_mask_interrupts();
/*
* Call registered shutdown routines safely. Swap out

View File

@ -74,16 +74,17 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
struct iommu_table *tbl = get_iommu_table_base(dev);
if (!tbl || tbl->it_offset > mask) {
printk(KERN_INFO
"Warning: IOMMU offset too big for device mask\n");
if (tbl)
printk(KERN_INFO
"mask: 0x%08llx, table offset: 0x%08lx\n",
mask, tbl->it_offset);
else
printk(KERN_INFO "mask: 0x%08llx, table unavailable\n",
mask);
if (!tbl) {
dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
", table unavailable\n", mask);
return 0;
}
if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
dev_info(dev, "Warning: IOMMU window too big for device mask\n");
dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
mask, (tbl->it_offset + tbl->it_size) <<
IOMMU_PAGE_SHIFT);
return 0;
} else
return 1;

View File

@ -12,6 +12,7 @@
#include <linux/memblock.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>
/*
* Generic direct DMA implementation
@ -89,7 +90,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
/* Could be improved so platforms can set the limit in case
* they have limited DMA windows
*/
return mask >= (memblock_end_of_DRAM() - 1);
return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
return 1;
#endif
@ -154,6 +155,23 @@ EXPORT_SYMBOL(dma_direct_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
int dma_set_mask(struct device *dev, u64 dma_mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (ppc_md.dma_set_mask)
return ppc_md.dma_set_mask(dev, dma_mask);
if (unlikely(dma_ops == NULL))
return -EIO;
if (dma_ops->set_dma_mask != NULL)
return dma_ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
return 0;
}
EXPORT_SYMBOL(dma_set_mask);
static int __init dma_init(void)
{
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
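
The reworked dma_set_mask() above keeps the usual driver-facing contract (platform hook first, then the dma_ops override, then the generic mask check); a typical, hypothetical probe-time caller might look like:

#include <linux/dma-mapping.h>

/* Illustrative driver fragment: ask for 64-bit DMA, fall back to 32-bit. */
static int example_setup_dma(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;
	return dma_set_mask(dev, DMA_BIT_MASK(32));
}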

View File

@ -97,6 +97,24 @@ system_call_common:
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
beq 33f
/* if from user, see if there are any DTL entries to process */
ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
ld r11,PACA_DTL_RIDX(r13) /* get log read index */
ld r10,LPPACA_DTLIDX(r10) /* get log write index */
cmpd cr1,r11,r10
beq+ cr1,33f
bl .accumulate_stolen_time
REST_GPR(0,r1)
REST_4GPRS(3,r1)
REST_2GPRS(7,r1)
addi r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
#ifdef CONFIG_TRACE_IRQFLAGS
bl .trace_hardirqs_on
REST_GPR(0,r1)
@ -202,7 +220,9 @@ syscall_exit:
bge- syscall_error
syscall_error_cont:
ld r7,_NIP(r1)
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
/*
@ -419,6 +439,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
#endif /* CONFIG_SMP */
/*
* If we optimise away the clear of the reservation in system
* calls because we know the CPU tracks the address of the
* reservation, then we need to clear it here to cover the
* case that the kernel context switch path has no larx
* instructions.
*/
BEGIN_FTR_SECTION
ldarx r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */
@ -576,7 +607,16 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
andi. r0,r3,MSR_RI
beq- unrecov_restore
/*
* Clear the reservation. If we know the CPU tracks the address of
* the reservation then we can potentially save some cycles and use
* a larx. On POWER6 and POWER7 this is significantly faster.
*/
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
ldarx r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
/*
* Clear RI before restoring r13. If we are returning to

View File

@ -163,24 +163,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
/*
* These are used in the alignment trap handler when emulating
* single-precision loads and stores.
* We restore and save the fpscr so the task gets the same result
* and exceptions as if the cpu had performed the load or store.
*/
_GLOBAL(cvt_fd)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
MTFSF_L(0)
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0
stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
blr
_GLOBAL(cvt_df)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
MTFSF_L(0)
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0
stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
blr

View File

@ -152,8 +152,11 @@ _ENTRY(__early_start)
/* Check to see if we're the second processor, and jump
* to the secondary_start code if so
*/
mfspr r24,SPRN_PIR
cmpwi r24,0
lis r24, boot_cpuid@h
ori r24, r24, boot_cpuid@l
lwz r24, 0(r24)
cmpwi r24, -1
mfspr r24,SPRN_PIR
bne __secondary_start
#endif
@ -175,6 +178,9 @@ _ENTRY(__early_start)
li r0,0
stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
rlwinm r22,r1,0,0,31-THREAD_SHIFT /* current thread_info */
stw r24, TI_CPU(r22)
bl early_init
#ifdef CONFIG_RELOCATABLE

View File

@ -587,8 +587,10 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
* this will be fixed once slab is made available early
* instead of the current cruft
*/
if (mem_init_done)
if (mem_init_done) {
of_node_put(host->of_node);
kfree(host);
}
return NULL;
}
irq_map[0].host = host;
@ -1143,7 +1145,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
unsigned long flags;
struct irq_desc *desc;
const char *p;
char none[] = "none";
static const char none[] = "none";
int i;
seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",

View File

@ -56,7 +56,7 @@ static unsigned long get_purr(void)
for_each_possible_cpu(cpu) {
if (firmware_has_feature(FW_FEATURE_ISERIES))
sum_purr += lppaca[cpu].emulated_time_base;
sum_purr += lppaca_of(cpu).emulated_time_base;
else {
struct cpu_usage *cu;
@ -263,7 +263,7 @@ static void parse_ppp_data(struct seq_file *m)
ppp_data.active_system_procs);
/* pool related entries are apropriate for shared configs */
if (lppaca[0].shared_proc) {
if (lppaca_of(0).shared_proc) {
unsigned long pool_idle_time, pool_procs;
seq_printf(m, "pool=%d\n", ppp_data.pool_num);
@ -460,8 +460,8 @@ static void pseries_cmo_data(struct seq_file *m)
return;
for_each_possible_cpu(cpu) {
cmo_faults += lppaca[cpu].cmo_faults;
cmo_fault_time += lppaca[cpu].cmo_fault_time;
cmo_faults += lppaca_of(cpu).cmo_faults;
cmo_fault_time += lppaca_of(cpu).cmo_fault_time;
}
seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
@ -479,8 +479,8 @@ static void splpar_dispatch_data(struct seq_file *m)
unsigned long dispatch_dispersions = 0;
for_each_possible_cpu(cpu) {
dispatches += lppaca[cpu].yield_count;
dispatch_dispersions += lppaca[cpu].dispersion_count;
dispatches += lppaca_of(cpu).yield_count;
dispatch_dispersions += lppaca_of(cpu).dispersion_count;
}
seq_printf(m, "dispatches=%lu\n", dispatches);
@ -545,7 +545,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "partition_potential_processors=%d\n",
partition_potential_processors);
seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc);
seq_printf(m, "slb_size=%d\n", mmu_slb_size);

View File

@ -14,10 +14,34 @@
#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sections.h>
void machine_kexec_mask_interrupts(void) {
unsigned int i;
for_each_irq(i) {
struct irq_desc *desc = irq_to_desc(i);
if (!desc || !desc->chip)
continue;
if (desc->chip->eoi &&
desc->status & IRQ_INPROGRESS)
desc->chip->eoi(i);
if (desc->chip->mask)
desc->chip->mask(i);
if (desc->chip->disable &&
!(desc->status & IRQ_DISABLED))
desc->chip->disable(i);
}
}
void machine_crash_shutdown(struct pt_regs *regs)
{
if (ppc_md.machine_crash_shutdown)

View File

@ -39,6 +39,10 @@ void default_machine_kexec(struct kimage *image)
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
/* mask each interrupt so we are in a more sane state for the
* kexec kernel */
machine_kexec_mask_interrupts();
page_list = image->head;
/* we need both effective and real address here */

View File

@ -26,6 +26,20 @@ extern unsigned long __toc_start;
#ifdef CONFIG_PPC_BOOK3S
/*
* We only have to have statically allocated lppaca structs on
* legacy iSeries, which supports at most 64 cpus.
*/
#ifdef CONFIG_PPC_ISERIES
#if NR_CPUS < 64
#define NR_LPPACAS NR_CPUS
#else
#define NR_LPPACAS 64
#endif
#else /* not iSeries */
#define NR_LPPACAS 1
#endif
/*
* The structure which the hypervisor knows about - this structure
* should not cross a page boundary. The vpa_init/register_vpa call
@ -36,7 +50,7 @@ extern unsigned long __toc_start;
* will suffice to ensure that it doesn't cross a page boundary.
*/
struct lppaca lppaca[] = {
[0 ... (NR_CPUS-1)] = {
[0 ... (NR_LPPACAS-1)] = {
.desc = 0xd397d781, /* "LpPa" */
.size = sizeof(struct lppaca),
.dyn_proc_status = 2,
@ -49,6 +63,54 @@ struct lppaca lppaca[] = {
},
};
static struct lppaca *extra_lppacas;
static long __initdata lppaca_size;
static void allocate_lppacas(int nr_cpus, unsigned long limit)
{
if (nr_cpus <= NR_LPPACAS)
return;
lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) *
(nr_cpus - NR_LPPACAS));
extra_lppacas = __va(memblock_alloc_base(lppaca_size,
PAGE_SIZE, limit));
}
static struct lppaca *new_lppaca(int cpu)
{
struct lppaca *lp;
if (cpu < NR_LPPACAS)
return &lppaca[cpu];
lp = extra_lppacas + (cpu - NR_LPPACAS);
*lp = lppaca[0];
return lp;
}
static void free_lppacas(void)
{
long new_size = 0, nr;
if (!lppaca_size)
return;
nr = num_possible_cpus() - NR_LPPACAS;
if (nr > 0)
new_size = PAGE_ALIGN(nr * sizeof(struct lppaca));
if (new_size >= lppaca_size)
return;
memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size);
lppaca_size = new_size;
}
#else
static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { }
static inline void free_lppacas(void) { }
#endif /* CONFIG_PPC_BOOK3S */
#ifdef CONFIG_PPC_STD_MMU_64
@ -88,7 +150,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL;
#ifdef CONFIG_PPC_BOOK3S
new_paca->lppaca_ptr = &lppaca[cpu];
new_paca->lppaca_ptr = new_lppaca(cpu);
#else
new_paca->kernel_pgd = swapper_pg_dir;
#endif
@ -144,6 +206,8 @@ void __init allocate_pacas(void)
printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
paca_size, nr_cpus, paca);
allocate_lppacas(nr_cpus, limit);
/* Can't use for_each_*_cpu, as they aren't functional yet */
for (cpu = 0; cpu < nr_cpus; cpu++)
initialise_paca(&paca[cpu], cpu);
@ -164,4 +228,6 @@ void __init free_unused_pacas(void)
paca_size - new_size);
paca_size = new_size;
free_lppacas();
}

View File

@ -1090,8 +1090,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
bus->number, bus->self ? pci_name(bus->self) : "PHB");
list_for_each_entry(dev, &bus->devices, bus_list) {
struct dev_archdata *sd = &dev->dev.archdata;
/* Cardbus can call us to add new devices to a bus, so ignore
* those who are already fully discovered
*/
@ -1107,7 +1105,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
/* Hook up default DMA ops */
sd->dma_ops = pci_dma_ops;
set_dma_ops(&dev->dev, pci_dma_ops);
set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
/* Additional platform DMA/iommu setup */

View File

@ -169,9 +169,11 @@ static int p970_marked_instr_event(u64 event)
switch (unit) {
case PM_VPU:
mask = 0x4c; /* byte 0 bits 2,3,6 */
break;
case PM_LSU0:
/* byte 2 bits 0,2,3,4,6; all of byte 1 */
mask = 0x085dff00;
break;
case PM_LSU1L:
mask = 0x50 << 24; /* byte 3 bits 4,6 */
break;

View File

@ -517,7 +517,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
account_system_vtime(current);
account_process_vtime(current);
calculate_steal_time();
/*
* We can't take a PMU exception inside _switch() since there is a
@ -1298,14 +1297,3 @@ unsigned long randomize_et_dyn(unsigned long base)
return ret;
}
#ifdef CONFIG_SMP
int arch_sd_sibling_asym_packing(void)
{
if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
return SD_ASYM_PACKING;
}
return 0;
}
#endif


@ -1681,7 +1681,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
if (!test_thread_flag(TIF_32BIT))
if (!is_32bit_task())
audit_syscall_entry(AUDIT_ARCH_PPC64,
regs->gpr[0],
regs->gpr[3], regs->gpr[4],
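This and several later hunks (vdso.c, the oprofile backtrace code) replace open-coded test_thread_flag(TIF_32BIT) tests with is_32bit_task(). The helper is not part of this excerpt; it presumably reduces to something like the sketch below, which is why the 32-bit build can use it unconditionally:

/* Assumed shape of the helper (the real one lives in asm/thread_info.h):
 * on a 32-bit kernel every task is 32-bit, so only ppc64 needs the flag. */
#ifdef CONFIG_PPC64
#define is_32bit_task()	(test_thread_flag(TIF_32BIT))
#else
#define is_32bit_task()	(1)
#endif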


@ -805,7 +805,7 @@ static void rtas_percpu_suspend_me(void *info)
__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}
static int rtas_ibm_suspend_me(struct rtas_args *args)
int rtas_ibm_suspend_me(struct rtas_args *args)
{
long state;
long rc;
@ -855,7 +855,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
static int rtas_ibm_suspend_me(struct rtas_args *args)
int rtas_ibm_suspend_me(struct rtas_args *args)
{
return -ENOSYS;
}


@ -46,7 +46,7 @@
extern void bootx_init(unsigned long r4, unsigned long phys);
int boot_cpuid;
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
int boot_cpuid_phys;


@ -508,9 +508,6 @@ int __devinit start_secondary(void *unused)
if (smp_ops->take_timebase)
smp_ops->take_timebase();
if (system_state > SYSTEM_BOOTING)
snapshot_timebase();
secondary_cpu_time_init();
ipi_call_lock();
@ -575,11 +572,18 @@ void __init smp_cpus_done(unsigned int max_cpus)
free_cpumask_var(old_mask);
snapshot_timebases();
dump_numa_cpu_topology();
}
int arch_sd_sibling_asym_packing(void)
{
if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
return SD_ASYM_PACKING;
}
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{


@ -161,10 +161,9 @@ extern struct timezone sys_tz;
static long timezone_offset;
unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
static DEFINE_PER_CPU(u64, last_jiffy);
EXPORT_SYMBOL_GPL(ppc_tb_freq);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
@ -185,6 +184,8 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
cputime_t cputime_one_jiffy;
void (*dtl_consumer)(struct dtl_entry *, u64);
static void calc_cputime_factors(void)
{
struct div_result res;
@ -200,62 +201,153 @@ static void calc_cputime_factors(void)
}
/*
* Read the PURR on systems that have it, otherwise the timebase.
* Read the SPURR on systems that have it, otherwise the PURR,
* or if that doesn't exist return the timebase value passed in.
*/
static u64 read_purr(void)
static u64 read_spurr(u64 tb)
{
if (cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
if (cpu_has_feature(CPU_FTR_PURR))
return mfspr(SPRN_PURR);
return mftb();
return tb;
}
#ifdef CONFIG_PPC_SPLPAR
/*
* Scan the dispatch trace log and count up the stolen time.
* Should be called with interrupts disabled.
*/
static u64 scan_dispatch_log(u64 stop_tb)
{
u64 i = local_paca->dtl_ridx;
struct dtl_entry *dtl = local_paca->dtl_curr;
struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
struct lppaca *vpa = local_paca->lppaca_ptr;
u64 tb_delta;
u64 stolen = 0;
u64 dtb;
if (i == vpa->dtl_idx)
return 0;
while (i < vpa->dtl_idx) {
if (dtl_consumer)
dtl_consumer(dtl, i);
dtb = dtl->timebase;
tb_delta = dtl->enqueue_to_dispatch_time +
dtl->ready_to_enqueue_time;
barrier();
if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
/* buffer has overflowed */
i = vpa->dtl_idx - N_DISPATCH_LOG;
dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
continue;
}
if (dtb > stop_tb)
break;
stolen += tb_delta;
++i;
++dtl;
if (dtl == dtl_end)
dtl = local_paca->dispatch_log;
}
local_paca->dtl_ridx = i;
local_paca->dtl_curr = dtl;
return stolen;
}
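scan_dispatch_log() treats the dispatch trace log as a ring of N_DISPATCH_LOG entries: it walks from the locally cached read index dtl_ridx towards the hypervisor-advanced write index vpa->dtl_idx, and resynchronises if the ring was overwritten underneath it. Only three fields of each record are consumed here; a trimmed sketch with just those fields (names taken from the code above, widths assumed, the full layout lives in the lppaca headers) is:

/* Partial, assumed sketch: only the fields scan_dispatch_log() reads. */
struct dtl_entry_fields_used {
	u32	enqueue_to_dispatch_time;	/* ticks spent waiting to be dispatched */
	u32	ready_to_enqueue_time;		/* ticks spent before being enqueued */
	u64	timebase;			/* timebase value when dispatched */
};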
/*
* Read the SPURR on systems that have it, otherwise the purr
* Accumulate stolen time by scanning the dispatch trace log.
* Called on entry from user mode.
*/
static u64 read_spurr(u64 purr)
void accumulate_stolen_time(void)
{
/*
* cpus without PURR won't have a SPURR
* We already know the former when we use this, so tell gcc
*/
if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
return purr;
u64 sst, ust;
sst = scan_dispatch_log(get_paca()->starttime_user);
ust = scan_dispatch_log(get_paca()->starttime);
get_paca()->system_time -= sst;
get_paca()->user_time -= ust;
get_paca()->stolen_time += ust + sst;
}
static inline u64 calculate_stolen_time(u64 stop_tb)
{
u64 stolen = 0;
if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
stolen = scan_dispatch_log(stop_tb);
get_paca()->system_time -= stolen;
}
stolen += get_paca()->stolen_time;
get_paca()->stolen_time = 0;
return stolen;
}
#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
return 0;
}
#endif /* CONFIG_PPC_SPLPAR */
/*
* Account time for a transition between system, hard irq
* or soft irq state.
*/
void account_system_vtime(struct task_struct *tsk)
{
u64 now, nowscaled, delta, deltascaled, sys_time;
u64 now, nowscaled, delta, deltascaled;
unsigned long flags;
u64 stolen, udelta, sys_scaled, user_scaled;
local_irq_save(flags);
now = read_purr();
now = mftb();
nowscaled = read_spurr(now);
delta = now - get_paca()->startpurr;
get_paca()->system_time += now - get_paca()->starttime;
get_paca()->starttime = now;
deltascaled = nowscaled - get_paca()->startspurr;
get_paca()->startpurr = now;
get_paca()->startspurr = nowscaled;
if (!in_interrupt()) {
/* deltascaled includes both user and system time.
* Hence scale it based on the purr ratio to estimate
* the system time */
sys_time = get_paca()->system_time;
if (get_paca()->user_time)
deltascaled = deltascaled * sys_time /
(sys_time + get_paca()->user_time);
delta += sys_time;
get_paca()->system_time = 0;
stolen = calculate_stolen_time(now);
delta = get_paca()->system_time;
get_paca()->system_time = 0;
udelta = get_paca()->user_time - get_paca()->utime_sspurr;
get_paca()->utime_sspurr = get_paca()->user_time;
/*
* Because we don't read the SPURR on every kernel entry/exit,
* deltascaled includes both user and system SPURR ticks.
* Apportion these ticks to system SPURR ticks and user
* SPURR ticks in the same ratio as the system time (delta)
* and user time (udelta) values obtained from the timebase
* over the same interval. The system ticks get accounted here;
* the user ticks get saved up in paca->user_time_scaled to be
* used by account_process_tick.
*/
sys_scaled = delta;
user_scaled = udelta;
if (deltascaled != delta + udelta) {
if (udelta) {
sys_scaled = deltascaled * delta / (delta + udelta);
user_scaled = deltascaled - sys_scaled;
} else {
sys_scaled = deltascaled;
}
}
get_paca()->user_time_scaled += user_scaled;
if (in_irq() || idle_task(smp_processor_id()) != tsk) {
account_system_time(tsk, 0, delta, sys_scaled);
if (stolen)
account_steal_time(stolen);
} else {
account_idle_time(delta + stolen);
}
if (in_irq() || idle_task(smp_processor_id()) != tsk)
account_system_time(tsk, 0, delta, deltascaled);
else
account_idle_time(delta);
__get_cpu_var(cputime_last_delta) = delta;
__get_cpu_var(cputime_scaled_last_delta) = deltascaled;
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
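The apportioning in account_system_vtime() is easy to sanity-check with made-up numbers: if the timebase shows delta = 300 ticks of system time and udelta = 100 ticks of user time while the SPURR advanced by deltascaled = 200, then sys_scaled = 200 * 300 / 400 = 150 and user_scaled = 50, so the SPURR ticks split 3:1 exactly like the timebase ticks. A standalone illustration of that arithmetic (not kernel code, all values invented):

/* Illustration of the SPURR apportioning above; inputs are arbitrary. */
static unsigned long long apportion_sys(unsigned long long deltascaled,
					unsigned long long delta,
					unsigned long long udelta)
{
	if (!udelta)
		return deltascaled;	/* no user time: all SPURR ticks are system */
	return deltascaled * delta / (delta + udelta);
}
/* apportion_sys(200, 300, 100) == 150, leaving 50 for user_scaled. */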
@ -265,125 +357,26 @@ EXPORT_SYMBOL_GPL(account_system_vtime);
* by the exception entry and exit code to the generic process
* user and system time records.
* Must be called with interrupts disabled.
* Assumes that account_system_vtime() has been called recently
* (i.e. since the last entry from usermode) so that
* get_paca()->user_time_scaled is up to date.
*/
void account_process_tick(struct task_struct *tsk, int user_tick)
{
cputime_t utime, utimescaled;
utime = get_paca()->user_time;
utimescaled = get_paca()->user_time_scaled;
get_paca()->user_time = 0;
utimescaled = cputime_to_scaled(utime);
get_paca()->user_time_scaled = 0;
get_paca()->utime_sspurr = 0;
account_user_time(tsk, utime, utimescaled);
}
/*
* Stuff for accounting stolen time.
*/
struct cpu_purr_data {
int initialized; /* thread is running */
u64 tb; /* last TB value read */
u64 purr; /* last PURR value read */
u64 spurr; /* last SPURR value read */
};
/*
* Each entry in the cpu_purr_data array is manipulated only by its
* "owner" cpu -- usually in the timer interrupt but also occasionally
* in process context for cpu online. As long as cpus do not touch
* each others' cpu_purr_data, disabling local interrupts is
* sufficient to serialize accesses.
*/
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
static void snapshot_tb_and_purr(void *data)
{
unsigned long flags;
struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
local_irq_save(flags);
p->tb = get_tb_or_rtc();
p->purr = mfspr(SPRN_PURR);
wmb();
p->initialized = 1;
local_irq_restore(flags);
}
/*
* Called during boot when all cpus have come up.
*/
void snapshot_timebases(void)
{
if (!cpu_has_feature(CPU_FTR_PURR))
return;
on_each_cpu(snapshot_tb_and_purr, NULL, 1);
}
/*
* Must be called with interrupts disabled.
*/
void calculate_steal_time(void)
{
u64 tb, purr;
s64 stolen;
struct cpu_purr_data *pme;
pme = &__get_cpu_var(cpu_purr_data);
if (!pme->initialized)
return; /* !CPU_FTR_PURR or early in early boot */
tb = mftb();
purr = mfspr(SPRN_PURR);
stolen = (tb - pme->tb) - (purr - pme->purr);
if (stolen > 0) {
if (idle_task(smp_processor_id()) != current)
account_steal_time(stolen);
else
account_idle_time(stolen);
}
pme->tb = tb;
pme->purr = purr;
}
#ifdef CONFIG_PPC_SPLPAR
/*
* Must be called before the cpu is added to the online map when
* a cpu is being brought up at runtime.
*/
static void snapshot_purr(void)
{
struct cpu_purr_data *pme;
unsigned long flags;
if (!cpu_has_feature(CPU_FTR_PURR))
return;
local_irq_save(flags);
pme = &__get_cpu_var(cpu_purr_data);
pme->tb = mftb();
pme->purr = mfspr(SPRN_PURR);
pme->initialized = 1;
local_irq_restore(flags);
}
#endif /* CONFIG_PPC_SPLPAR */
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define calculate_steal_time() do { } while (0)
#endif
#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr() do { } while (0)
#endif
/*
* Called when a cpu comes up after the system has finished booting,
* i.e. as a result of a hotplug cpu action.
*/
void snapshot_timebase(void)
{
__get_cpu_var(last_jiffy) = get_tb_or_rtc();
snapshot_purr();
}
void __delay(unsigned long loops)
{
unsigned long start;
@ -585,8 +578,6 @@ void timer_interrupt(struct pt_regs * regs)
old_regs = set_irq_regs(regs);
irq_enter();
calculate_steal_time();
if (test_irq_work_pending()) {
clear_irq_work_pending();
irq_work_run();


@ -538,6 +538,11 @@ int machine_check_e500(struct pt_regs *regs)
return 0;
}
int machine_check_generic(struct pt_regs *regs)
{
return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{


@ -159,7 +159,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
{
int i;
if (!vma || test_thread_flag(TIF_32BIT)) {
if (!vma || is_32bit_task()) {
printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
for (i=0; i<vdso32_pages; i++) {
struct page *pg = virt_to_page(vdso32_kbase +
@ -170,7 +170,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
dump_one_vdso_page(pg, upg);
}
}
if (!vma || !test_thread_flag(TIF_32BIT)) {
if (!vma || !is_32bit_task()) {
printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
for (i=0; i<vdso64_pages; i++) {
struct page *pg = virt_to_page(vdso64_kbase +
@ -200,7 +200,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return 0;
#ifdef CONFIG_PPC64
if (test_thread_flag(TIF_32BIT)) {
if (is_32bit_task()) {
vdso_pagelist = vdso32_pagelist;
vdso_pages = vdso32_pages;
vdso_base = VDSO32_MBASE;


@ -14,10 +14,10 @@ obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
GCOV_PROFILE := n
EXTRA_CFLAGS := -shared -fno-common -fno-builtin
EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
EXTRA_AFLAGS := -D__VDSO32__ -s
asflags-y := -D__VDSO32__ -s
obj-y += vdso32_wrapper.o
extra-y += vdso32.lds


@ -9,10 +9,10 @@ obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
GCOV_PROFILE := n
EXTRA_CFLAGS := -shared -fno-common -fno-builtin
EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
EXTRA_AFLAGS := -D__VDSO64__ -s
asflags-y := -D__VDSO64__ -s
obj-y += vdso64_wrapper.o
extra-y += vdso64.lds


@ -1184,7 +1184,12 @@ EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
/* XXX should free TCE table */
struct iommu_table *tbl = get_iommu_table_base(dev);
/* iSeries uses a common table for all vio devices */
if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl)
iommu_free_table(tbl, dev->of_node ?
dev->of_node->full_name : dev_name(dev));
of_node_put(dev->of_node);
kfree(to_vio_dev(dev));
}
@ -1254,8 +1259,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
if (device_register(&viodev->dev)) {
printk(KERN_ERR "%s: failed to register device %s\n",
__func__, dev_name(&viodev->dev));
/* XXX free TCE table */
kfree(viodev);
put_device(&viodev->dev);
return NULL;
}


@ -4,7 +4,7 @@
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)


@ -159,7 +159,7 @@
static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt], &vcpu->arch.fpscr);
kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
}
static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
@ -204,7 +204,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* put in registers */
switch (ls_type) {
case FPU_LS_SINGLE:
kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
vcpu->arch.qpr[rs] = *((u32*)tmp);
break;
case FPU_LS_DOUBLE:
@ -230,7 +230,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (ls_type) {
case FPU_LS_SINGLE:
kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp, &vcpu->arch.fpscr);
kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
val = *((u32*)tmp);
len = sizeof(u32);
break;
@ -296,7 +296,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = EMULATE_DONE;
/* put in registers */
kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
vcpu->arch.qpr[rs] = tmp[1];
dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@ -314,7 +314,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u32 tmp[2];
int len = w ? sizeof(u32) : sizeof(u64);
kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0], &vcpu->arch.fpscr);
kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
tmp[1] = vcpu->arch.qpr[rs];
r = kvmppc_st(vcpu, &addr, len, tmp, true);
@ -516,9 +516,9 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
WARN_ON(rc);
/* PS0 */
kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
kvm_cvt_df(&fpr[reg_in3], &ps0_in3, &vcpu->arch.fpscr);
kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
kvm_cvt_df(&fpr[reg_in3], &ps0_in3);
if (scalar & SCALAR_LOW)
ps0_in2 = qpr[reg_in2];
@ -529,7 +529,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
ps0_in1, ps0_in2, ps0_in3, ps0_out);
if (!(scalar & SCALAR_NO_PS0))
kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
/* PS1 */
ps1_in1 = qpr[reg_in1];
@ -566,12 +566,12 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
WARN_ON(rc);
/* PS0 */
kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
if (scalar & SCALAR_LOW)
ps0_in2 = qpr[reg_in2];
else
kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
@ -579,7 +579,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
ps0_in1, ps0_in2, ps0_out);
kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
}
/* PS1 */
@ -615,13 +615,13 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
WARN_ON(rc);
/* PS0 */
kvm_cvt_df(&fpr[reg_in], &ps0_in, &vcpu->arch.fpscr);
kvm_cvt_df(&fpr[reg_in], &ps0_in);
func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
ps0_in, ps0_out);
kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
/* PS1 */
ps1_in = qpr[reg_in];
@ -671,7 +671,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
#ifdef DEBUG
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
u32 f;
kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
kvm_cvt_df(&vcpu->arch.fpr[i], &f);
dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
}
@ -796,8 +796,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
&vcpu->arch.qpr[ax_rd],
&vcpu->arch.fpscr);
&vcpu->arch.qpr[ax_rd]);
break;
case OP_4X_PS_MERGE01:
WARN_ON(rcomp);
@ -808,19 +807,16 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
WARN_ON(rcomp);
/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
&vcpu->arch.fpr[ax_rd],
&vcpu->arch.fpscr);
&vcpu->arch.fpr[ax_rd]);
/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
&vcpu->arch.qpr[ax_rd],
&vcpu->arch.fpscr);
&vcpu->arch.qpr[ax_rd]);
break;
case OP_4X_PS_MERGE11:
WARN_ON(rcomp);
/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
&vcpu->arch.fpr[ax_rd],
&vcpu->arch.fpscr);
&vcpu->arch.fpr[ax_rd]);
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
break;
}
@ -1255,7 +1251,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
#ifdef DEBUG
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
u32 f;
kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
kvm_cvt_df(&vcpu->arch.fpr[i], &f);
dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
}
#endif


@ -145,7 +145,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
switch (get_op(inst)) {
case OP_TRAP:
@ -275,7 +275,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u64 jd = get_tb() - vcpu->arch.dec_jiffies;
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
pr_debug("mfDEC: %x - %llx = %lx\n",
vcpu->arch.dec, jd,
kvmppc_get_gpr(vcpu, rt));
break;


@ -273,19 +273,11 @@ FPD_THREE_IN(fnmsub)
FPD_THREE_IN(fnmadd)
_GLOBAL(kvm_cvt_fd)
lfd 0,0(r5) /* load up fpscr value */
MTFSF_L(0)
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0
stfd 0,0(r5) /* save new fpscr value */
blr
_GLOBAL(kvm_cvt_df)
lfd 0,0(r5) /* load up fpscr value */
MTFSF_L(0)
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0
stfd 0,0(r5) /* save new fpscr value */
blr
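kvm_cvt_fd() and kvm_cvt_df() above no longer load and restore a guest FPSCR around the conversion, so the third argument disappears at each call site shown earlier in this series. The declarations themselves are not in this excerpt; from the register usage above (r3 source, r4 destination) the new prototypes are presumably along these lines:

/* Assumed prototypes matching the new call sites; the real declarations
 * live in the KVM PPC headers. */
extern void kvm_cvt_fd(u32 *from, u64 *to);	/* single image to double */
extern void kvm_cvt_df(u64 *from, u32 *to);	/* double image to single */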


@ -4,9 +4,7 @@
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
CFLAGS_REMOVE_code-patching.o = -pg
CFLAGS_REMOVE_feature-fixups.o = -pg
@ -17,7 +15,8 @@ obj-$(CONFIG_PPC32) += div64.o copy_32.o
obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o
memcpy_64.o usercopy_64.o mem_64.o string.o \
checksum_wrappers_64.o
obj-$(CONFIG_XMON) += sstep.o ldstfp.o
obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o


@ -65,165 +65,393 @@ _GLOBAL(csum_tcpudp_magic)
srwi r3,r3,16
blr
#define STACKFRAMESIZE 256
#define STK_REG(i) (112 + ((i)-14)*8)
/*
* Computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit).
*
* This code assumes at least halfword alignment, though the length
* can be any number of bytes. The sum is accumulated in r5.
*
* csum_partial(r3=buff, r4=len, r5=sum)
*/
_GLOBAL(csum_partial)
subi r3,r3,8 /* we'll offset by 8 for the loads */
srdi. r6,r4,3 /* divide by 8 for doubleword count */
addic r5,r5,0 /* clear carry */
beq 3f /* if we're doing < 8 bytes */
andi. r0,r3,2 /* aligned on a word boundary already? */
beq+ 1f
lhz r6,8(r3) /* do 2 bytes to get aligned */
addi r3,r3,2
subi r4,r4,2
addc r5,r5,r6
srdi. r6,r4,3 /* recompute number of doublewords */
beq 3f /* any left? */
1: mtctr r6
2: ldu r6,8(r3) /* main sum loop */
adde r5,r5,r6
bdnz 2b
andi. r4,r4,7 /* compute bytes left to sum after doublewords */
3: cmpwi 0,r4,4 /* is at least a full word left? */
blt 4f
lwz r6,8(r3) /* sum this word */
addic r0,r5,0 /* clear carry */
srdi. r6,r4,3 /* less than 8 bytes? */
beq .Lcsum_tail_word
/*
* If only halfword aligned, align to a double word. Since odd
* aligned addresses should be rare and they would require more
* work to calculate the correct checksum, we ignore that case
* and take the potential slowdown of unaligned loads.
*/
rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
beq .Lcsum_aligned
li r7,4
sub r6,r7,r6
mtctr r6
1:
lhz r6,0(r3) /* align to doubleword */
subi r4,r4,2
addi r3,r3,2
adde r0,r0,r6
bdnz 1b
.Lcsum_aligned:
/*
* We unroll the loop such that each iteration is 64 bytes with an
* entry and exit limb of 64 bytes, meaning a minimum size of
* 128 bytes.
*/
srdi. r6,r4,7
beq .Lcsum_tail_doublewords /* len < 128 */
srdi r6,r4,6
subi r6,r6,1
mtctr r6
stdu r1,-STACKFRAMESIZE(r1)
std r14,STK_REG(r14)(r1)
std r15,STK_REG(r15)(r1)
std r16,STK_REG(r16)(r1)
ld r6,0(r3)
ld r9,8(r3)
ld r10,16(r3)
ld r11,24(r3)
/*
* On POWER6 and POWER7 back to back addes take 2 cycles because of
* the XER dependency. This means the fastest this loop can go is
* 16 cycles per iteration. The scheduling of the loop below has
* been shown to hit this on both POWER6 and POWER7.
*/
.align 5
2:
adde r0,r0,r6
ld r12,32(r3)
ld r14,40(r3)
adde r0,r0,r9
ld r15,48(r3)
ld r16,56(r3)
addi r3,r3,64
adde r0,r0,r10
adde r0,r0,r11
adde r0,r0,r12
adde r0,r0,r14
adde r0,r0,r15
ld r6,0(r3)
ld r9,8(r3)
adde r0,r0,r16
ld r10,16(r3)
ld r11,24(r3)
bdnz 2b
adde r0,r0,r6
ld r12,32(r3)
ld r14,40(r3)
adde r0,r0,r9
ld r15,48(r3)
ld r16,56(r3)
addi r3,r3,64
adde r0,r0,r10
adde r0,r0,r11
adde r0,r0,r12
adde r0,r0,r14
adde r0,r0,r15
adde r0,r0,r16
ld r14,STK_REG(r14)(r1)
ld r15,STK_REG(r15)(r1)
ld r16,STK_REG(r16)(r1)
addi r1,r1,STACKFRAMESIZE
andi. r4,r4,63
.Lcsum_tail_doublewords: /* Up to 127 bytes to go */
srdi. r6,r4,3
beq .Lcsum_tail_word
mtctr r6
3:
ld r6,0(r3)
addi r3,r3,8
adde r0,r0,r6
bdnz 3b
andi. r4,r4,7
.Lcsum_tail_word: /* Up to 7 bytes to go */
srdi. r6,r4,2
beq .Lcsum_tail_halfword
lwz r6,0(r3)
addi r3,r3,4
adde r0,r0,r6
subi r4,r4,4
adde r5,r5,r6
4: cmpwi 0,r4,2 /* is at least a halfword left? */
blt+ 5f
lhz r6,8(r3) /* sum this halfword */
addi r3,r3,2
subi r4,r4,2
adde r5,r5,r6
5: cmpwi 0,r4,1 /* is at least a byte left? */
bne+ 6f
lbz r6,8(r3) /* sum this byte */
slwi r6,r6,8 /* this byte is assumed to be the upper byte of a halfword */
adde r5,r5,r6
6: addze r5,r5 /* add in final carry */
rldicl r4,r5,32,0 /* fold two 32-bit halves together */
add r3,r4,r5
srdi r3,r3,32
blr
.Lcsum_tail_halfword: /* Up to 3 bytes to go */
srdi. r6,r4,1
beq .Lcsum_tail_byte
lhz r6,0(r3)
addi r3,r3,2
adde r0,r0,r6
subi r4,r4,2
.Lcsum_tail_byte: /* Up to 1 byte to go */
andi. r6,r4,1
beq .Lcsum_finish
lbz r6,0(r3)
sldi r9,r6,8 /* Pad the byte out to 16 bits */
adde r0,r0,r9
.Lcsum_finish:
addze r0,r0 /* add in final carry */
rldicl r4,r0,32,0 /* fold two 32 bit halves together */
add r3,r4,r0
srdi r3,r3,32
blr
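The new csum_partial finishes by folding the 64-bit accumulator to 32 bits: rotate the doubleword by 32, add it to the original and keep the upper word, which sums the two halves including the carry between them. A C rendering of just that final sequence (rldicl/add/srdi), shown only to make the fold explicit:

/* C equivalent of the fold at .Lcsum_finish above; r0 is 'sum' here. */
static inline unsigned int csum_fold64(unsigned long long sum)
{
	unsigned long long rotated = (sum << 32) | (sum >> 32);

	return (unsigned int)((rotated + sum) >> 32);	/* high + low + carry */
}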
.macro source
100:
.section __ex_table,"a"
.align 3
.llong 100b,.Lsrc_error
.previous
.endm
.macro dest
200:
.section __ex_table,"a"
.align 3
.llong 200b,.Ldest_error
.previous
.endm
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
* If an access exception occurs on src or dst, it stores -EFAULT
* to *src_err or *dst_err respectively, and (for an error on
* src) zeroes the rest of dst.
*
* This code needs to be reworked to take advantage of 64 bit sum+copy.
* However, due to tokenring halfword alignment problems this will be very
* tricky. For now we'll leave it until we instrument it somehow.
* to *src_err or *dst_err respectively. The caller must take any action
* required in this case (zeroing memory, recalculating partial checksum etc).
*
* csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
*/
_GLOBAL(csum_partial_copy_generic)
addic r0,r6,0
subi r3,r3,4
subi r4,r4,4
srwi. r6,r5,2
beq 3f /* if we're doing < 4 bytes */
andi. r9,r4,2 /* Align dst to longword boundary */
beq+ 1f
81: lhz r6,4(r3) /* do 2 bytes to get aligned */
addi r3,r3,2
addic r0,r6,0 /* clear carry */
srdi. r6,r5,3 /* less than 8 bytes? */
beq .Lcopy_tail_word
/*
* If only halfword aligned, align to a double word. Since odd
* aligned addresses should be rare and they would require more
* work to calculate the correct checksum, we ignore that case
* and take the potential slowdown of unaligned loads.
*
* If the source and destination are relatively unaligned we only
* align the source. This keeps things simple.
*/
rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
beq .Lcopy_aligned
li r7,4
sub r6,r7,r6
mtctr r6
1:
source; lhz r6,0(r3) /* align to doubleword */
subi r5,r5,2
91: sth r6,4(r4)
addi r4,r4,2
addc r0,r0,r6
srwi. r6,r5,2 /* # words to do */
beq 3f
1: mtctr r6
82: lwzu r6,4(r3) /* the bdnz has zero overhead, so it should */
92: stwu r6,4(r4) /* be unnecessary to unroll this loop */
adde r0,r0,r6
bdnz 82b
andi. r5,r5,3
3: cmpwi 0,r5,2
blt+ 4f
83: lhz r6,4(r3)
addi r3,r3,2
subi r5,r5,2
93: sth r6,4(r4)
adde r0,r0,r6
dest; sth r6,0(r4)
addi r4,r4,2
bdnz 1b
.Lcopy_aligned:
/*
* We unroll the loop such that each iteration is 64 bytes with an
* entry and exit limb of 64 bytes, meaning a minimum size of
* 128 bytes.
*/
srdi. r6,r5,7
beq .Lcopy_tail_doublewords /* len < 128 */
srdi r6,r5,6
subi r6,r6,1
mtctr r6
stdu r1,-STACKFRAMESIZE(r1)
std r14,STK_REG(r14)(r1)
std r15,STK_REG(r15)(r1)
std r16,STK_REG(r16)(r1)
source; ld r6,0(r3)
source; ld r9,8(r3)
source; ld r10,16(r3)
source; ld r11,24(r3)
/*
* On POWER6 and POWER7 back to back addes take 2 cycles because of
* the XER dependency. This means the fastest this loop can go is
* 16 cycles per iteration. The scheduling of the loop below has
* been shown to hit this on both POWER6 and POWER7.
*/
.align 5
2:
adde r0,r0,r6
4: cmpwi 0,r5,1
bne+ 5f
84: lbz r6,4(r3)
94: stb r6,4(r4)
slwi r6,r6,8 /* Upper byte of word */
source; ld r12,32(r3)
source; ld r14,40(r3)
adde r0,r0,r9
source; ld r15,48(r3)
source; ld r16,56(r3)
addi r3,r3,64
adde r0,r0,r10
dest; std r6,0(r4)
dest; std r9,8(r4)
adde r0,r0,r11
dest; std r10,16(r4)
dest; std r11,24(r4)
adde r0,r0,r12
dest; std r12,32(r4)
dest; std r14,40(r4)
adde r0,r0,r14
dest; std r15,48(r4)
dest; std r16,56(r4)
addi r4,r4,64
adde r0,r0,r15
source; ld r6,0(r3)
source; ld r9,8(r3)
adde r0,r0,r16
source; ld r10,16(r3)
source; ld r11,24(r3)
bdnz 2b
adde r0,r0,r6
5: addze r3,r0 /* add in final carry (unlikely with 64-bit regs) */
rldicl r4,r3,32,0 /* fold 64 bit value */
add r3,r4,r3
srdi r3,r3,32
source; ld r12,32(r3)
source; ld r14,40(r3)
adde r0,r0,r9
source; ld r15,48(r3)
source; ld r16,56(r3)
addi r3,r3,64
adde r0,r0,r10
dest; std r6,0(r4)
dest; std r9,8(r4)
adde r0,r0,r11
dest; std r10,16(r4)
dest; std r11,24(r4)
adde r0,r0,r12
dest; std r12,32(r4)
dest; std r14,40(r4)
adde r0,r0,r14
dest; std r15,48(r4)
dest; std r16,56(r4)
addi r4,r4,64
adde r0,r0,r15
adde r0,r0,r16
ld r14,STK_REG(r14)(r1)
ld r15,STK_REG(r15)(r1)
ld r16,STK_REG(r16)(r1)
addi r1,r1,STACKFRAMESIZE
andi. r5,r5,63
.Lcopy_tail_doublewords: /* Up to 127 bytes to go */
srdi. r6,r5,3
beq .Lcopy_tail_word
mtctr r6
3:
source; ld r6,0(r3)
addi r3,r3,8
adde r0,r0,r6
dest; std r6,0(r4)
addi r4,r4,8
bdnz 3b
andi. r5,r5,7
.Lcopy_tail_word: /* Up to 7 bytes to go */
srdi. r6,r5,2
beq .Lcopy_tail_halfword
source; lwz r6,0(r3)
addi r3,r3,4
adde r0,r0,r6
dest; stw r6,0(r4)
addi r4,r4,4
subi r5,r5,4
.Lcopy_tail_halfword: /* Up to 3 bytes to go */
srdi. r6,r5,1
beq .Lcopy_tail_byte
source; lhz r6,0(r3)
addi r3,r3,2
adde r0,r0,r6
dest; sth r6,0(r4)
addi r4,r4,2
subi r5,r5,2
.Lcopy_tail_byte: /* Up to 1 byte to go */
andi. r6,r5,1
beq .Lcopy_finish
source; lbz r6,0(r3)
sldi r9,r6,8 /* Pad the byte out to 16 bits */
adde r0,r0,r9
dest; stb r6,0(r4)
.Lcopy_finish:
addze r0,r0 /* add in final carry */
rldicl r4,r0,32,0 /* fold two 32 bit halves together */
add r3,r4,r0
srdi r3,r3,32
blr
/* These shouldn't go in the fixup section, since that would
cause the ex_table addresses to get out of order. */
.globl src_error_1
src_error_1:
li r6,0
subi r5,r5,2
95: sth r6,4(r4)
addi r4,r4,2
srwi. r6,r5,2
beq 3f
mtctr r6
.globl src_error_2
src_error_2:
li r6,0
96: stwu r6,4(r4)
bdnz 96b
3: andi. r5,r5,3
beq src_error
.globl src_error_3
src_error_3:
li r6,0
mtctr r5
addi r4,r4,3
97: stbu r6,1(r4)
bdnz 97b
.globl src_error
src_error:
.Lsrc_error:
cmpdi 0,r7,0
beq 1f
beqlr
li r6,-EFAULT
stw r6,0(r7)
1: addze r3,r0
blr
.globl dst_error
dst_error:
.Ldest_error:
cmpdi 0,r8,0
beq 1f
beqlr
li r6,-EFAULT
stw r6,0(r8)
1: addze r3,r0
blr
.section __ex_table,"a"
.align 3
.llong 81b,src_error_1
.llong 91b,dst_error
.llong 82b,src_error_2
.llong 92b,dst_error
.llong 83b,src_error_3
.llong 93b,dst_error
.llong 84b,src_error_3
.llong 94b,dst_error
.llong 95b,dst_error
.llong 96b,dst_error
.llong 97b,dst_error


@ -0,0 +1,102 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2010
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/checksum.h>
#include <asm/uaccess.h>
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
unsigned int csum;
might_sleep();
*err_ptr = 0;
if (!len) {
csum = 0;
goto out;
}
if (unlikely((len < 0) || !access_ok(VERIFY_READ, src, len))) {
*err_ptr = -EFAULT;
csum = (__force unsigned int)sum;
goto out;
}
csum = csum_partial_copy_generic((void __force *)src, dst,
len, sum, err_ptr, NULL);
if (unlikely(*err_ptr)) {
int missing = __copy_from_user(dst, src, len);
if (missing) {
memset(dst + len - missing, 0, missing);
*err_ptr = -EFAULT;
} else {
*err_ptr = 0;
}
csum = csum_partial(dst, len, sum);
}
out:
return (__force __wsum)csum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
{
unsigned int csum;
might_sleep();
*err_ptr = 0;
if (!len) {
csum = 0;
goto out;
}
if (unlikely((len < 0) || !access_ok(VERIFY_WRITE, dst, len))) {
*err_ptr = -EFAULT;
csum = -1; /* invalid checksum */
goto out;
}
csum = csum_partial_copy_generic(src, (void __force *)dst,
len, sum, NULL, err_ptr);
if (unlikely(*err_ptr)) {
csum = csum_partial(src, len, sum);
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
csum = -1; /* invalid checksum */
}
}
out:
return (__force __wsum)csum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
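For callers the contract stays the same as the old csum_partial_copy_from_user(): on a source fault the wrapper falls back to __copy_from_user(), zeroes whatever could not be copied, checksums the destination buffer and reports -EFAULT through *err_ptr. A hedged usage sketch (function and variable names here are invented, not from this patch):

/* Hypothetical caller, for illustration only. */
static int example_copy_payload(const void __user *usrc, void *dst,
				int len, __wsum *out_csum)
{
	int err = 0;

	*out_csum = csum_and_copy_from_user(usrc, dst, len, 0, &err);
	return err;	/* 0 on success, -EFAULT if the user range faulted */
}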


@ -62,7 +62,7 @@
.text
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
.stabs "copy32.S",N_SO,0,0,0f
.stabs "copy_32.S",N_SO,0,0,0f
0:
CACHELINE_BYTES = L1_CACHE_BYTES


@ -17,6 +17,8 @@
#include <asm/asm-offsets.h>
#include <linux/errno.h>
#ifdef CONFIG_PPC_FPU
#define STKFRM (PPC_MIN_STKFRM + 16)
.macro extab instr,handler
@ -81,7 +83,7 @@ _GLOBAL(do_lfs)
mfmsr r6
ori r7,r6,MSR_FP
cmpwi cr7,r3,0
mtmsrd r7
MTMSRD(r7)
isync
beq cr7,1f
stfd fr0,STKFRM-16(r1)
@ -93,7 +95,7 @@ _GLOBAL(do_lfs)
lfd fr0,STKFRM-16(r1)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
mtmsrd r6
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
@ -108,7 +110,7 @@ _GLOBAL(do_lfd)
mfmsr r6
ori r7,r6,MSR_FP
cmpwi cr7,r3,0
mtmsrd r7
MTMSRD(r7)
isync
beq cr7,1f
stfd fr0,STKFRM-16(r1)
@ -120,7 +122,7 @@ _GLOBAL(do_lfd)
lfd fr0,STKFRM-16(r1)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
mtmsrd r6
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
@ -135,7 +137,7 @@ _GLOBAL(do_stfs)
mfmsr r6
ori r7,r6,MSR_FP
cmpwi cr7,r3,0
mtmsrd r7
MTMSRD(r7)
isync
beq cr7,1f
stfd fr0,STKFRM-16(r1)
@ -147,7 +149,7 @@ _GLOBAL(do_stfs)
lfd fr0,STKFRM-16(r1)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
mtmsrd r6
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
@ -162,7 +164,7 @@ _GLOBAL(do_stfd)
mfmsr r6
ori r7,r6,MSR_FP
cmpwi cr7,r3,0
mtmsrd r7
MTMSRD(r7)
isync
beq cr7,1f
stfd fr0,STKFRM-16(r1)
@ -174,7 +176,7 @@ _GLOBAL(do_stfd)
lfd fr0,STKFRM-16(r1)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
mtmsrd r6
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
@ -229,7 +231,7 @@ _GLOBAL(do_lvx)
oris r7,r6,MSR_VEC@h
cmpwi cr7,r3,0
li r8,STKFRM-16
mtmsrd r7
MTMSRD(r7)
isync
beq cr7,1f
stvx vr0,r1,r8
@ -241,7 +243,7 @@ _GLOBAL(do_lvx)
lvx vr0,r1,r8
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
mtmsrd r6
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
@ -257,7 +259,7 @@ _GLOBAL(do_stvx)
oris r7,r6,MSR_VEC@h
cmpwi cr7,r3,0
li r8,STKFRM-16
mtmsrd r7
MTMSRD(r7)
isync
beq cr7,1f
stvx vr0,r1,r8
@ -269,7 +271,7 @@ _GLOBAL(do_stvx)
lvx vr0,r1,r8
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
mtmsrd r6
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
@ -325,7 +327,7 @@ _GLOBAL(do_lxvd2x)
oris r7,r6,MSR_VSX@h
cmpwi cr7,r3,0
li r8,STKFRM-16
mtmsrd r7
MTMSRD(r7)
isync
beq cr7,1f
STXVD2X(0,r1,r8)
@ -337,7 +339,7 @@ _GLOBAL(do_lxvd2x)
LXVD2X(0,r1,r8)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
mtmsrd r6
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
@ -353,7 +355,7 @@ _GLOBAL(do_stxvd2x)
oris r7,r6,MSR_VSX@h
cmpwi cr7,r3,0
li r8,STKFRM-16
mtmsrd r7
MTMSRD(r7)
isync
beq cr7,1f
STXVD2X(0,r1,r8)
@ -365,7 +367,7 @@ _GLOBAL(do_stxvd2x)
LXVD2X(0,r1,r8)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
mtmsrd r6
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
@ -373,3 +375,5 @@ _GLOBAL(do_stxvd2x)
extab 2b,3b
#endif /* CONFIG_VSX */
#endif /* CONFIG_PPC_FPU */
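Replacing the bare mtmsrd with the MTMSRD() wrapper is what lets ldstfp.S assemble on 32-bit kernels, where mtmsrd does not exist. The macro is not shown here; it presumably expands roughly as below (sketch in the style of the ppc_asm.h helpers, not the verbatim definition):

/* Assumed shape of the wrapper used above. */
#ifdef CONFIG_PPC64
#define MTMSRD(r)	mtmsrd	r
#else
#define MTMSRD(r)	mtmsr	r
#endif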


@ -34,7 +34,7 @@ void __spin_yield(arch_spinlock_t *lock)
return;
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
yield_count = lppaca[holder_cpu].yield_count;
yield_count = lppaca_of(holder_cpu).yield_count;
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
@ -65,7 +65,7 @@ void __rw_yield(arch_rwlock_t *rw)
return; /* no write lock at present */
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
yield_count = lppaca[holder_cpu].yield_count;
yield_count = lppaca_of(holder_cpu).yield_count;
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();


@ -30,6 +30,7 @@ extern char system_call_common[];
#define XER_OV 0x40000000U
#define XER_CA 0x20000000U
#ifdef CONFIG_PPC_FPU
/*
* Functions in ldstfp.S
*/
@ -41,6 +42,7 @@ extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif
/*
* Determine whether a conditional branch instruction would branch.
@ -290,6 +292,7 @@ static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
return write_mem_unaligned(val, ea, nb, regs);
}
#ifdef CONFIG_PPC_FPU
/*
* Check the address and alignment, and call func to do the actual
* load or store.
@ -351,6 +354,7 @@ static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
}
return err;
}
#endif
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
@ -1393,6 +1397,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
regs->gpr[rd] = byterev_4(val);
goto ldst_done;
#ifdef CONFIG_PPC_FPU
case 535: /* lfsx */
case 567: /* lfsux */
if (!(regs->msr & MSR_FP))
@ -1424,6 +1429,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
ea = xform_ea(instr, regs, u);
err = do_fp_store(rd, do_stfd, ea, 8, regs);
goto ldst_done;
#endif
#ifdef __powerpc64__
case 660: /* stdbrx */
@ -1534,6 +1540,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
} while (++rd < 32);
goto instr_done;
#ifdef CONFIG_PPC_FPU
case 48: /* lfs */
case 49: /* lfsu */
if (!(regs->msr & MSR_FP))
@ -1565,6 +1572,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
ea = dform_ea(instr, regs);
err = do_fp_store(rd, do_stfd, ea, 8, regs);
goto ldst_done;
#endif
#ifdef __powerpc64__
case 58: /* ld[u], lwa */


@ -15,4 +15,4 @@ obj-$(CONFIG_SPE) += math_efp.o
CFLAGS_fabs.o = -fno-builtin-fabs
CFLAGS_math.o = -fno-builtin-fabs
EXTRA_CFLAGS = -I. -Iinclude/math-emu -w
ccflags-y = -I. -Iinclude/math-emu -w


@ -4,9 +4,7 @@
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
obj-y := fault.o mem.o pgtable.o gup.o \
init_$(CONFIG_WORD_SIZE).o \
@ -25,7 +23,7 @@ obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
mmu_context_hash$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
ifeq ($(CONFIG_HUGETLB_PAGE),y)


@ -30,6 +30,7 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <asm/firmware.h>
#include <asm/page.h>
@ -385,6 +386,7 @@ do_sigbus:
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
unsigned long *stackend;
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
@ -413,5 +415,9 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
stackend = end_of_stack(current);
if (current != &init_task && *stackend != STACK_END_MAGIC)
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
die("Kernel access of bad area", regs, sig);
}
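The overrun check added to bad_page_fault() pairs with a sentinel that the generic fork path writes at the lowest word of every thread stack; if a kernel bad-area fault finds it clobbered, the stack has very likely been overrun. The planting side is not part of this diff; from memory it amounts to roughly the following in dup_task_struct(), with STACK_END_MAGIC (0x57AC6E9D) coming from the newly included linux/magic.h:

/* Approximate counterpart in kernel/fork.c, quoted from memory and shown
 * only to explain what *stackend is compared against above. */
unsigned long *stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC;	/* for stack-overflow detection */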


@ -57,11 +57,6 @@
unsigned int tlbcam_index;
#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
#endif
#define NUM_TLBCAMS (64)
struct tlbcam TLBCAM[NUM_TLBCAMS];
@ -138,7 +133,8 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
if (mmu_has_feature(MMU_FTR_BIG_PHYS))
TLBCAM[index].MAS7 = (u64)phys >> 32;
if (flags & _PAGE_USER) {
/* Below is unlikely -- only for large user pages or similar */
if (pte_user(flags)) {
TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
}
@ -185,6 +181,12 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
return amount_mapped;
}
#ifdef CONFIG_PPC32
#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
#endif
unsigned long __init mmu_mapin_ram(unsigned long top)
{
return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
@ -225,3 +227,4 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
/* 64M mapped initially according to head_fsl_booke.S */
memblock_set_current_limit(min_t(u64, limit, 0x04000000));
}
#endif


@ -334,7 +334,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
/* We don't touch the boot CPU's map, it's allocated at boot and kept
* around forever
*/
if (cpu == 0)
if (cpu == boot_cpuid)
return NOTIFY_OK;
switch (action) {
@ -420,9 +420,11 @@ void __init mmu_context_init(void)
*/
context_map = alloc_bootmem(CTX_MAP_SIZE);
context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
#ifndef CONFIG_SMP
stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
#else
stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);
#ifdef CONFIG_SMP
register_cpu_notifier(&mmu_context_cpu_nb);
#endif


@ -140,10 +140,13 @@ extern void wii_memory_fixups(void);
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top);
#elif defined(CONFIG_FSL_BOOKE)
#elif defined(CONFIG_PPC_FSL_BOOK3E)
extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx);
#ifdef CONFIG_PPC32
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top);
extern void adjust_total_lowmem(void);
#endif
extern void loadcam_entry(unsigned int index);
struct tlbcam {


@ -349,11 +349,47 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
static void setup_page_sizes(void)
{
unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
unsigned int tlb0ps = mfspr(SPRN_TLB0PS);
unsigned int eptcfg = mfspr(SPRN_EPTCFG);
unsigned int tlb0cfg;
unsigned int tlb0ps;
unsigned int eptcfg;
int i, psize;
#ifdef CONFIG_PPC_FSL_BOOK3E
unsigned int mmucfg = mfspr(SPRN_MMUCFG);
if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
(mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
unsigned int min_pg, max_pg;
min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def;
unsigned int shift;
def = &mmu_psize_defs[psize];
shift = def->shift;
if (shift == 0)
continue;
/* adjust to be in terms of 4^shift Kb */
shift = (shift - 10) >> 1;
if ((shift >= min_pg) && (shift <= max_pg))
def->flags |= MMU_PAGE_SIZE_DIRECT;
}
goto no_indirect;
}
#endif
tlb0cfg = mfspr(SPRN_TLB0CFG);
tlb0ps = mfspr(SPRN_TLB0PS);
eptcfg = mfspr(SPRN_EPTCFG);
/* Look for supported direct sizes */
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
@ -505,6 +541,20 @@ static void __early_init_mmu(int boot_cpu)
*/
linear_map_top = memblock_end_of_DRAM();
#ifdef CONFIG_PPC_FSL_BOOK3E
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
linear_map_top = map_mem_in_cams(linear_map_top, num_cams);
/* limit memory so we don't have linear faults */
memblock_enforce_memory_limit(linear_map_top);
memblock_analyze();
}
#endif
/* A sync won't hurt us after mucking around with
* the MMU configuration
*/


@ -367,7 +367,7 @@ _GLOBAL(set_context)
#error Unsupported processor type !
#endif
#if defined(CONFIG_FSL_BOOKE)
#if defined(CONFIG_PPC_FSL_BOOK3E)
/*
* extern void loadcam_entry(unsigned int index)
*


@ -1,8 +1,6 @@
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
obj-$(CONFIG_OPROFILE) += oprofile.o


@ -105,7 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
}
} else {
#ifdef CONFIG_PPC64
if (!test_thread_flag(TIF_32BIT)) {
if (!is_32bit_task()) {
while (depth--) {
sp = user_getsp64(sp, first_frame);
if (!sp)


@ -2,7 +2,7 @@
* Freescale Embedded oprofile support, based on ppc64 oprofile support
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* Copyright (c) 2004 Freescale Semiconductor, Inc
* Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
*
* Author: Andy Fleming
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
@ -321,9 +321,6 @@ static void fsl_emb_handle_interrupt(struct pt_regs *regs,
int val;
int i;
/* set the PMM bit (see comment below) */
mtmsr(mfmsr() | MSR_PMM);
pc = regs->nip;
is_kernel = is_kernel_addr(pc);
@ -340,9 +337,13 @@ static void fsl_emb_handle_interrupt(struct pt_regs *regs,
}
/* The freeze bit was set by the interrupt. */
/* Clear the freeze bit, and reenable the interrupt.
* The counters won't actually start until the rfi clears
* the PMM bit */
/* Clear the freeze bit, and reenable the interrupt. The
* counters won't actually start until the rfi clears the PMM
* bit. The PMM bit should not be set until after the interrupt
* is cleared to avoid it getting lost in some hypervisor
* environments.
*/
mtmsr(mfmsr() | MSR_PMM);
pmc_start_ctrs(1);
}


@ -17,6 +17,16 @@ config BAMBOO
help
This option enables support for the IBM PPC440EP evaluation board.
config BLUESTONE
bool "Bluestone"
depends on 44x
default n
select PPC44x_SIMPLE
select APM821xx
select IBM_NEW_EMAC_RGMII
help
This option enables support for the APM APM821xx Evaluation board.
config EBONY
bool "Ebony"
depends on 44x
@ -293,6 +303,12 @@ config 460SX
select IBM_NEW_EMAC_ZMII
select IBM_NEW_EMAC_TAH
config APM821xx
bool
select PPC_FPU
select IBM_NEW_EMAC_EMAC4
select IBM_NEW_EMAC_TAH
# 44x errata/workaround config symbols, selected by the CPU models above
config IBM440EP_ERR42
bool


@ -52,6 +52,7 @@ machine_device_initcall(ppc44x_simple, ppc44x_device_probe);
static char *board[] __initdata = {
"amcc,arches",
"amcc,bamboo",
"amcc,bluestone",
"amcc,canyonlands",
"amcc,glacier",
"ibm,ebony",


@ -10,12 +10,12 @@ menuconfig PPC_83xx
if PPC_83xx
config MPC830x_RDB
bool "Freescale MPC830x RDB"
bool "Freescale MPC830x RDB and derivatives"
select DEFAULT_UIMAGE
select PPC_MPC831x
select FSL_GTM
help
This option enables support for the MPC8308 RDB board.
This option enables support for the MPC8308 RDB and MPC8308 P1M boards.
config MPC831x_RDB
bool "Freescale MPC831x RDB"


@ -65,7 +65,8 @@ static int __init mpc830x_rdb_probe(void)
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "MPC8308RDB") ||
of_flat_dt_is_compatible(root, "fsl,mpc8308rdb");
of_flat_dt_is_compatible(root, "fsl,mpc8308rdb") ||
of_flat_dt_is_compatible(root, "denx,mpc8308_p1m");
}
static struct of_device_id __initdata of_bus_ids[] = {


@ -11,6 +11,8 @@ menuconfig FSL_SOC_BOOKE
if FSL_SOC_BOOKE
if PPC32
config MPC8540_ADS
bool "Freescale MPC8540 ADS"
select DEFAULT_UIMAGE
@ -153,10 +155,20 @@ config SBC8560
help
This option enables support for the Wind River SBC8560 board
config P3041_DS
bool "Freescale P3041 DS"
select DEFAULT_UIMAGE
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
select MPC8xxx_GPIO
select HAS_RAPIDIO
help
This option enables support for the P3041 DS board
config P4080_DS
bool "Freescale P4080 DS"
select DEFAULT_UIMAGE
select PPC_FSL_BOOK3E
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
@ -165,6 +177,20 @@ config P4080_DS
help
This option enables support for the P4080 DS board
endif # PPC32
config P5020_DS
bool "Freescale P5020 DS"
select DEFAULT_UIMAGE
select E500
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
select MPC8xxx_GPIO
select HAS_RAPIDIO
help
This option enables support for the P5020 DS board
endif # FSL_SOC_BOOKE
config TQM85xx


@ -11,7 +11,9 @@ obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o
obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
obj-$(CONFIG_P1022_DS) += p1022_ds.o
obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
obj-$(CONFIG_SBC8560) += sbc8560.o


@ -112,6 +112,8 @@ static struct of_device_id __initdata p1022_ds_ids[] = {
{ .compatible = "soc", },
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
/* So that the DMA channel nodes can be probed individually: */
{ .compatible = "fsl,eloplus-dma", },
{},
};


@ -0,0 +1,64 @@
/*
* P3041 DS Setup
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* Copyright 2009-2010 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "corenet_ds.h"
/*
* Called very early, device-tree isn't unflattened
*/
static int __init p3041_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "fsl,P3041DS");
}
define_machine(p3041_ds) {
.name = "P3041 DS",
.probe = p3041_ds_probe,
.setup_arch = corenet_ds_setup_arch,
.init_IRQ = corenet_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
.get_irq = mpic_get_coreint_irq,
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
machine_device_initcall(p3041_ds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier);
#endif


@ -0,0 +1,69 @@
/*
* P5020 DS Setup
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* Copyright 2009-2010 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "corenet_ds.h"
/*
* Called very early, device-tree isn't unflattened
*/
static int __init p5020_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "fsl,P5020DS");
}
define_machine(p5020_ds) {
.name = "P5020 DS",
.probe = p5020_ds_probe,
.setup_arch = corenet_ds_setup_arch,
.init_IRQ = corenet_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
/* coreint doesn't play nice with lazy EE, use legacy mpic for now */
#ifdef CONFIG_PPC64
.get_irq = mpic_get_irq,
#else
.get_irq = mpic_get_coreint_irq,
#endif
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
machine_device_initcall(p5020_ds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier);
#endif


@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
@ -79,6 +80,7 @@ smp_85xx_kick_cpu(int nr)
local_irq_save(flags);
out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
#ifdef CONFIG_PPC32
out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
if (!ioremappable)
@ -88,6 +90,12 @@ smp_85xx_kick_cpu(int nr)
/* Wait a bit for the CPU to ack. */
while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
mdelay(1);
#else
out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER),
__pa((u64)*((unsigned long long *) generic_secondary_smp_init)));
smp_generic_kick_cpu(nr);
#endif
local_irq_restore(flags);
@ -114,19 +122,15 @@ struct smp_ops_t smp_85xx_ops = {
};
#ifdef CONFIG_KEXEC
static int kexec_down_cpus = 0;
atomic_t kexec_down_cpus = ATOMIC_INIT(0);
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
mpic_teardown_this_cpu(1);
/* When crashing, this gets called on all CPU's we only
* take down the non-boot cpus */
if (smp_processor_id() != boot_cpuid)
{
local_irq_disable();
kexec_down_cpus++;
local_irq_disable();
if (secondary) {
atomic_inc(&kexec_down_cpus);
/* loop forever */
while (1);
}
}
@ -137,16 +141,65 @@ static void mpc85xx_smp_kexec_down(void *arg)
ppc_md.kexec_cpu_down(0,1);
}
static void mpc85xx_smp_machine_kexec(struct kimage *image)
static void map_and_flush(unsigned long paddr)
{
int timeout = 2000;
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
unsigned long kaddr = (unsigned long)kmap(page);
flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
kunmap(page);
}
/**
* Before we reset the other cores, we need to flush relevant cache
* out to memory so we don't get anything corrupted, some of these flushes
* are performed out of an overabundance of caution as interrupts are not
* disabled yet and we can switch cores
*/
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
kimage_entry_t *ptr, entry;
unsigned long paddr;
int i;
set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
if (image->type == KEXEC_TYPE_DEFAULT) {
/* normal kexec images are stored in temporary pages */
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (!(entry & IND_DESTINATION)) {
map_and_flush(entry);
}
}
/* flush out last IND_DONE page */
map_and_flush(entry);
} else {
/* crash type kexec images are copied to the crash region */
for (i = 0; i < image->nr_segments; i++) {
struct kexec_segment *seg = &image->segment[i];
for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
paddr += PAGE_SIZE) {
map_and_flush(paddr);
}
}
}
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
/* also flush the kimage struct to be passed in as well */
flush_dcache_range((unsigned long)image,
(unsigned long)image + sizeof(*image));
}
while ( (kexec_down_cpus != (num_online_cpus() - 1)) &&
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
mpc85xx_smp_flush_dcache_kexec(image);
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
while ( (atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
( timeout > 0 ) )
{
timeout--;
@ -155,7 +208,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( !timeout )
printk(KERN_ERR "Unable to bring down secondary cpu(s)");
for (i = 0; i < num_present_cpus(); i++)
for (i = 0; i < num_cpus; i++)
{
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);


@ -125,6 +125,7 @@ config 8xx
config E500
select FSL_EMB_PERFMON
select PPC_FSL_BOOK3E
bool
config PPC_E500MC
@ -166,9 +167,14 @@ config BOOKE
config FSL_BOOKE
bool
depends on E200 || E500
depends on (E200 || E500) && PPC32
default y
# this is for common code between PPC32 & PPC64 FSL BOOKE
config PPC_FSL_BOOK3E
bool
select FSL_EMB_PERFMON
default y if FSL_BOOKE
config PTE_64BIT
bool


@ -173,8 +173,10 @@ static int __init cbe_ptcal_enable(void)
return -ENODEV;
size = of_get_property(np, "ibm,cbe-ptcal-size", NULL);
if (!size)
if (!size) {
of_node_put(np);
return -ENODEV;
}
pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size);
order = get_order(*size);


@ -258,8 +258,10 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
return NO_IRQ;
imap += intsize + 1;
tmp = of_get_property(iic, "#interrupt-cells", NULL);
if (tmp == NULL)
if (tmp == NULL) {
of_node_put(iic);
return NO_IRQ;
}
intsize = *tmp;
/* Assume unit is last entry of interrupt specifier */
unit = imap[intsize - 1];


@ -154,6 +154,7 @@ static const struct file_operations __fops = { \
.release = spufs_attr_release, \
.read = spufs_attr_read, \
.write = spufs_attr_write, \
.llseek = generic_file_llseek, \
};
@ -521,6 +522,7 @@ static const struct file_operations spufs_cntl_fops = {
.release = spufs_cntl_release,
.read = simple_attr_read,
.write = simple_attr_write,
.llseek = generic_file_llseek,
.mmap = spufs_cntl_mmap,
};
@ -714,6 +716,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
static const struct file_operations spufs_mbox_fops = {
.open = spufs_pipe_open,
.read = spufs_mbox_read,
.llseek = no_llseek,
};
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
@ -743,6 +746,7 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_mbox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_mbox_stat_read,
.llseek = no_llseek,
};
/* low-level ibox access function */
@ -863,6 +867,7 @@ static const struct file_operations spufs_ibox_fops = {
.read = spufs_ibox_read,
.poll = spufs_ibox_poll,
.fasync = spufs_ibox_fasync,
.llseek = no_llseek,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
@ -890,6 +895,7 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_ibox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_ibox_stat_read,
.llseek = no_llseek,
};
/* low-level mailbox write */
@ -1011,6 +1017,7 @@ static const struct file_operations spufs_wbox_fops = {
.write = spufs_wbox_write,
.poll = spufs_wbox_poll,
.fasync = spufs_wbox_fasync,
.llseek = no_llseek,
};
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
@ -1038,6 +1045,7 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_wbox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_wbox_stat_read,
.llseek = no_llseek,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
@ -1166,6 +1174,7 @@ static const struct file_operations spufs_signal1_fops = {
.read = spufs_signal1_read,
.write = spufs_signal1_write,
.mmap = spufs_signal1_mmap,
.llseek = no_llseek,
};
static const struct file_operations spufs_signal1_nosched_fops = {
@ -1173,6 +1182,7 @@ static const struct file_operations spufs_signal1_nosched_fops = {
.release = spufs_signal1_release,
.write = spufs_signal1_write,
.mmap = spufs_signal1_mmap,
.llseek = no_llseek,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
@ -1305,6 +1315,7 @@ static const struct file_operations spufs_signal2_fops = {
.read = spufs_signal2_read,
.write = spufs_signal2_write,
.mmap = spufs_signal2_mmap,
.llseek = no_llseek,
};
static const struct file_operations spufs_signal2_nosched_fops = {
@ -1312,6 +1323,7 @@ static const struct file_operations spufs_signal2_nosched_fops = {
.release = spufs_signal2_release,
.write = spufs_signal2_write,
.mmap = spufs_signal2_mmap,
.llseek = no_llseek,
};
/*
@ -1451,6 +1463,7 @@ static const struct file_operations spufs_mss_fops = {
.open = spufs_mss_open,
.release = spufs_mss_release,
.mmap = spufs_mss_mmap,
.llseek = no_llseek,
};
static int
@ -1508,6 +1521,7 @@ static const struct file_operations spufs_psmap_fops = {
.open = spufs_psmap_open,
.release = spufs_psmap_release,
.mmap = spufs_psmap_mmap,
.llseek = no_llseek,
};
@ -1871,6 +1885,7 @@ static const struct file_operations spufs_mfc_fops = {
.fsync = spufs_mfc_fsync,
.fasync = spufs_mfc_fasync,
.mmap = spufs_mfc_mmap,
.llseek = no_llseek,
};
static int spufs_npc_set(void *data, u64 val)
@ -2246,6 +2261,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
static const struct file_operations spufs_dma_info_fops = {
.open = spufs_info_open,
.read = spufs_dma_info_read,
.llseek = no_llseek,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
@ -2299,6 +2315,7 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
static const struct file_operations spufs_proxydma_info_fops = {
.open = spufs_info_open,
.read = spufs_proxydma_info_read,
.llseek = no_llseek,
};
static int spufs_show_tid(struct seq_file *s, void *private)
@ -2585,6 +2602,7 @@ static const struct file_operations spufs_switch_log_fops = {
.read = spufs_switch_log_read,
.poll = spufs_switch_log_poll,
.release = spufs_switch_log_release,
.llseek = no_llseek,
};
/**
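
Each spufs file_operations in the hunk above gains an explicit .llseek, either no_llseek for the pipe-like interfaces or generic_file_llseek where seeking is meaningful, so the seek behaviour of every file is spelled out rather than left to the default. A hedged sketch of the shape of such a definition (example_fops and example_read are placeholders, not spufs code):

#include <linux/fs.h>
#include <linux/module.h>

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t len, loff_t *pos)
{
	return 0;				/* placeholder: nothing to read */
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= nonseekable_open,		/* mark the file non-seekable on open */
	.read	= example_read,
	.llseek	= no_llseek,			/* lseek() fails explicitly instead of silently */
};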

View File

@ -74,8 +74,10 @@ void __init chrp_nvram_init(void)
return;
nbytes_p = of_get_property(nvram, "#bytes", &proplen);
if (nbytes_p == NULL || proplen != sizeof(unsigned int))
if (nbytes_p == NULL || proplen != sizeof(unsigned int)) {
of_node_put(nvram);
return;
}
nvram_size = *nbytes_p;

View File

@ -1,4 +1,4 @@
EXTRA_CFLAGS += -mno-minimal-toc
ccflags-y := -mno-minimal-toc
obj-y += exception.o
obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \

View File

@ -243,7 +243,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
for (i = 0; i < NR_CPUS; i++) {
if (lppaca[i].dyn_proc_status >= 2)
if (lppaca_of(i).dyn_proc_status >= 2)
continue;
snprintf(p, 32 - (p - buf), "@%d", i);
@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
dt_prop_str(dt, "device_type", device_type_cpu);
index = lppaca[i].dyn_hv_phys_proc_index;
index = lppaca_of(i).dyn_hv_phys_proc_index;
d = &xIoHriProcessorVpd[index];
dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
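
The dt.c hunk swaps direct indexing of the lppaca[] array for the lppaca_of() accessor, hiding how the per-cpu areas are stored. As a generic illustration of that pattern (this is not the kernel's actual definition of lppaca_of), an accessor macro keeps call sites unchanged while the backing storage moves behind a pointer:

/* hypothetical example: callers write vpd_of(cpu).dyn_proc_status either way */
struct vpd { int dyn_proc_status; int dyn_hv_phys_proc_index; };

#ifdef FLAT_ARRAY
static struct vpd vpd_array[64];
#define vpd_of(cpu)	(vpd_array[cpu])
#else
static struct vpd *vpd_ptr[64];			/* filled in at boot, possibly dynamically */
#define vpd_of(cpu)	(*vpd_ptr[cpu])
#endif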

Some files were not shown because too many files have changed in this diff.