From 999f6338780fa0577b6581941c697c868d1ec2d3 Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Mon, 17 Nov 2008 20:29:03 +0100 Subject: [PATCH 01/18] [ARM] pxa/MioA701: fix memory corruption. In the resume bootstrap, the early disable address is wrong. Fix it to RAM address 0xa020b000 instead of 0xa0200000, and make it consistent with RESUME_ENABLE_ADDR in mioa701.c. Signed-off-by: Robert Jarzmik Acked-by: Russell King Signed-off-by: Eric Miao --- arch/arm/mach-pxa/mioa701_bootresume.S | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/mach-pxa/mioa701_bootresume.S b/arch/arm/mach-pxa/mioa701_bootresume.S index a647693d9856..324d25a48c85 100644 --- a/arch/arm/mach-pxa/mioa701_bootresume.S +++ b/arch/arm/mach-pxa/mioa701_bootresume.S @@ -24,6 +24,7 @@ ENTRY(mioa701_jumpaddr) 1: mov r0, #0xa0000000 @ Don't suppose memory access works orr r0, r0, #0x00200000 @ even if it's supposed to + orr r0, r0, #0x0000b000 mov r1, #0 str r1, [r0] @ Early disable resume for next boot ldr r0, mioa701_jumpaddr @ (Murphy's Law) From 844c6f6a36984c5fe1bcc2d68a88f2ed212d1eef Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Mon, 17 Nov 2008 20:29:04 +0100 Subject: [PATCH 02/18] [ARM] pxa/MioA701: bluetooth resume fix The G3IPL expects the value at RAM address 0xa020b020 to be exactly 1 to setup the bluetooth GPIOs properly. The actual code got a value from gpio_get_value() which was not 1, but a "not equal to 0" integer. Signed-off-by: Robert Jarzmik Acked-by: Russell King Signed-off-by: Eric Miao --- arch/arm/mach-pxa/mioa701.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index 0842c531ee4d..782903fe9c6c 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c @@ -565,7 +565,7 @@ static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) u32 *mem_resume_unknown = phys_to_virt(RESUME_UNKNOWN_ADDR); /* Devices prepare suspend */ - is_bt_on = gpio_get_value(GPIO83_BT_ON); + is_bt_on = !!gpio_get_value(GPIO83_BT_ON); pxa2xx_mfp_set_lpm(GPIO83_BT_ON, is_bt_on ? MFP_LPM_DRIVE_HIGH : MFP_LPM_DRIVE_LOW); From 47fd6f7c94e15eb5f97dd1cbb0427a46b03c8185 Mon Sep 17 00:00:00 2001 From: Jaya Kumar Date: Tue, 18 Nov 2008 02:32:36 +0100 Subject: [PATCH 03/18] [ARM] 5335/1: pxa25x_udc: Fix is_vbus_present to return 1 or 0 the use of is_blah() suggests a 1 or 0 return. This assumption is made in pxa25x_udc code such as: dev->vbus = is_vbus_present(); where dev->vbus is a bitfield. This fix allows pxa25x_udc_probe to correctly detect vbus. Other changes were to make its use consistent in the rest of the code. Signed-off-by: Jaya Kumar Signed-off-by: Russell King --- drivers/usb/gadget/pxa25x_udc.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index da6e93c201d2..2dbc0db0b46c 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c @@ -141,7 +141,11 @@ static int is_vbus_present(void) if (mach->gpio_vbus) { int value = gpio_get_value(mach->gpio_vbus); - return mach->gpio_vbus_inverted ? 
!value : value; + + if (mach->gpio_vbus_inverted) + return !value; + else + return !!value; } if (mach->udc_is_connected) return mach->udc_is_connected(); @@ -982,7 +986,7 @@ static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active) struct pxa25x_udc *udc; udc = container_of(_gadget, struct pxa25x_udc, gadget); - udc->vbus = (is_active != 0); + udc->vbus = is_active; DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); pullup(udc); return 0; @@ -1399,12 +1403,8 @@ lubbock_vbus_irq(int irq, void *_dev) static irqreturn_t udc_vbus_irq(int irq, void *_dev) { struct pxa25x_udc *dev = _dev; - int vbus = gpio_get_value(dev->mach->gpio_vbus); - if (dev->mach->gpio_vbus_inverted) - vbus = !vbus; - - pxa25x_udc_vbus_session(&dev->gadget, vbus); + pxa25x_udc_vbus_session(&dev->gadget, is_vbus_present()); return IRQ_HANDLED; } From 7953031da4200323ab5d85bd514054ca4ba9d225 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Nov 2008 18:11:16 -0800 Subject: [PATCH 04/18] ARM: OMAP: Remove broken LCD driver for SX1 Recently the omap McBSP code was cleaned up to get rid of direct McBSP register tinkering by the drivers. Looks like lcd_sx1.c never got converted, and now it breaks builds. It seems the lcd_sx1.c driver is attempting SPI mode, but doing it in a different way compared to omap_mcbsp_set_spi_mode(). Remove the broken driver, patches welcome to add it back when done properly by patching both mcbsp.c and lcd_sx1.c. Cc: Vovan888@gmail.com Cc: linux-fbdev-devel@lists.sourceforge.net Signed-off-by: Tony Lindgren --- drivers/video/omap/Makefile | 1 - drivers/video/omap/lcd_sx1.c | 327 ----------------------------------- 2 files changed, 328 deletions(-) delete mode 100644 drivers/video/omap/lcd_sx1.c diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile index 99da8b6d2c36..ed13889c1162 100644 --- a/drivers/video/omap/Makefile +++ b/drivers/video/omap/Makefile @@ -23,7 +23,6 @@ objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o -objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o omapfb-objs := $(objs-yy) diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c deleted file mode 100644 index e55de201b8ff..000000000000 --- a/drivers/video/omap/lcd_sx1.c +++ /dev/null @@ -1,327 +0,0 @@ -/* - * LCD panel support for the Siemens SX1 mobile phone - * - * Current version : Vovan888@gmail.com, great help from FCA00000 - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- */ - -#include -#include -#include -#include - -#include -#include -#include -#include - -/* - * OMAP310 GPIO registers - */ -#define GPIO_DATA_INPUT 0xfffce000 -#define GPIO_DATA_OUTPUT 0xfffce004 -#define GPIO_DIR_CONTROL 0xfffce008 -#define GPIO_INT_CONTROL 0xfffce00c -#define GPIO_INT_MASK 0xfffce010 -#define GPIO_INT_STATUS 0xfffce014 -#define GPIO_PIN_CONTROL 0xfffce018 - - -#define A_LCD_SSC_RD 3 -#define A_LCD_SSC_SD 7 -#define _A_LCD_RESET 9 -#define _A_LCD_SSC_CS 12 -#define _A_LCD_SSC_A0 13 - -#define DSP_REG 0xE1017024 - -const unsigned char INIT_1[12] = { - 0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00 -}; - -const unsigned char INIT_2[127] = { - 0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00, - 0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00, - 0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00, - 0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01, - 0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01, - 0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01, - 0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01, - 0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01, - 0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01, - 0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01, - 0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01, - 0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01, - 0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01, - 0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01, - 0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02, - 0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00 -}; - -const unsigned char INIT_3[15] = { - 0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59, - 0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF -}; - -static void epson_sendbyte(int flag, unsigned char byte) -{ - int i, shifter = 0x80; - - if (!flag) - gpio_set_value(_A_LCD_SSC_A0, 0); - mdelay(2); - gpio_set_value(A_LCD_SSC_RD, 1); - - gpio_set_value(A_LCD_SSC_SD, flag); - - OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200); - OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202); - for (i = 0; i < 8; i++) { - OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200); - gpio_set_value(A_LCD_SSC_SD, shifter & byte); - OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202); - shifter >>= 1; - } - gpio_set_value(_A_LCD_SSC_A0, 1); -} - -static void init_system(void) -{ - omap_mcbsp_request(OMAP_MCBSP3); - omap_mcbsp_stop(OMAP_MCBSP3); -} - -static void setup_GPIO(void) -{ - /* new wave */ - gpio_request(A_LCD_SSC_RD, "lcd_ssc_rd"); - gpio_request(A_LCD_SSC_SD, "lcd_ssc_sd"); - gpio_request(_A_LCD_RESET, "lcd_reset"); - gpio_request(_A_LCD_SSC_CS, "lcd_ssc_cs"); - gpio_request(_A_LCD_SSC_A0, "lcd_ssc_a0"); - - /* set GPIOs to output, with initial data */ - gpio_direction_output(A_LCD_SSC_RD, 1); - gpio_direction_output(A_LCD_SSC_SD, 0); - gpio_direction_output(_A_LCD_RESET, 0); - gpio_direction_output(_A_LCD_SSC_CS, 1); - gpio_direction_output(_A_LCD_SSC_A0, 1); -} - -static void display_init(void) -{ - int i; - - omap_cfg_reg(MCBSP3_CLKX); - - mdelay(2); - setup_GPIO(); - mdelay(2); - - /* reset LCD */ - gpio_set_value(A_LCD_SSC_SD, 1); - epson_sendbyte(0, 0x25); - - gpio_set_value(_A_LCD_RESET, 0); - mdelay(10); - gpio_set_value(_A_LCD_RESET, 1); - - gpio_set_value(_A_LCD_SSC_CS, 1); - mdelay(2); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD, phase 1 */ - epson_sendbyte(0, 0xCA); - for (i = 0; i < 10; i++) - epson_sendbyte(1, INIT_1[i]); - gpio_set_value(_A_LCD_SSC_CS, 1); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 2 */ - epson_sendbyte(0, 0xCB); - for (i = 0; i < 125; i++) - epson_sendbyte(1, INIT_2[i]); - 
gpio_set_value(_A_LCD_SSC_CS, 1); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 2a */ - epson_sendbyte(0, 0xCC); - for (i = 0; i < 14; i++) - epson_sendbyte(1, INIT_3[i]); - gpio_set_value(_A_LCD_SSC_CS, 1); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 3 */ - epson_sendbyte(0, 0xBC); - epson_sendbyte(1, 0x08); - gpio_set_value(_A_LCD_SSC_CS, 1); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 4 */ - epson_sendbyte(0, 0x07); - epson_sendbyte(1, 0x05); - gpio_set_value(_A_LCD_SSC_CS, 1); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 5 */ - epson_sendbyte(0, 0x94); - gpio_set_value(_A_LCD_SSC_CS, 1); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 6 */ - epson_sendbyte(0, 0xC6); - epson_sendbyte(1, 0x80); - gpio_set_value(_A_LCD_SSC_CS, 1); - mdelay(100); /* used to be 1000 */ - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 7 */ - epson_sendbyte(0, 0x16); - epson_sendbyte(1, 0x02); - epson_sendbyte(1, 0x00); - epson_sendbyte(1, 0xB1); - epson_sendbyte(1, 0x00); - gpio_set_value(_A_LCD_SSC_CS, 1); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 8 */ - epson_sendbyte(0, 0x76); - epson_sendbyte(1, 0x00); - epson_sendbyte(1, 0x00); - epson_sendbyte(1, 0xDB); - epson_sendbyte(1, 0x00); - gpio_set_value(_A_LCD_SSC_CS, 1); - gpio_set_value(_A_LCD_SSC_CS, 0); - - /* init LCD phase 9 */ - epson_sendbyte(0, 0xAF); - gpio_set_value(_A_LCD_SSC_CS, 1); -} - -static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) -{ - return 0; -} - -static void sx1_panel_cleanup(struct lcd_panel *panel) -{ -} - -static void sx1_panel_disable(struct lcd_panel *panel) -{ - printk(KERN_INFO "SX1: LCD panel disable\n"); - sx1_setmmipower(0); - gpio_set_value(_A_LCD_SSC_CS, 1); - - epson_sendbyte(0, 0x25); - gpio_set_value(_A_LCD_SSC_CS, 0); - - epson_sendbyte(0, 0xAE); - gpio_set_value(_A_LCD_SSC_CS, 1); - mdelay(100); - gpio_set_value(_A_LCD_SSC_CS, 0); - - epson_sendbyte(0, 0x95); - gpio_set_value(_A_LCD_SSC_CS, 1); -} - -static int sx1_panel_enable(struct lcd_panel *panel) -{ - printk(KERN_INFO "lcd_sx1: LCD panel enable\n"); - init_system(); - display_init(); - - sx1_setmmipower(1); - sx1_setbacklight(0x18); - sx1_setkeylight (0x06); - return 0; -} - - -static unsigned long sx1_panel_get_caps(struct lcd_panel *panel) -{ - return 0; -} - -struct lcd_panel sx1_panel = { - .name = "sx1", - .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | - OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK | - OMAP_LCDC_INV_OUTPUT_EN, - - .x_res = 176, - .y_res = 220, - .data_lines = 16, - .bpp = 16, - .hsw = 5, - .hfp = 5, - .hbp = 5, - .vsw = 2, - .vfp = 1, - .vbp = 1, - .pixel_clock = 1500, - - .init = sx1_panel_init, - .cleanup = sx1_panel_cleanup, - .enable = sx1_panel_enable, - .disable = sx1_panel_disable, - .get_caps = sx1_panel_get_caps, -}; - -static int sx1_panel_probe(struct platform_device *pdev) -{ - omapfb_register_panel(&sx1_panel); - return 0; -} - -static int sx1_panel_remove(struct platform_device *pdev) -{ - return 0; -} - -static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - return 0; -} - -static int sx1_panel_resume(struct platform_device *pdev) -{ - return 0; -} - -struct platform_driver sx1_panel_driver = { - .probe = sx1_panel_probe, - .remove = sx1_panel_remove, - .suspend = sx1_panel_suspend, - .resume = sx1_panel_resume, - .driver = { - .name = "lcd_sx1", - .owner = THIS_MODULE, - }, -}; - -static int sx1_panel_drv_init(void) -{ - return platform_driver_register(&sx1_panel_driver); 
-} - -static void sx1_panel_drv_cleanup(void) -{ - platform_driver_unregister(&sx1_panel_driver); -} - -module_init(sx1_panel_drv_init); -module_exit(sx1_panel_drv_cleanup); From 147dcf5489fb86c4bfe400520186f9f11b304783 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Tue, 25 Nov 2008 15:11:12 -0800 Subject: [PATCH 05/18] ARM: OMAP: Typo fix for clock_allow_idle The second clk_deny_idle instance should be clk_allow_idle instead. Signed-off-by: Amit Kucheria Signed-off-by: Tony Lindgren --- arch/arm/plat-omap/include/mach/pm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/plat-omap/include/mach/pm.h b/arch/arm/plat-omap/include/mach/pm.h index 768eb6e7abcf..2a9c27ad4c37 100644 --- a/arch/arm/plat-omap/include/mach/pm.h +++ b/arch/arm/plat-omap/include/mach/pm.h @@ -128,7 +128,7 @@ void clk_deny_idle(struct clk *clk); * clk_allow_idle - Counters previous clk_deny_idle * @clk: clock signal handle */ -void clk_deny_idle(struct clk *clk); +void clk_allow_idle(struct clk *clk); extern void omap_pm_idle(void); extern void omap_pm_suspend(void); From 723fdb781abfe78d8ba7d911abbb581722348aa7 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Wed, 26 Nov 2008 14:35:16 -0800 Subject: [PATCH 06/18] ARM: OMAP: Fixes for suspend / resume GPIO wake-up handling Use the correct wake-up enable register, and make it work with 34xx also. Signed-off-by: Tero Kristo Signed-off-by: Kevin Hilman Signed-off-by: Tony Lindgren --- arch/arm/plat-omap/gpio.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 8679fbca6bbe..424049d83fbe 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -101,6 +101,7 @@ #define OMAP24XX_GPIO_IRQSTATUS2 0x0028 #define OMAP24XX_GPIO_IRQENABLE2 0x002c #define OMAP24XX_GPIO_IRQENABLE1 0x001c +#define OMAP24XX_GPIO_WAKE_EN 0x0020 #define OMAP24XX_GPIO_CTRL 0x0030 #define OMAP24XX_GPIO_OE 0x0034 #define OMAP24XX_GPIO_DATAIN 0x0038 @@ -1551,7 +1552,7 @@ static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) #endif #if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) case METHOD_GPIO_24XX: - wake_status = bank->base + OMAP24XX_GPIO_SETWKUENA; + wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN; wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA; wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA; break; @@ -1574,7 +1575,7 @@ static int omap_gpio_resume(struct sys_device *dev) { int i; - if (!cpu_is_omap24xx() && !cpu_is_omap16xx()) + if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) return 0; for (i = 0; i < gpio_bank_count; i++) { From 487ff32082a9bd7489d8185cf7d7a2fdf18a22fa Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 27 Nov 2008 11:13:58 +0000 Subject: [PATCH 07/18] Allow architectures to override copy_user_highpage() With aliasing VIPT cache support, the ARM implementation of clear_user_page() and copy_user_page() sets up a temporary kernel space mapping such that we have the same cache colour as the userspace page. This avoids having to consider any userspace aliases from this operation. However, when highmem is enabled, kmap_atomic() have to setup mappings. The copy_user_highpage() and clear_user_highpage() call these functions before delegating the copies to copy_user_page() and clear_user_page(). The effect of this is that each of the *_user_highpage() functions setup their own kmap mapping, followed by the *_user_page() functions setting up another mapping. This is rather wasteful. 
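For reference, the generic helpers in include/linux/highmem.h have this shape (an illustrative sketch taken from the clear_user_highpage() definition in the hunk below; copy_user_highpage() follows the same kmap/kunmap pattern, and the comments are added here for explanation only):

	static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
	{
		void *addr = kmap_atomic(page, KM_USER0);	/* first mapping, set up by the generic helper */
		clear_user_page(addr, vaddr, page);		/* ARM aliasing-VIPT version then sets up its own coloured mapping */
		kunmap_atomic(addr, KM_USER0);
	}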
Thankfully, copy_user_highpage() can be overriden by architectures by defining __HAVE_ARCH_COPY_USER_HIGHPAGE. However, replacement of clear_user_highpage() is more difficult because its inline definition is not conditional. It seems that you're expected to define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and provide a replacement __alloc_zeroed_user_highpage() implementation instead. The allocation itself is fine, so we don't want to override that. What we really want to do is to override clear_user_highpage() with our own version which doesn't kmap_atomic() unnecessarily. Other VIPT architectures (PARISC and SH) would also like to override this function as well. Acked-by: Hugh Dickins Acked-by: James Bottomley Acked-by: Paul Mundt Signed-off-by: Russell King --- include/linux/highmem.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 7dcbc82f3b7b..13875ce9112a 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -63,12 +63,14 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx) #endif /* CONFIG_HIGHMEM */ /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ +#ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { void *addr = kmap_atomic(page, KM_USER0); clear_user_page(addr, vaddr, page); kunmap_atomic(addr, KM_USER0); } +#endif #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /** From d73e60b7144a86baf0fdfcc9537a70bb4f72e11c Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 31 Oct 2008 13:08:02 +0000 Subject: [PATCH 08/18] [ARM] copypage: convert assembly files to C Signed-off-by: Russell King --- arch/arm/mm/copypage-feroceon.S | 95 ----------------------------- arch/arm/mm/copypage-feroceon.c | 100 +++++++++++++++++++++++++++++++ arch/arm/mm/copypage-v3.S | 67 --------------------- arch/arm/mm/copypage-v3.c | 69 +++++++++++++++++++++ arch/arm/mm/copypage-v4wb.S | 79 ------------------------- arch/arm/mm/copypage-v4wb.c | 83 ++++++++++++++++++++++++++ arch/arm/mm/copypage-v4wt.S | 73 ----------------------- arch/arm/mm/copypage-v4wt.c | 77 ++++++++++++++++++++++++ arch/arm/mm/copypage-xsc3.S | 97 ------------------------------ arch/arm/mm/copypage-xsc3.c | 102 ++++++++++++++++++++++++++++++++ 10 files changed, 431 insertions(+), 411 deletions(-) delete mode 100644 arch/arm/mm/copypage-feroceon.S create mode 100644 arch/arm/mm/copypage-feroceon.c delete mode 100644 arch/arm/mm/copypage-v3.S create mode 100644 arch/arm/mm/copypage-v3.c delete mode 100644 arch/arm/mm/copypage-v4wb.S create mode 100644 arch/arm/mm/copypage-v4wb.c delete mode 100644 arch/arm/mm/copypage-v4wt.S create mode 100644 arch/arm/mm/copypage-v4wt.c delete mode 100644 arch/arm/mm/copypage-xsc3.S create mode 100644 arch/arm/mm/copypage-xsc3.c diff --git a/arch/arm/mm/copypage-feroceon.S b/arch/arm/mm/copypage-feroceon.S deleted file mode 100644 index 7eb0d320d240..000000000000 --- a/arch/arm/mm/copypage-feroceon.S +++ /dev/null @@ -1,95 +0,0 @@ -/* - * linux/arch/arm/lib/copypage-feroceon.S - * - * Copyright (C) 2008 Marvell Semiconductors - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This handles copy_user_page and clear_user_page on Feroceon - * more optimally than the generic implementations. 
- */ -#include -#include -#include - - .text - .align 5 - -ENTRY(feroceon_copy_user_page) - stmfd sp!, {r4-r9, lr} - mov ip, #PAGE_SZ -1: mov lr, r1 - ldmia r1!, {r2 - r9} - pld [lr, #32] - pld [lr, #64] - pld [lr, #96] - pld [lr, #128] - pld [lr, #160] - pld [lr, #192] - pld [lr, #224] - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - subs ip, ip, #(32 * 8) - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - bne 1b - mcr p15, 0, ip, c7, c10, 4 @ drain WB - ldmfd sp!, {r4-r9, pc} - - .align 5 - -ENTRY(feroceon_clear_user_page) - stmfd sp!, {r4-r7, lr} - mov r1, #PAGE_SZ/32 - mov r2, #0 - mov r3, #0 - mov r4, #0 - mov r5, #0 - mov r6, #0 - mov r7, #0 - mov ip, #0 - mov lr, #0 -1: stmia r0, {r2-r7, ip, lr} - subs r1, r1, #1 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - bne 1b - mcr p15, 0, r1, c7, c10, 4 @ drain WB - ldmfd sp!, {r4-r7, pc} - - __INITDATA - - .type feroceon_user_fns, #object -ENTRY(feroceon_user_fns) - .long feroceon_clear_user_page - .long feroceon_copy_user_page - .size feroceon_user_fns, . - feroceon_user_fns diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c new file mode 100644 index 000000000000..c8347670ab00 --- /dev/null +++ b/arch/arm/mm/copypage-feroceon.c @@ -0,0 +1,100 @@ +/* + * linux/arch/arm/mm/copypage-feroceon.S + * + * Copyright (C) 2008 Marvell Semiconductors + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This handles copy_user_page and clear_user_page on Feroceon + * more optimally than the generic implementations. 
+ */ +#include + +#include + +void __attribute__((naked)) +feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4-r9, lr} \n\ + mov ip, %0 \n\ +1: mov lr, r1 \n\ + ldmia r1!, {r2 - r9} \n\ + pld [lr, #32] \n\ + pld [lr, #64] \n\ + pld [lr, #96] \n\ + pld [lr, #128] \n\ + pld [lr, #160] \n\ + pld [lr, #192] \n\ + pld [lr, #224] \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + subs ip, ip, #(32 * 8) \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + bne 1b \n\ + mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ + ldmfd sp!, {r4-r9, pc}" + : + : "I" (PAGE_SIZE)); +} + +void __attribute__((naked)) +feroceon_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4-r7, lr} \n\ + mov r1, %0 \n\ + mov r2, #0 \n\ + mov r3, #0 \n\ + mov r4, #0 \n\ + mov r5, #0 \n\ + mov r6, #0 \n\ + mov r7, #0 \n\ + mov ip, #0 \n\ + mov lr, #0 \n\ +1: stmia r0, {r2-r7, ip, lr} \n\ + subs r1, r1, #1 \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + bne 1b \n\ + mcr p15, 0, r1, c7, c10, 4 @ drain WB\n\ + ldmfd sp!, {r4-r7, pc}" + : + : "I" (PAGE_SIZE / 32)); +} + +struct cpu_user_fns feroceon_user_fns __initdata = { + .cpu_clear_user_page = feroceon_clear_user_page, + .cpu_copy_user_page = feroceon_copy_user_page, +}; + diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S deleted file mode 100644 index 2ee394b11bcb..000000000000 --- a/arch/arm/mm/copypage-v3.S +++ /dev/null @@ -1,67 +0,0 @@ -/* - * linux/arch/arm/lib/copypage.S - * - * Copyright (C) 1995-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * ASM optimised string functions - */ -#include -#include -#include -#include - - .text - .align 5 -/* - * ARMv3 optimised copy_user_page - * - * FIXME: do we need to handle cache stuff... - */ -ENTRY(v3_copy_user_page) - stmfd sp!, {r4, lr} @ 2 - mov r2, #PAGE_SZ/64 @ 1 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 -1: stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - subs r2, r2, #1 @ 1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmneia r1!, {r3, r4, ip, lr} @ 4 - bne 1b @ 1 - ldmfd sp!, {r4, pc} @ 3 - - .align 5 -/* - * ARMv3 optimised clear_user_page - * - * FIXME: do we need to handle cache stuff... 
- */ -ENTRY(v3_clear_user_page) - str lr, [sp, #-4]! - mov r1, #PAGE_SZ/64 @ 1 - mov r2, #0 @ 1 - mov r3, #0 @ 1 - mov ip, #0 @ 1 - mov lr, #0 @ 1 -1: stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - subs r1, r1, #1 @ 1 - bne 1b @ 1 - ldr pc, [sp], #4 - - __INITDATA - - .type v3_user_fns, #object -ENTRY(v3_user_fns) - .long v3_clear_user_page - .long v3_copy_user_page - .size v3_user_fns, . - v3_user_fns diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c new file mode 100644 index 000000000000..184911089e6c --- /dev/null +++ b/arch/arm/mm/copypage-v3.c @@ -0,0 +1,69 @@ +/* + * linux/arch/arm/mm/copypage-v3.c + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include + +#include + +/* + * ARMv3 optimised copy_user_page + * + * FIXME: do we need to handle cache stuff... + */ +void __attribute__((naked)) +v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\n\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %2 @ 1\n\ + ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ +1: stmia %1!, {r3, r4, ip, lr} @ 4\n\ + ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ + stmia %1!, {r3, r4, ip, lr} @ 4\n\ + ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ + stmia %1!, {r3, r4, ip, lr} @ 4\n\ + ldmia %0!, {r3, r4, ip, lr} @ 4\n\ + subs r2, r2, #1 @ 1\n\ + stmia %1!, {r3, r4, ip, lr} @ 4\n\ + ldmneia %0!, {r3, r4, ip, lr} @ 4\n\ + bne 1b @ 1\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64)); +} + +/* + * ARMv3 optimised clear_user_page + * + * FIXME: do we need to handle cache stuff... + */ +void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\n\ + str lr, [sp, #-4]!\n\ + mov r1, %1 @ 1\n\ + mov r2, #0 @ 1\n\ + mov r3, #0 @ 1\n\ + mov ip, #0 @ 1\n\ + mov lr, #0 @ 1\n\ +1: stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + subs r1, r1, #1 @ 1\n\ + bne 1b @ 1\n\ + ldr pc, [sp], #4" + : + : "r" (kaddr), "I" (PAGE_SIZE / 64)); +} + +struct cpu_user_fns v3_user_fns __initdata = { + .cpu_clear_user_page = v3_clear_user_page, + .cpu_copy_user_page = v3_copy_user_page, +}; diff --git a/arch/arm/mm/copypage-v4wb.S b/arch/arm/mm/copypage-v4wb.S deleted file mode 100644 index 83117354b1cd..000000000000 --- a/arch/arm/mm/copypage-v4wb.S +++ /dev/null @@ -1,79 +0,0 @@ -/* - * linux/arch/arm/lib/copypage.S - * - * Copyright (C) 1995-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * ASM optimised string functions - */ -#include -#include -#include - - .text - .align 5 -/* - * ARMv4 optimised copy_user_page - * - * We flush the destination cache lines just before we write the data into the - * corresponding address. Since the Dcache is read-allocate, this removes the - * Dcache aliasing issue. The writes will be forwarded to the write buffer, - * and merged as appropriate. - * - * Note: We rely on all ARMv4 processors implementing the "invalidate D line" - * instruction. If your processor does not supply this, you have to write your - * own copy_user_page that does the right thing. 
- */ -ENTRY(v4wb_copy_user_page) - stmfd sp!, {r4, lr} @ 2 - mov r2, #PAGE_SZ/64 @ 1 - ldmia r1!, {r3, r4, ip, lr} @ 4 -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - subs r2, r2, #1 @ 1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmneia r1!, {r3, r4, ip, lr} @ 4 - bne 1b @ 1 - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB - ldmfd sp!, {r4, pc} @ 3 - - .align 5 -/* - * ARMv4 optimised clear_user_page - * - * Same story as above. - */ -ENTRY(v4wb_clear_user_page) - str lr, [sp, #-4]! - mov r1, #PAGE_SZ/64 @ 1 - mov r2, #0 @ 1 - mov r3, #0 @ 1 - mov ip, #0 @ 1 - mov lr, #0 @ 1 -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - subs r1, r1, #1 @ 1 - bne 1b @ 1 - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB - ldr pc, [sp], #4 - - __INITDATA - - .type v4wb_user_fns, #object -ENTRY(v4wb_user_fns) - .long v4wb_clear_user_page - .long v4wb_copy_user_page - .size v4wb_user_fns, . - v4wb_user_fns diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c new file mode 100644 index 000000000000..230210822961 --- /dev/null +++ b/arch/arm/mm/copypage-v4wb.c @@ -0,0 +1,83 @@ +/* + * linux/arch/arm/mm/copypage-v4wb.c + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include + +#include + +/* + * ARMv4 optimised copy_user_page + * + * We flush the destination cache lines just before we write the data into the + * corresponding address. Since the Dcache is read-allocate, this removes the + * Dcache aliasing issue. The writes will be forwarded to the write buffer, + * and merged as appropriate. + * + * Note: We rely on all ARMv4 processors implementing the "invalidate D line" + * instruction. If your processor does not supply this, you have to write your + * own copy_user_page that does the right thing. + */ +void __attribute__((naked)) +v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %0 @ 1\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + subs r2, r2, #1 @ 1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ + bne 1b @ 1\n\ + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "I" (PAGE_SIZE / 64)); +} + +/* + * ARMv4 optimised clear_user_page + * + * Same story as above. 
+ */ +void __attribute__((naked)) +v4wb_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\ + str lr, [sp, #-4]!\n\ + mov r1, %0 @ 1\n\ + mov r2, #0 @ 1\n\ + mov r3, #0 @ 1\n\ + mov ip, #0 @ 1\n\ + mov lr, #0 @ 1\n\ +1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + subs r1, r1, #1 @ 1\n\ + bne 1b @ 1\n\ + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ + ldr pc, [sp], #4" + : + : "I" (PAGE_SIZE / 64)); +} + +struct cpu_user_fns v4wb_user_fns __initdata = { + .cpu_clear_user_page = v4wb_clear_user_page, + .cpu_copy_user_page = v4wb_copy_user_page, +}; diff --git a/arch/arm/mm/copypage-v4wt.S b/arch/arm/mm/copypage-v4wt.S deleted file mode 100644 index e1f2af28d549..000000000000 --- a/arch/arm/mm/copypage-v4wt.S +++ /dev/null @@ -1,73 +0,0 @@ -/* - * linux/arch/arm/lib/copypage-v4.S - * - * Copyright (C) 1995-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * ASM optimised string functions - * - * This is for CPUs with a writethrough cache and 'flush ID cache' is - * the only supported cache operation. - */ -#include -#include -#include - - .text - .align 5 -/* - * ARMv4 optimised copy_user_page - * - * Since we have writethrough caches, we don't have to worry about - * dirty data in the cache. However, we do have to ensure that - * subsequent reads are up to date. - */ -ENTRY(v4wt_copy_user_page) - stmfd sp!, {r4, lr} @ 2 - mov r2, #PAGE_SZ/64 @ 1 - ldmia r1!, {r3, r4, ip, lr} @ 4 -1: stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - subs r2, r2, #1 @ 1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmneia r1!, {r3, r4, ip, lr} @ 4 - bne 1b @ 1 - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache - ldmfd sp!, {r4, pc} @ 3 - - .align 5 -/* - * ARMv4 optimised clear_user_page - * - * Same story as above. - */ -ENTRY(v4wt_clear_user_page) - str lr, [sp, #-4]! - mov r1, #PAGE_SZ/64 @ 1 - mov r2, #0 @ 1 - mov r3, #0 @ 1 - mov ip, #0 @ 1 - mov lr, #0 @ 1 -1: stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - subs r1, r1, #1 @ 1 - bne 1b @ 1 - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache - ldr pc, [sp], #4 - - __INITDATA - - .type v4wt_user_fns, #object -ENTRY(v4wt_user_fns) - .long v4wt_clear_user_page - .long v4wt_copy_user_page - .size v4wt_user_fns, . - v4wt_user_fns diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c new file mode 100644 index 000000000000..d8ef39503ff0 --- /dev/null +++ b/arch/arm/mm/copypage-v4wt.c @@ -0,0 +1,77 @@ +/* + * linux/arch/arm/mm/copypage-v4wt.S + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This is for CPUs with a writethrough cache and 'flush ID cache' is + * the only supported cache operation. 
+ */ +#include + +#include + +/* + * ARMv4 optimised copy_user_page + * + * Since we have writethrough caches, we don't have to worry about + * dirty data in the cache. However, we do have to ensure that + * subsequent reads are up to date. + */ +void __attribute__((naked)) +v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %0 @ 1\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + subs r2, r2, #1 @ 1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ + bne 1b @ 1\n\ + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "I" (PAGE_SIZE / 64)); +} + +/* + * ARMv4 optimised clear_user_page + * + * Same story as above. + */ +void __attribute__((naked)) +v4wt_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\ + str lr, [sp, #-4]!\n\ + mov r1, %0 @ 1\n\ + mov r2, #0 @ 1\n\ + mov r3, #0 @ 1\n\ + mov ip, #0 @ 1\n\ + mov lr, #0 @ 1\n\ +1: stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + subs r1, r1, #1 @ 1\n\ + bne 1b @ 1\n\ + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ + ldr pc, [sp], #4" + : + : "I" (PAGE_SIZE / 64)); +} + +struct cpu_user_fns v4wt_user_fns __initdata = { + .cpu_clear_user_page = v4wt_clear_user_page, + .cpu_copy_user_page = v4wt_copy_user_page, +}; diff --git a/arch/arm/mm/copypage-xsc3.S b/arch/arm/mm/copypage-xsc3.S deleted file mode 100644 index 9a2cb4332b4c..000000000000 --- a/arch/arm/mm/copypage-xsc3.S +++ /dev/null @@ -1,97 +0,0 @@ -/* - * linux/arch/arm/lib/copypage-xsc3.S - * - * Copyright (C) 2004 Intel Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Adapted for 3rd gen XScale core, no more mini-dcache - * Author: Matt Gilbert (matthew.m.gilbert@intel.com) - */ - -#include -#include -#include - -/* - * General note: - * We don't really want write-allocate cache behaviour for these functions - * since that will just eat through 8K of the cache. - */ - - .text - .align 5 -/* - * XSC3 optimised copy_user_page - * r0 = destination - * r1 = source - * r2 = virtual user address of ultimate destination page - * - * The source page may have some clean entries in the cache already, but we - * can safely ignore them - break_cow() will flush them out of the cache - * if we eventually end up using our copied page. 
- * - */ -ENTRY(xsc3_mc_copy_user_page) - stmfd sp!, {r4, r5, lr} - mov lr, #PAGE_SZ/64-1 - - pld [r1, #0] - pld [r1, #32] -1: pld [r1, #64] - pld [r1, #96] - -2: ldrd r2, [r1], #8 - mov ip, r0 - ldrd r4, [r1], #8 - mcr p15, 0, ip, c7, c6, 1 @ invalidate - strd r2, [r0], #8 - ldrd r2, [r1], #8 - strd r4, [r0], #8 - ldrd r4, [r1], #8 - strd r2, [r0], #8 - strd r4, [r0], #8 - ldrd r2, [r1], #8 - mov ip, r0 - ldrd r4, [r1], #8 - mcr p15, 0, ip, c7, c6, 1 @ invalidate - strd r2, [r0], #8 - ldrd r2, [r1], #8 - subs lr, lr, #1 - strd r4, [r0], #8 - ldrd r4, [r1], #8 - strd r2, [r0], #8 - strd r4, [r0], #8 - bgt 1b - beq 2b - - ldmfd sp!, {r4, r5, pc} - - .align 5 -/* - * XScale optimised clear_user_page - * r0 = destination - * r1 = virtual user address of ultimate destination page - */ -ENTRY(xsc3_mc_clear_user_page) - mov r1, #PAGE_SZ/32 - mov r2, #0 - mov r3, #0 -1: mcr p15, 0, r0, c7, c6, 1 @ invalidate line - strd r2, [r0], #8 - strd r2, [r0], #8 - strd r2, [r0], #8 - strd r2, [r0], #8 - subs r1, r1, #1 - bne 1b - mov pc, lr - - __INITDATA - - .type xsc3_mc_user_fns, #object -ENTRY(xsc3_mc_user_fns) - .long xsc3_mc_clear_user_page - .long xsc3_mc_copy_user_page - .size xsc3_mc_user_fns, . - xsc3_mc_user_fns diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c new file mode 100644 index 000000000000..51ed502e5777 --- /dev/null +++ b/arch/arm/mm/copypage-xsc3.c @@ -0,0 +1,102 @@ +/* + * linux/arch/arm/mm/copypage-xsc3.S + * + * Copyright (C) 2004 Intel Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Adapted for 3rd gen XScale core, no more mini-dcache + * Author: Matt Gilbert (matthew.m.gilbert@intel.com) + */ +#include + +#include + +/* + * General note: + * We don't really want write-allocate cache behaviour for these functions + * since that will just eat through 8K of the cache. + */ + +/* + * XSC3 optimised copy_user_page + * r0 = destination + * r1 = source + * r2 = virtual user address of ultimate destination page + * + * The source page may have some clean entries in the cache already, but we + * can safely ignore them - break_cow() will flush them out of the cache + * if we eventually end up using our copied page. 
+ * + */ +void __attribute__((naked)) +xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4, r5, lr} \n\ + mov lr, %0 \n\ + \n\ + pld [r1, #0] \n\ + pld [r1, #32] \n\ +1: pld [r1, #64] \n\ + pld [r1, #96] \n\ + \n\ +2: ldrd r2, [r1], #8 \n\ + mov ip, r0 \n\ + ldrd r4, [r1], #8 \n\ + mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ + strd r2, [r0], #8 \n\ + ldrd r2, [r1], #8 \n\ + strd r4, [r0], #8 \n\ + ldrd r4, [r1], #8 \n\ + strd r2, [r0], #8 \n\ + strd r4, [r0], #8 \n\ + ldrd r2, [r1], #8 \n\ + mov ip, r0 \n\ + ldrd r4, [r1], #8 \n\ + mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ + strd r2, [r0], #8 \n\ + ldrd r2, [r1], #8 \n\ + subs lr, lr, #1 \n\ + strd r4, [r0], #8 \n\ + ldrd r4, [r1], #8 \n\ + strd r2, [r0], #8 \n\ + strd r4, [r0], #8 \n\ + bgt 1b \n\ + beq 2b \n\ + \n\ + ldmfd sp!, {r4, r5, pc}" + : + : "I" (PAGE_SIZE / 64 - 1)); +} + +/* + * XScale optimised clear_user_page + * r0 = destination + * r1 = virtual user address of ultimate destination page + */ +void __attribute__((naked)) +xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\ + mov r1, %0 \n\ + mov r2, #0 \n\ + mov r3, #0 \n\ +1: mcr p15, 0, r0, c7, c6, 1 @ invalidate line\n\ + strd r2, [r0], #8 \n\ + strd r2, [r0], #8 \n\ + strd r2, [r0], #8 \n\ + strd r2, [r0], #8 \n\ + subs r1, r1, #1 \n\ + bne 1b \n\ + mov pc, lr" + : + : "I" (PAGE_SIZE / 32)); +} + +struct cpu_user_fns xsc3_mc_user_fns __initdata = { + .cpu_clear_user_page = xsc3_mc_clear_user_page, + .cpu_copy_user_page = xsc3_mc_copy_user_page, +}; From 063b0a4207e43acbeff3d4b09f43e750e0212b48 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 31 Oct 2008 15:08:35 +0000 Subject: [PATCH 09/18] [ARM] copypage: provide our own copy_user_highpage() We used to override the copy_user_page() function. However, this is not only inefficient, it also causes additional complexity for highmem support, since we convert from a struct page to a kernel direct mapped address and back to a struct page again. Moreover, with highmem support, we end up pointlessly setting up kmap entries for pages which we're going to remap. So, push the kmapping down into the copypage implementation files where it's required. 
Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 23 ++++++++----- arch/arm/mm/copypage-feroceon.c | 23 +++++++++---- arch/arm/mm/copypage-v3.c | 23 +++++++++---- arch/arm/mm/copypage-v4mc.c | 21 +++++++----- arch/arm/mm/copypage-v4wb.c | 25 ++++++++++---- arch/arm/mm/copypage-v4wt.c | 23 +++++++++---- arch/arm/mm/copypage-v6.c | 61 ++++++++++++++++++++------------- arch/arm/mm/copypage-xsc3.c | 24 +++++++++---- arch/arm/mm/copypage-xscale.c | 19 +++++----- arch/arm/mm/proc-syms.c | 2 +- 10 files changed, 161 insertions(+), 83 deletions(-) diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index bed1c0a00368..1581b8cf8f33 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -108,30 +108,35 @@ #error Unknown user operations model #endif +struct page; + struct cpu_user_fns { void (*cpu_clear_user_page)(void *p, unsigned long user); - void (*cpu_copy_user_page)(void *to, const void *from, - unsigned long user); + void (*cpu_copy_user_highpage)(struct page *to, struct page *from, + unsigned long vaddr); }; #ifdef MULTI_USER extern struct cpu_user_fns cpu_user; -#define __cpu_clear_user_page cpu_user.cpu_clear_user_page -#define __cpu_copy_user_page cpu_user.cpu_copy_user_page +#define __cpu_clear_user_page cpu_user.cpu_clear_user_page +#define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage #else -#define __cpu_clear_user_page __glue(_USER,_clear_user_page) -#define __cpu_copy_user_page __glue(_USER,_copy_user_page) +#define __cpu_clear_user_page __glue(_USER,_clear_user_page) +#define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage) extern void __cpu_clear_user_page(void *p, unsigned long user); -extern void __cpu_copy_user_page(void *to, const void *from, - unsigned long user); +extern void __cpu_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr); #endif #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) -#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE +#define copy_user_highpage(to,from,vaddr,vma) \ + __cpu_copy_user_highpage(to, from, vaddr) #define clear_page(page) memzero((void *)(page), PAGE_SIZE) extern void copy_page(void *to, const void *from); diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index c8347670ab00..edd71686b8df 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -7,15 +7,14 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * This handles copy_user_page and clear_user_page on Feroceon + * This handles copy_user_highpage and clear_user_page on Feroceon * more optimally than the generic implementations. 
*/ #include +#include -#include - -void __attribute__((naked)) -feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +feroceon_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4-r9, lr} \n\ @@ -68,6 +67,18 @@ feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "I" (PAGE_SIZE)); } +void feroceon_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + feroceon_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + void __attribute__((naked)) feroceon_clear_user_page(void *kaddr, unsigned long vaddr) { @@ -95,6 +106,6 @@ feroceon_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns feroceon_user_fns __initdata = { .cpu_clear_user_page = feroceon_clear_user_page, - .cpu_copy_user_page = feroceon_copy_user_page, + .cpu_copy_user_highpage = feroceon_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index 184911089e6c..52df8f04d3f7 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -8,16 +8,15 @@ * published by the Free Software Foundation. */ #include - -#include +#include /* - * ARMv3 optimised copy_user_page + * ARMv3 optimised copy_user_highpage * * FIXME: do we need to handle cache stuff... */ -void __attribute__((naked)) -v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +v3_copy_user_page(void *kto, const void *kfrom) { asm("\n\ stmfd sp!, {r4, lr} @ 2\n\ @@ -38,6 +37,18 @@ v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64)); } +void v3_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + v3_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + /* * ARMv3 optimised clear_user_page * @@ -65,5 +76,5 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns v3_user_fns __initdata = { .cpu_clear_user_page = v3_clear_user_page, - .cpu_copy_user_page = v3_copy_user_page, + .cpu_copy_user_highpage = v3_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 8d33e2549344..a7dc838fee76 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -15,8 +15,8 @@ */ #include #include +#include -#include #include #include #include @@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(minicache_lock); /* - * ARMv4 mini-dcache optimised copy_user_page + * ARMv4 mini-dcache optimised copy_user_highpage * * We flush the destination cache lines just before we write the data into the * corresponding address. Since the Dcache is read-allocate, this removes the @@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock); * * Note: We rely on all ARMv4 processors implementing the "invalidate D line" * instruction. If your processor does not supply this, you have to write your - * own copy_user_page that does the right thing. + * own copy_user_highpage that does the right thing. 
*/ static void __attribute__((naked)) mc_copy_user_page(void *from, void *to) @@ -68,21 +68,24 @@ mc_copy_user_page(void *from, void *to) : "r" (from), "r" (to), "I" (PAGE_SIZE / 64)); } -void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +void v4_mc_copy_user_highpage(struct page *from, struct page *to, + unsigned long vaddr) { - struct page *page = virt_to_page(kfrom); + void *kto = kmap_atomic(to, KM_USER1); - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) - __flush_dcache_page(page_mapping(page), page); + if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + __flush_dcache_page(page_mapping(from), from); spin_lock(&minicache_lock); - set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); + set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); flush_tlb_kernel_page(0xffff8000); mc_copy_user_page((void *)0xffff8000, kto); spin_unlock(&minicache_lock); + + kunmap_atomic(kto, KM_USER1); } /* @@ -113,5 +116,5 @@ v4_mc_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns v4_mc_user_fns __initdata = { .cpu_clear_user_page = v4_mc_clear_user_page, - .cpu_copy_user_page = v4_mc_copy_user_page, + .cpu_copy_user_highpage = v4_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 230210822961..186a68a794a9 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -8,11 +8,10 @@ * published by the Free Software Foundation. */ #include - -#include +#include /* - * ARMv4 optimised copy_user_page + * ARMv4 optimised copy_user_highpage * * We flush the destination cache lines just before we write the data into the * corresponding address. Since the Dcache is read-allocate, this removes the @@ -21,10 +20,10 @@ * * Note: We rely on all ARMv4 processors implementing the "invalidate D line" * instruction. If your processor does not supply this, you have to write your - * own copy_user_page that does the right thing. + * own copy_user_highpage that does the right thing. */ -void __attribute__((naked)) -v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +v4wb_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, lr} @ 2\n\ @@ -48,6 +47,18 @@ v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "I" (PAGE_SIZE / 64)); } +void v4wb_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + v4wb_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + /* * ARMv4 optimised clear_user_page * @@ -79,5 +90,5 @@ v4wb_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns v4wb_user_fns __initdata = { .cpu_clear_user_page = v4wb_clear_user_page, - .cpu_copy_user_page = v4wb_copy_user_page, + .cpu_copy_user_highpage = v4wb_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index d8ef39503ff0..86c2cfdbde03 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -11,18 +11,17 @@ * the only supported cache operation. */ #include - -#include +#include /* - * ARMv4 optimised copy_user_page + * ARMv4 optimised copy_user_highpage * * Since we have writethrough caches, we don't have to worry about * dirty data in the cache. However, we do have to ensure that * subsequent reads are up to date. 
*/ -void __attribute__((naked)) -v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +v4wt_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, lr} @ 2\n\ @@ -44,6 +43,18 @@ v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "I" (PAGE_SIZE / 64)); } +void v4wt_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + v4wt_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + /* * ARMv4 optimised clear_user_page * @@ -73,5 +84,5 @@ v4wt_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns v4wt_user_fns __initdata = { .cpu_clear_user_page = v4wt_clear_user_page, - .cpu_copy_user_page = v4wt_copy_user_page, + .cpu_copy_user_highpage = v4wt_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 0e21c0767580..2ea75d0f5048 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -10,8 +10,8 @@ #include #include #include +#include -#include #include #include #include @@ -33,9 +33,16 @@ static DEFINE_SPINLOCK(v6_lock); * Copy the user page. No aliasing to deal with so we can just * attack the kernel's existing mapping of these pages. */ -static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr) +static void v6_copy_user_highpage_nonaliasing(struct page *to, + struct page *from, unsigned long vaddr) { + void *kto, *kfrom; + + kfrom = kmap_atomic(from, KM_USER0); + kto = kmap_atomic(to, KM_USER1); copy_page(kto, kfrom); + kunmap_atomic(kto, KM_USER1); + kunmap_atomic(kfrom, KM_USER0); } /* @@ -48,26 +55,32 @@ static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr) } /* - * Copy the page, taking account of the cache colour. + * Discard data in the kernel mapping for the new page. + * FIXME: needs this MCRR to be supported. */ -static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr) +static void discard_old_kernel_data(void *kto) { - unsigned int offset = CACHE_COLOUR(vaddr); - unsigned long from, to; - struct page *page = virt_to_page(kfrom); - - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) - __flush_dcache_page(page_mapping(page), page); - - /* - * Discard data in the kernel mapping for the new page. - * FIXME: needs this MCRR to be supported. - */ __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" : : "r" (kto), "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES) : "cc"); +} + +/* + * Copy the page, taking account of the cache colour. 
+ */ +static void v6_copy_user_highpage_aliasing(struct page *to, + struct page *from, unsigned long vaddr) +{ + unsigned int offset = CACHE_COLOUR(vaddr); + unsigned long kfrom, kto; + + if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + __flush_dcache_page(page_mapping(from), from); + + /* FIXME: not highmem safe */ + discard_old_kernel_data(page_address(to)); /* * Now copy the page using the same cache colour as the @@ -75,16 +88,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo */ spin_lock(&v6_lock); - set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0); - set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0); + set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); + set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); - from = from_address + (offset << PAGE_SHIFT); - to = to_address + (offset << PAGE_SHIFT); + kfrom = from_address + (offset << PAGE_SHIFT); + kto = to_address + (offset << PAGE_SHIFT); - flush_tlb_kernel_page(from); - flush_tlb_kernel_page(to); + flush_tlb_kernel_page(kfrom); + flush_tlb_kernel_page(kto); - copy_page((void *)to, (void *)from); + copy_page((void *)kto, (void *)kfrom); spin_unlock(&v6_lock); } @@ -124,14 +137,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) struct cpu_user_fns v6_user_fns __initdata = { .cpu_clear_user_page = v6_clear_user_page_nonaliasing, - .cpu_copy_user_page = v6_copy_user_page_nonaliasing, + .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing, }; static int __init v6_userpage_init(void) { if (cache_is_vipt_aliasing()) { cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing; - cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing; + cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing; } return 0; diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 51ed502e5777..caa697ccd8db 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -11,8 +11,7 @@ * Author: Matt Gilbert (matthew.m.gilbert@intel.com) */ #include - -#include +#include /* * General note: @@ -21,18 +20,17 @@ */ /* - * XSC3 optimised copy_user_page + * XSC3 optimised copy_user_highpage * r0 = destination * r1 = source - * r2 = virtual user address of ultimate destination page * * The source page may have some clean entries in the cache already, but we * can safely ignore them - break_cow() will flush them out of the cache * if we eventually end up using our copied page. 
* */ -void __attribute__((naked)) -xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +xsc3_mc_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, r5, lr} \n\ @@ -72,6 +70,18 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "I" (PAGE_SIZE / 64 - 1)); } +void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + xsc3_mc_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + /* * XScale optimised clear_user_page * r0 = destination @@ -98,5 +108,5 @@ xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns xsc3_mc_user_fns __initdata = { .cpu_clear_user_page = xsc3_mc_clear_user_page, - .cpu_copy_user_page = xsc3_mc_copy_user_page, + .cpu_copy_user_highpage = xsc3_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index bad49331bbf9..01bafafce181 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -15,8 +15,8 @@ */ #include #include +#include -#include #include #include #include @@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(minicache_lock); /* - * XScale mini-dcache optimised copy_user_page + * XScale mini-dcache optimised copy_user_highpage * * We flush the destination cache lines just before we write the data into the * corresponding address. Since the Dcache is read-allocate, this removes the @@ -90,21 +90,24 @@ mc_copy_user_page(void *from, void *to) : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); } -void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +void xscale_mc_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) { - struct page *page = virt_to_page(kfrom); + void *kto = kmap_atomic(to, KM_USER1); - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) - __flush_dcache_page(page_mapping(page), page); + if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + __flush_dcache_page(page_mapping(from), from); spin_lock(&minicache_lock); - set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); + set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); flush_tlb_kernel_page(COPYPAGE_MINICACHE); mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); spin_unlock(&minicache_lock); + + kunmap_atomic(kto, KM_USER1); } /* @@ -133,5 +136,5 @@ xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns xscale_mc_user_fns __initdata = { .cpu_clear_user_page = xscale_mc_clear_user_page, - .cpu_copy_user_page = xscale_mc_copy_user_page, + .cpu_copy_user_highpage = xscale_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 2b5ba396e3a6..b9743e6416c4 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -34,7 +34,7 @@ EXPORT_SYMBOL(cpu_cache); #ifdef CONFIG_MMU #ifndef MULTI_USER EXPORT_SYMBOL(__cpu_clear_user_page); -EXPORT_SYMBOL(__cpu_copy_user_page); +EXPORT_SYMBOL(__cpu_copy_user_highpage); #else EXPORT_SYMBOL(cpu_user); #endif From 303c6443659bc1dc911356f5de149f48ff1d97b8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 31 Oct 2008 16:32:19 +0000 Subject: [PATCH 10/18] [ARM] clearpage: provide our own clear_user_highpage() For similar reasons as copy_user_page(), we want to avoid the additional kmap_atomic if 
it's unnecessary. Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 11 ++++++----- arch/arm/mm/copypage-feroceon.c | 20 ++++++++++---------- arch/arm/mm/copypage-v3.c | 13 +++++++------ arch/arm/mm/copypage-v4mc.c | 28 ++++++++++++++-------------- arch/arm/mm/copypage-v4wb.c | 28 ++++++++++++++-------------- arch/arm/mm/copypage-v4wt.c | 24 ++++++++++++------------ arch/arm/mm/copypage-v6.c | 23 +++++++++-------------- arch/arm/mm/copypage-xsc3.c | 25 +++++++++++++------------ arch/arm/mm/copypage-xscale.c | 26 ++++++++++++++------------ arch/arm/mm/proc-syms.c | 2 +- 10 files changed, 100 insertions(+), 100 deletions(-) diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 1581b8cf8f33..77747df713b4 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -111,7 +111,7 @@ struct page; struct cpu_user_fns { - void (*cpu_clear_user_page)(void *p, unsigned long user); + void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); void (*cpu_copy_user_highpage)(struct page *to, struct page *from, unsigned long vaddr); }; @@ -119,20 +119,21 @@ struct cpu_user_fns { #ifdef MULTI_USER extern struct cpu_user_fns cpu_user; -#define __cpu_clear_user_page cpu_user.cpu_clear_user_page +#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage #define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage #else -#define __cpu_clear_user_page __glue(_USER,_clear_user_page) +#define __cpu_clear_user_highpage __glue(_USER,_clear_user_highpage) #define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage) -extern void __cpu_clear_user_page(void *p, unsigned long user); +extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); extern void __cpu_copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr); #endif -#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) +#define clear_user_highpage(page,vaddr) \ + __cpu_clear_user_highpage(page, vaddr) #define __HAVE_ARCH_COPY_USER_HIGHPAGE #define copy_user_highpage(to,from,vaddr,vma) \ diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index edd71686b8df..c3651b2939c7 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -79,12 +79,11 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from, kunmap_atomic(kto, KM_USER0); } -void __attribute__((naked)) -feroceon_clear_user_page(void *kaddr, unsigned long vaddr) +void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\ - stmfd sp!, {r4-r7, lr} \n\ - mov r1, %0 \n\ + mov r1, %1 \n\ mov r2, #0 \n\ mov r3, #0 \n\ mov r4, #0 \n\ @@ -93,19 +92,20 @@ feroceon_clear_user_page(void *kaddr, unsigned long vaddr) mov r7, #0 \n\ mov ip, #0 \n\ mov lr, #0 \n\ -1: stmia r0, {r2-r7, ip, lr} \n\ +1: stmia %0, {r2-r7, ip, lr} \n\ subs r1, r1, #1 \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ add r0, r0, #32 \n\ bne 1b \n\ - mcr p15, 0, r1, c7, c10, 4 @ drain WB\n\ - ldmfd sp!, {r4-r7, pc}" + mcr p15, 0, r1, c7, c10, 4 @ drain WB" : - : "I" (PAGE_SIZE / 32)); + : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns feroceon_user_fns __initdata = { - .cpu_clear_user_page = feroceon_clear_user_page, + .cpu_clear_user_highpage = feroceon_clear_user_highpage, 
.cpu_copy_user_highpage = feroceon_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index 52df8f04d3f7..13ce0baa6ba5 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -54,10 +54,10 @@ void v3_copy_user_highpage(struct page *to, struct page *from, * * FIXME: do we need to handle cache stuff... */ -void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) +void v3_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\n\ - str lr, [sp, #-4]!\n\ mov r1, %1 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ @@ -68,13 +68,14 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) stmia %0!, {r2, r3, ip, lr} @ 4\n\ stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ - bne 1b @ 1\n\ - ldr pc, [sp], #4" + bne 1b @ 1" : - : "r" (kaddr), "I" (PAGE_SIZE / 64)); + : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "r1", "r2", "r3", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns v3_user_fns __initdata = { - .cpu_clear_user_page = v3_clear_user_page, + .cpu_clear_user_highpage = v3_clear_user_highpage, .cpu_copy_user_highpage = v3_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index a7dc838fee76..a5eae503a34f 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -91,30 +91,30 @@ void v4_mc_copy_user_highpage(struct page *from, struct page *to, /* * ARMv4 optimised clear_user_page */ -void __attribute__((naked)) -v4_mc_clear_user_page(void *kaddr, unsigned long vaddr) +void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { - asm volatile( - "str lr, [sp, #-4]!\n\ + void *kaddr = kmap_atomic(page, KM_USER0); + asm volatile("\ mov r1, %0 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ mov lr, #0 @ 1\n\ -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ +1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ - bne 1b @ 1\n\ - ldr pc, [sp], #4" + bne 1b @ 1" : - : "I" (PAGE_SIZE / 64)); + : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "r1", "r2", "r3", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns v4_mc_user_fns __initdata = { - .cpu_clear_user_page = v4_mc_clear_user_page, + .cpu_clear_user_highpage = v4_mc_clear_user_highpage, .cpu_copy_user_highpage = v4_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 186a68a794a9..9144a96037bf 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -64,31 +64,31 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, * * Same story as above. 
*/ -void __attribute__((naked)) -v4wb_clear_user_page(void *kaddr, unsigned long vaddr) +void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\ - str lr, [sp, #-4]!\n\ - mov r1, %0 @ 1\n\ + mov r1, %1 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ mov lr, #0 @ 1\n\ -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ +1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1\n\ - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ - ldr pc, [sp], #4" + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB" : - : "I" (PAGE_SIZE / 64)); + : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "r1", "r2", "r3", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns v4wb_user_fns __initdata = { - .cpu_clear_user_page = v4wb_clear_user_page, + .cpu_clear_user_highpage = v4wb_clear_user_highpage, .cpu_copy_user_highpage = v4wb_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 86c2cfdbde03..b8a345d6e77e 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -60,29 +60,29 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, * * Same story as above. */ -void __attribute__((naked)) -v4wt_clear_user_page(void *kaddr, unsigned long vaddr) +void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\ - str lr, [sp, #-4]!\n\ - mov r1, %0 @ 1\n\ + mov r1, %1 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ mov lr, #0 @ 1\n\ -1: stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ +1: stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1\n\ - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ - ldr pc, [sp], #4" + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache" : - : "I" (PAGE_SIZE / 64)); + : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "r1", "r2", "r3", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns v4wt_user_fns __initdata = { - .cpu_clear_user_page = v4wt_clear_user_page, + .cpu_clear_user_highpage = v4wt_clear_user_highpage, .cpu_copy_user_highpage = v4wt_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 2ea75d0f5048..4127a7bddfe5 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -49,9 +49,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, * Clear the user page. No aliasing to deal with so we can just * attack the kernel's existing mapping of this page. 
*/ -static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr) +static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); clear_page(kaddr); + kunmap_atomic(kaddr, KM_USER0); } /* @@ -107,20 +109,13 @@ static void v6_copy_user_highpage_aliasing(struct page *to, * so remap the kernel page into the same cache colour as the user * page. */ -static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) +static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) { unsigned int offset = CACHE_COLOUR(vaddr); unsigned long to = to_address + (offset << PAGE_SHIFT); - /* - * Discard data in the kernel mapping for the new page - * FIXME: needs this MCRR to be supported. - */ - __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" - : - : "r" (kaddr), - "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES) - : "cc"); + /* FIXME: not highmem safe */ + discard_old_kernel_data(page_address(page)); /* * Now clear the page using the same cache colour as @@ -128,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) */ spin_lock(&v6_lock); - set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0); + set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); flush_tlb_kernel_page(to); clear_page((void *)to); @@ -136,14 +131,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) } struct cpu_user_fns v6_user_fns __initdata = { - .cpu_clear_user_page = v6_clear_user_page_nonaliasing, + .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing, .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing, }; static int __init v6_userpage_init(void) { if (cache_is_vipt_aliasing()) { - cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing; + cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing; cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing; } diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index caa697ccd8db..0e7cb325ca4c 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -87,26 +87,27 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, * r0 = destination * r1 = virtual user address of ultimate destination page */ -void __attribute__((naked)) -xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr) +void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\ - mov r1, %0 \n\ + mov r1, %1 \n\ mov r2, #0 \n\ mov r3, #0 \n\ -1: mcr p15, 0, r0, c7, c6, 1 @ invalidate line\n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ +1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ subs r1, r1, #1 \n\ - bne 1b \n\ - mov pc, lr" + bne 1b" : - : "I" (PAGE_SIZE / 32)); + : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "r1", "r2", "r3"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns xsc3_mc_user_fns __initdata = { - .cpu_clear_user_page = xsc3_mc_clear_user_page, + .cpu_clear_user_highpage = xsc3_mc_clear_user_highpage, .cpu_copy_user_highpage = xsc3_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 01bafafce181..aa9f2ff9dce0 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ 
-113,28 +113,30 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, /* * XScale optimised clear_user_page */ -void __attribute__((naked)) -xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) +void +xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm volatile( - "mov r1, %0 \n\ + "mov r1, %1 \n\ mov r2, #0 \n\ mov r3, #0 \n\ -1: mov ip, r0 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ +1: mov ip, %0 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ subs r1, r1, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ - bne 1b \n\ - mov pc, lr" + bne 1b" : - : "I" (PAGE_SIZE / 32)); + : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "r1", "r2", "r3", "ip"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns xscale_mc_user_fns __initdata = { - .cpu_clear_user_page = xscale_mc_clear_user_page, + .cpu_clear_user_highpage = xscale_mc_clear_user_highpage, .cpu_copy_user_highpage = xscale_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index b9743e6416c4..4ad3bf291ad3 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -33,7 +33,7 @@ EXPORT_SYMBOL(cpu_cache); #ifdef CONFIG_MMU #ifndef MULTI_USER -EXPORT_SYMBOL(__cpu_clear_user_page); +EXPORT_SYMBOL(__cpu_clear_user_highpage); EXPORT_SYMBOL(__cpu_copy_user_highpage); #else EXPORT_SYMBOL(cpu_user); From 43ae286b7d4d8c4983bc263ef2e3cccc10dedb2b Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 4 Nov 2008 02:42:27 -0500 Subject: [PATCH 11/18] [ARM] fix a couple clear_user_highpage assembly constraints In all cases the kaddr is assigned an input register even though it is modified in the assembly code. Let's assign a new variable to the modified value and mark those inline asm with volatile otherwise they get optimized away because the output variable is otherwise not used. Also fix a few conversion errors in copypage-feroceon.c and copypage-v4mc.c. 
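As an illustration (not part of the patch), a minimal sketch of the constraint shape these helpers are converted to; the helper name and body below are made up for this note, only the operand pattern mirrors the fix. The pointer advanced by the post-indexed stmia is declared as a dummy output ("=r" (ptr)) tied to the input with "0", and the statement is marked volatile so it is not optimized away even though that output is never read:

	/* Sketch only: clears 16 bytes at kaddr, ARM32 inline asm. */
	static void zero_16_bytes(void *kaddr)
	{
		void *ptr;			/* receives the advanced pointer */

		asm volatile(
		"	mov	r2, #0		\n"
		"	mov	r3, #0		\n"
		"	stmia	%0!, {r2, r3}	\n"	/* each stmia stores 8 bytes   */
		"	stmia	%0!, {r2, r3}	\n"	/* and post-increments %0      */
		: "=r" (ptr)			/* %0 is modified, so it is an output */
		: "0" (kaddr)			/* starting value comes from kaddr    */
		: "r2", "r3", "memory");
	}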
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/copypage-feroceon.c | 12 ++++++------ arch/arm/mm/copypage-v3.c | 10 +++++----- arch/arm/mm/copypage-v4mc.c | 8 ++++---- arch/arm/mm/copypage-v4wb.c | 10 +++++----- arch/arm/mm/copypage-v4wt.c | 10 +++++----- arch/arm/mm/copypage-xsc3.c | 10 +++++----- arch/arm/mm/copypage-xscale.c | 8 ++++---- 7 files changed, 34 insertions(+), 34 deletions(-) diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index c3651b2939c7..c3ba6a94da0c 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -81,9 +81,9 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from, void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\ - mov r1, %1 \n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile ("\ + mov r1, %2 \n\ mov r2, #0 \n\ mov r3, #0 \n\ mov r4, #0 \n\ @@ -95,11 +95,11 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) 1: stmia %0, {r2-r7, ip, lr} \n\ subs r1, r1, #1 \n\ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ + add %0, %0, #32 \n\ bne 1b \n\ mcr p15, 0, r1, c7, c10, 4 @ drain WB" - : - : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 32) : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index 13ce0baa6ba5..70ed96c8af8e 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -56,9 +56,9 @@ void v3_copy_user_highpage(struct page *to, struct page *from, */ void v3_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\n\ - mov r1, %1 @ 1\n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile("\n\ + mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ @@ -69,8 +69,8 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr) stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1" - : - : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index a5eae503a34f..bdb5fd983b15 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -93,9 +93,9 @@ void v4_mc_copy_user_highpage(struct page *from, struct page *to, */ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); asm volatile("\ - mov r1, %0 @ 1\n\ + mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ @@ -108,8 +108,8 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1" - : - : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 9144a96037bf..3ec93dab7656 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -66,9 +66,9 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, */ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = 
kmap_atomic(page, KM_USER0); - asm("\ - mov r1, %1 @ 1\n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile("\ + mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ @@ -82,8 +82,8 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) subs r1, r1, #1 @ 1\n\ bne 1b @ 1\n\ mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB" - : - : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index b8a345d6e77e..0f1188efae45 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -62,9 +62,9 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, */ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\ - mov r1, %1 @ 1\n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile("\ + mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ @@ -76,8 +76,8 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) subs r1, r1, #1 @ 1\n\ bne 1b @ 1\n\ mcr p15, 0, r2, c7, c7, 0 @ flush ID cache" - : - : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 0e7cb325ca4c..39a994542cad 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -89,9 +89,9 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, */ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\ - mov r1, %1 \n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile ("\ + mov r1, %2 \n\ mov r2, #0 \n\ mov r3, #0 \n\ 1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\ @@ -101,8 +101,8 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) strd r2, [%0], #8 \n\ subs r1, r1, #1 \n\ bne 1b" - : - : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 32) : "r1", "r2", "r3"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index aa9f2ff9dce0..d18f2397ee2d 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -116,9 +116,9 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); asm volatile( - "mov r1, %1 \n\ + "mov r1, %2 \n\ mov r2, #0 \n\ mov r3, #0 \n\ 1: mov ip, %0 \n\ @@ -130,8 +130,8 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) subs r1, r1, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ bne 1b" - : - : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 32) : "r1", "r2", "r3", "ip"); kunmap_atomic(kaddr, KM_USER0); } From 4b5f32cee0cce7b9783ced5cbeabd17aa53c51fb Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 6 Oct 2008 13:24:40 -0400 Subject: [PATCH 12/18] [ARM] rationalize memory configuration code some more Currently there are two instances of struct meminfo: one in kernel/setup.c marked __initdata, and another in mm/init.c with permanent storage. 
Let's keep only the later to directly populate the permanent version from arm_add_memory(). Also move common validation tests between the MMU and non-MMU cases into arm_add_memory() to remove some duplication. Protection against overflowing the membank array is also moved in there in order to cover the kernel cmdline parsing path as well. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/setup.h | 6 ++++-- arch/arm/kernel/setup.c | 37 ++++++++++++++++++++---------------- arch/arm/mm/init.c | 12 ++++++------ arch/arm/mm/mm.h | 2 +- arch/arm/mm/mmu.c | 29 +++++++++++----------------- arch/arm/mm/nommu.c | 18 ++---------------- 6 files changed, 45 insertions(+), 59 deletions(-) diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index a65413ba121d..f2cd18a0932b 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -209,9 +209,11 @@ struct meminfo { struct membank bank[NR_BANKS]; }; +extern struct meminfo meminfo; + #define for_each_nodebank(iter,mi,no) \ - for (iter = 0; iter < mi->nr_banks; iter++) \ - if (mi->bank[iter].node == no) + for (iter = 0; iter < (mi)->nr_banks; iter++) \ + if ((mi)->bank[iter].node == no) #define bank_pfn_start(bank) __phys_to_pfn((bank)->start) #define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1f1eecca7f55..d21786712c88 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -59,7 +59,7 @@ static int __init fpe_setup(char *line) __setup("fpe=", fpe_setup); #endif -extern void paging_init(struct meminfo *, struct machine_desc *desc); +extern void paging_init(struct machine_desc *desc); extern void reboot_setup(char *str); extern void _text, _etext, __data_start, _edata, _end; @@ -112,7 +112,6 @@ static struct stack stacks[NR_CPUS]; char elf_platform[ELF_PLATFORM_SIZE]; EXPORT_SYMBOL(elf_platform); -static struct meminfo meminfo __initdata = { 0, }; static const char *cpu_name; static const char *machine_name; static char __initdata command_line[COMMAND_LINE_SIZE]; @@ -367,21 +366,34 @@ static struct machine_desc * __init setup_machine(unsigned int nr) return list; } -static void __init arm_add_memory(unsigned long start, unsigned long size) +static int __init arm_add_memory(unsigned long start, unsigned long size) { - struct membank *bank; + struct membank *bank = &meminfo.bank[meminfo.nr_banks]; + + if (meminfo.nr_banks >= NR_BANKS) { + printk(KERN_CRIT "NR_BANKS too low, " + "ignoring memory at %#lx\n", start); + return -EINVAL; + } /* * Ensure that start/size are aligned to a page boundary. * Size is appropriately rounded down, start is rounded up. */ size -= start & ~PAGE_MASK; - - bank = &meminfo.bank[meminfo.nr_banks++]; - bank->start = PAGE_ALIGN(start); bank->size = size & PAGE_MASK; bank->node = PHYS_TO_NID(start); + + /* + * Check whether this memory region has non-zero size or + * invalid node number. 
+ */ + if (bank->size == 0 || bank->node >= MAX_NUMNODES) + return -EINVAL; + + meminfo.nr_banks++; + return 0; } /* @@ -539,14 +551,7 @@ __tagtable(ATAG_CORE, parse_tag_core); static int __init parse_tag_mem32(const struct tag *tag) { - if (meminfo.nr_banks >= NR_BANKS) { - printk(KERN_WARNING - "Ignoring memory bank 0x%08x size %dKB\n", - tag->u.mem.start, tag->u.mem.size / 1024); - return -EINVAL; - } - arm_add_memory(tag->u.mem.start, tag->u.mem.size); - return 0; + return arm_add_memory(tag->u.mem.start, tag->u.mem.size); } __tagtable(ATAG_MEM, parse_tag_mem32); @@ -718,7 +723,7 @@ void __init setup_arch(char **cmdline_p) memcpy(boot_command_line, from, COMMAND_LINE_SIZE); boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; parse_cmdline(cmdline_p, from); - paging_init(&meminfo, mdesc); + paging_init(mdesc); request_standard_resources(&meminfo, mdesc); #ifdef CONFIG_SMP diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 82c4b4217989..b43da2479fa0 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -64,10 +64,11 @@ static int __init parse_tag_initrd2(const struct tag *tag) __tagtable(ATAG_INITRD2, parse_tag_initrd2); /* - * This is used to pass memory configuration data from paging_init - * to mem_init, and by show_mem() to skip holes in the memory map. + * This keeps memory configuration data used by a couple memory + * initialization functions, as well as show_mem() for the skipping + * of holes in the memory map. It is populated by arm_add_memory(). */ -static struct meminfo meminfo = { 0, }; +struct meminfo meminfo; void show_mem(void) { @@ -331,13 +332,12 @@ static void __init bootmem_free_node(int node, struct meminfo *mi) free_area_init_node(node, zone_size, start_pfn, zhole_size); } -void __init bootmem_init(struct meminfo *mi) +void __init bootmem_init(void) { + struct meminfo *mi = &meminfo; unsigned long memend_pfn = 0; int node, initrd_node; - memcpy(&meminfo, mi, sizeof(meminfo)); - /* * Locate which node contains the ramdisk image, if any. */ diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5d9f53907b4e..94367bdbb5a8 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -32,7 +32,7 @@ struct meminfo; struct pglist_data; void __init create_mapping(struct map_desc *md); -void __init bootmem_init(struct meminfo *mi); +void __init bootmem_init(void); void reserve_node_zero(struct pglist_data *pgdat); extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 7f36c825718d..6870805c31dd 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -653,13 +653,6 @@ __early_param("vmalloc=", early_vmalloc); static int __init check_membank_valid(struct membank *mb) { - /* - * Check whether this memory region has non-zero size or - * invalid node number. - */ - if (mb->size == 0 || mb->node >= MAX_NUMNODES) - return 0; - /* * Check whether this memory region would entirely overlap * the vmalloc area. 
@@ -689,18 +682,18 @@ static int __init check_membank_valid(struct membank *mb) return 1; } -static void __init sanity_check_meminfo(struct meminfo *mi) +static void __init sanity_check_meminfo(void) { int i, j; - for (i = 0, j = 0; i < mi->nr_banks; i++) { - if (check_membank_valid(&mi->bank[i])) - mi->bank[j++] = mi->bank[i]; + for (i = 0, j = 0; i < meminfo.nr_banks; i++) { + if (check_membank_valid(&meminfo.bank[i])) + meminfo.bank[j++] = meminfo.bank[i]; } - mi->nr_banks = j; + meminfo.nr_banks = j; } -static inline void prepare_page_table(struct meminfo *mi) +static inline void prepare_page_table(void) { unsigned long addr; @@ -721,7 +714,7 @@ static inline void prepare_page_table(struct meminfo *mi) * Clear out all the kernel space mappings, except for the first * memory bank, up to the end of the vmalloc region. */ - for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size); + for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0])); addr < VMALLOC_END; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); } @@ -880,14 +873,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc) * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ -void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) +void __init paging_init(struct machine_desc *mdesc) { void *zero_page; build_mem_type_table(); - sanity_check_meminfo(mi); - prepare_page_table(mi); - bootmem_init(mi); + sanity_check_meminfo(); + prepare_page_table(); + bootmem_init(); devicemaps_init(mdesc); top_pmd = pmd_off_k(0xffff0000); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 07b62b238979..c085f4e8248b 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -41,27 +41,13 @@ void __init reserve_node_zero(pg_data_t *pgdat) BOOTMEM_DEFAULT); } -static void __init sanity_check_meminfo(struct meminfo *mi) -{ - int i, j; - - for (i = 0, j = 0; i < mi->nr_banks; i++) { - struct membank *mb = &mi->bank[i]; - - if (mb->size != 0 && mb->node < MAX_NUMNODES) - mi->bank[j++] = mi->bank[i]; - } - mi->nr_banks = j; -} - /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ -void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) +void __init paging_init(struct machine_desc *mdesc) { - sanity_check_meminfo(mi); - bootmem_init(mi); + bootmem_init(); } /* From a1bbaec0cd2a59d4bb09b72e4541a8a12e480d5d Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 2 Sep 2008 11:44:21 -0400 Subject: [PATCH 13/18] [ARM] split highmem into its own memory bank Doing so will greatly simplify the bootmem initialization code as each bank is therefore entirely lowmem or highmem with no crossing between those zones. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 84 ++++++++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6870805c31dd..ab511d94d917 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -651,44 +651,62 @@ __early_param("vmalloc=", early_vmalloc); #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) -static int __init check_membank_valid(struct membank *mb) -{ - /* - * Check whether this memory region would entirely overlap - * the vmalloc area. 
- */ - if (phys_to_virt(mb->start) >= VMALLOC_MIN) { - printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " - "(vmalloc region overlap).\n", - mb->start, mb->start + mb->size - 1); - return 0; - } - - /* - * Check whether this memory region would partially overlap - * the vmalloc area. - */ - if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) || - phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) { - unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start); - - printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " - "to -%.8lx (vmalloc region overlap).\n", - mb->start, mb->start + mb->size - 1, - mb->start + newsize - 1); - mb->size = newsize; - } - - return 1; -} - static void __init sanity_check_meminfo(void) { int i, j; for (i = 0, j = 0; i < meminfo.nr_banks; i++) { - if (check_membank_valid(&meminfo.bank[i])) - meminfo.bank[j++] = meminfo.bank[i]; + struct membank *bank = &meminfo.bank[j]; + *bank = meminfo.bank[i]; + +#ifdef CONFIG_HIGHMEM + /* + * Split those memory banks which are partially overlapping + * the vmalloc area greatly simplifying things later. + */ + if (__va(bank->start) < VMALLOC_MIN && + bank->size > VMALLOC_MIN - __va(bank->start)) { + if (meminfo.nr_banks >= NR_BANKS) { + printk(KERN_CRIT "NR_BANKS too low, " + "ignoring high memory\n"); + } else { + memmove(bank + 1, bank, + (meminfo.nr_banks - i) * sizeof(*bank)); + meminfo.nr_banks++; + i++; + bank[1].size -= VMALLOC_MIN - __va(bank->start); + bank[1].start = __pa(VMALLOC_MIN - 1) + 1; + j++; + } + bank->size = VMALLOC_MIN - __va(bank->start); + } +#else + /* + * Check whether this memory bank would entirely overlap + * the vmalloc area. + */ + if (__va(bank->start) >= VMALLOC_MIN) { + printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " + "(vmalloc region overlap).\n", + bank->start, bank->start + bank->size - 1); + continue; + } + + /* + * Check whether this memory bank would partially overlap + * the vmalloc area. + */ + if (__va(bank->start + bank->size) > VMALLOC_MIN || + __va(bank->start + bank->size) < __va(bank->start)) { + unsigned long newsize = VMALLOC_MIN - __va(bank->start); + printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " + "to -%.8lx (vmalloc region overlap).\n", + bank->start, bank->start + bank->size - 1, + bank->start + newsize - 1); + bank->size = newsize; + } +#endif + j++; } meminfo.nr_banks = j; } From 6db015e49c03d42247d2a985475b833635406a4f Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 17 Sep 2008 14:50:42 -0400 Subject: [PATCH 14/18] [ARM] mem_init() cleanups Make free_area() arguments pfn based, and return number of freed pages. This will simplify highmem initialization later. Also, codepages, datapages and initpages are actually codesize, datasize and initsize. 
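As a side note (not from the patch), the kilobyte figure printed by the reworked free_area() falls straight out of the pfn count: each page frame is 1 << PAGE_SHIFT bytes, so (end - pfn) << (PAGE_SHIFT - 10) is the freed size in KB. A standalone sketch of the arithmetic, assuming the usual 4 KB ARM page size:

	#include <stdio.h>

	#define PAGE_SHIFT 12				/* assumption: 4 KB pages */

	int main(void)
	{
		unsigned long pfn = 0x100, end = 0x180;	/* 128 page frames */
		unsigned int size = (end - pfn) << (PAGE_SHIFT - 10);

		/* prints "128 pages -> 512K" */
		printf("%lu pages -> %uK\n", end - pfn, size);
		return 0;
	}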
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/init.c | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index b43da2479fa0..ab5c9abd5c34 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -394,20 +394,22 @@ void __init bootmem_init(void) max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; } -static inline void free_area(unsigned long addr, unsigned long end, char *s) +static inline int free_area(unsigned long pfn, unsigned long end, char *s) { - unsigned int size = (end - addr) >> 10; + unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); - for (; addr < end; addr += PAGE_SIZE) { - struct page *page = virt_to_page(addr); + for (; pfn < end; pfn++) { + struct page *page = pfn_to_page(pfn); ClearPageReserved(page); init_page_count(page); - free_page(addr); - totalram_pages++; + __free_page(page); + pages++; } if (size && s) printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); + + return pages; } static inline void @@ -478,13 +480,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) */ void __init mem_init(void) { - unsigned int codepages, datapages, initpages; + unsigned int codesize, datasize, initsize; int i, node; - codepages = &_etext - &_text; - datapages = &_end - &__data_start; - initpages = &__init_end - &__init_begin; - #ifndef CONFIG_DISCONTIGMEM max_mapnr = virt_to_page(high_memory) - mem_map; #endif @@ -501,7 +499,8 @@ void __init mem_init(void) #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL); + totalram_pages += free_area(PHYS_PFN_OFFSET, + __phys_to_pfn(__pa(swapper_pg_dir)), NULL); #endif /* @@ -509,18 +508,21 @@ void __init mem_init(void) * real number of pages we have in this system */ printk(KERN_INFO "Memory:"); - num_physpages = 0; for (i = 0; i < meminfo.nr_banks; i++) { num_physpages += bank_pfn_size(&meminfo.bank[i]); printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20); } - printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); + + codesize = &_etext - &_text; + datasize = &_end - &__data_start; + initsize = &__init_end - &__init_begin; + printk(KERN_NOTICE "Memory: %luKB available (%dK code, " "%dK data, %dK init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), - codepages >> 10, datapages >> 10, initpages >> 10); + codesize >> 10, datasize >> 10, initsize >> 10); if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; @@ -535,11 +537,10 @@ void __init mem_init(void) void free_initmem(void) { - if (!machine_is_integrator() && !machine_is_cintegrator()) { - free_area((unsigned long)(&__init_begin), - (unsigned long)(&__init_end), - "init"); - } + if (!machine_is_integrator() && !machine_is_cintegrator()) + totalram_pages += free_area(__phys_to_pfn(__pa(&__init_begin)), + __phys_to_pfn(__pa(&__init_end)), + "init"); } #ifdef CONFIG_BLK_DEV_INITRD @@ -549,7 +550,9 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) - free_area(start, end, "initrd"); + totalram_pages += free_area(__phys_to_pfn(__pa(start)), + __phys_to_pfn(__pa(end)), + "initrd"); } static int __init keepinitrd_setup(char *__unused) From 9210807cb5a3f19a0e954dd401e3a2c3626d1b48 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 19 Sep 2008 10:43:06 -0400 Subject: [PATCH 15/18] [ARM] prevent the vmalloc cmdline 
argument from eating all memory Commit 8d5796d2ec6b5a4e7a52861144e63af438d6f8f7 allows for the vmalloc area to be resized from the kernel cmdline. Make sure it cannot overlap with RAM entirely. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ab511d94d917..636cf8fc70ef 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -646,6 +646,13 @@ static void __init early_vmalloc(char **arg) "vmalloc area too small, limiting to %luMB\n", vmalloc_reserve >> 20); } + + if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { + vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); + printk(KERN_WARNING + "vmalloc area is too big, limiting to %luMB\n", + vmalloc_reserve >> 20); + } } __early_param("vmalloc=", early_vmalloc); From 252d4c276dc0895834af48743579cf19d1fa150b Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 11 Sep 2008 11:52:02 -0400 Subject: [PATCH 16/18] [ARM] remove bogus #ifdef CONFIG_HIGHMEM in show_pte() The restriction on !CONFIG_HIGHMEM is unneeded since page tables are currently never allocated with highmem pages, and actually disable PTE dump whenever highmem is configured. Let's have a dynamic test to better describe the current limitation instead. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/fault.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 2df8d9facf57..ffd8b228a139 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -83,13 +84,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr) break; } -#ifndef CONFIG_HIGHMEM /* We must not map this if we have highmem enabled */ + if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT))) + break; + pte = pte_offset_map(pmd, addr); printk(", *pte=%08lx", pte_val(*pte)); printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE])); pte_unmap(pte); -#endif } while(0); printk("\n"); From 75f4aa15cf05ce6d99c8261cf57dcd749877fd1c Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 5 Sep 2008 16:05:14 -0400 Subject: [PATCH 17/18] [ARM] unconditionally define __virt_to_phys and __phys_to_virt There is no machine class overriding this. If non linear translations are implemented again for some machines then this could be restored at that time. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 77764301844b..7238b3b50643 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -112,10 +112,8 @@ * private definitions which should NOT be used outside memory.h * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. */ -#ifndef __virt_to_phys #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) -#endif /* * Convert a physical address to a Page Frame Number and back From b5ee9002583fc14e6d45a04c18f208987a8fbced Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 5 Sep 2008 21:53:30 -0400 Subject: [PATCH 18/18] [ARM] remove a common set of __virt_to_bus definitions Let's provide an overridable default instead of having every machine class define __virt_to_bus and __bus_to_virt to the same thing. 
What most platforms are using is bus_addr == phys_addr so such is the default. One exception is ebsa110 which has no DMA what so ever, so the actual definition is not important except only for proper compilation. Also added a comment about the special footbridge bus translation. Let's also remove comments alluding to set_dma_addr which is not (and should not) be commonly used. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 5 +++++ arch/arm/mach-aaec2000/include/mach/memory.h | 3 --- arch/arm/mach-at91/include/mach/memory.h | 11 ---------- arch/arm/mach-clps711x/include/mach/memory.h | 20 +------------------ arch/arm/mach-clps7500/include/mach/memory.h | 7 ------- arch/arm/mach-davinci/include/mach/memory.h | 6 ------ arch/arm/mach-ebsa110/include/mach/memory.h | 7 ------- arch/arm/mach-ep93xx/include/mach/memory.h | 4 ---- .../arm/mach-footbridge/include/mach/memory.h | 9 +++++++++ arch/arm/mach-h720x/include/mach/memory.h | 17 ---------------- arch/arm/mach-imx/include/mach/memory.h | 10 ---------- .../arm/mach-integrator/include/mach/memory.h | 9 +-------- arch/arm/mach-iop13xx/include/mach/memory.h | 16 ++------------- arch/arm/mach-iop32x/include/mach/memory.h | 11 ---------- arch/arm/mach-iop33x/include/mach/memory.h | 11 ---------- arch/arm/mach-ixp2000/include/mach/memory.h | 7 ------- arch/arm/mach-ixp23xx/include/mach/memory.h | 13 ------------ arch/arm/mach-ixp4xx/include/mach/memory.h | 12 ----------- arch/arm/mach-kirkwood/include/mach/memory.h | 4 ---- arch/arm/mach-ks8695/include/mach/memory.h | 5 ----- arch/arm/mach-l7200/include/mach/memory.h | 3 --- arch/arm/mach-lh7a40x/include/mach/memory.h | 10 ---------- arch/arm/mach-loki/include/mach/memory.h | 4 ---- arch/arm/mach-msm/include/mach/memory.h | 4 ---- arch/arm/mach-mv78xx0/include/mach/memory.h | 4 ---- arch/arm/mach-netx/include/mach/memory.h | 10 ---------- arch/arm/mach-ns9xxx/include/mach/memory.h | 3 --- arch/arm/mach-orion5x/include/mach/memory.h | 4 ---- arch/arm/mach-pnx4008/include/mach/memory.h | 3 --- arch/arm/mach-pxa/include/mach/memory.h | 10 ---------- arch/arm/mach-realview/include/mach/memory.h | 10 ---------- arch/arm/mach-rpc/include/mach/memory.h | 7 ------- arch/arm/mach-s3c2400/include/mach/memory.h | 3 --- arch/arm/mach-s3c2410/include/mach/memory.h | 3 --- arch/arm/mach-sa1100/include/mach/memory.h | 12 ----------- arch/arm/mach-shark/include/mach/memory.h | 3 --- arch/arm/mach-versatile/include/mach/memory.h | 10 ---------- arch/arm/plat-mxc/include/mach/memory.h | 13 ------------ arch/arm/plat-omap/include/mach/memory.h | 17 +++------------- 39 files changed, 21 insertions(+), 299 deletions(-) diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 7238b3b50643..0202a7c20e62 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -178,6 +178,11 @@ static inline void *phys_to_virt(unsigned long x) * memory. Use of these is *deprecated* (and that doesn't mean * use the __ prefixed forms instead.) See dma-mapping.h. 
*/ +#ifndef __virt_to_bus +#define __virt_to_bus __virt_to_phys +#define __bus_to_virt __phys_to_virt +#endif + static inline __deprecated unsigned long virt_to_bus(void *x) { return __virt_to_bus((unsigned long)x); diff --git a/arch/arm/mach-aaec2000/include/mach/memory.h b/arch/arm/mach-aaec2000/include/mach/memory.h index 56ae900a482e..c00822543d9f 100644 --- a/arch/arm/mach-aaec2000/include/mach/memory.h +++ b/arch/arm/mach-aaec2000/include/mach/memory.h @@ -14,9 +14,6 @@ #define PHYS_OFFSET UL(0xf0000000) -#define __virt_to_bus(x) __virt_to_phys(x) -#define __bus_to_virt(x) __phys_to_virt(x) - /* * The nodes are the followings: * diff --git a/arch/arm/mach-at91/include/mach/memory.h b/arch/arm/mach-at91/include/mach/memory.h index 9dd1b8c79b08..14f4ef4b6a9e 100644 --- a/arch/arm/mach-at91/include/mach/memory.h +++ b/arch/arm/mach-at91/include/mach/memory.h @@ -25,15 +25,4 @@ #define PHYS_OFFSET (AT91_SDRAM_BASE) - -/* - * Virtual view <-> DMA view memory address translations - * virt_to_bus: Used to translate the virtual address to an - * address suitable to be passed to set_dma_addr - * bus_to_virt: Used to convert an address for DMA operations - * to an address that the kernel can use. - */ -#define __virt_to_bus(x) __virt_to_phys(x) -#define __bus_to_virt(x) __phys_to_virt(x) - #endif diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h index 98ec30c97bbe..e522b20bcbc2 100644 --- a/arch/arm/mach-clps711x/include/mach/memory.h +++ b/arch/arm/mach-clps711x/include/mach/memory.h @@ -26,25 +26,7 @@ */ #define PHYS_OFFSET UL(0xc0000000) -/* - * Virtual view <-> DMA view memory address translations - * virt_to_bus: Used to translate the virtual address to an - * address suitable to be passed to set_dma_addr - * bus_to_virt: Used to convert an address for DMA operations - * to an address that the kernel can use. - */ - -#if defined(CONFIG_ARCH_CDB89712) - -#define __virt_to_bus(x) (x) -#define __bus_to_virt(x) (x) - -#elif defined (CONFIG_ARCH_AUTCPU12) - -#define __virt_to_bus(x) (x) -#define __bus_to_virt(x) (x) - -#else +#if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12) #define __virt_to_bus(x) ((x) - PAGE_OFFSET) #define __bus_to_virt(x) ((x) + PAGE_OFFSET) diff --git a/arch/arm/mach-clps7500/include/mach/memory.h b/arch/arm/mach-clps7500/include/mach/memory.h index 87b32db470c8..05ea76b6a2d0 100644 --- a/arch/arm/mach-clps7500/include/mach/memory.h +++ b/arch/arm/mach-clps7500/include/mach/memory.h @@ -19,13 +19,6 @@ */ #define PHYS_OFFSET UL(0x10000000) -/* - * These are exactly the same on the RiscPC as the - * physical memory view. 
- */ -#define __virt_to_bus(x) __virt_to_phys(x) -#define __bus_to_virt(x) __phys_to_virt(x) - /* * Cache flushing area - ROM */ diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h index dd1625c23cf4..3a3353357bac 100644 --- a/arch/arm/mach-davinci/include/mach/memory.h +++ b/arch/arm/mach-davinci/include/mach/memory.h @@ -55,10 +55,4 @@ __arch_adjust_zones(int node, unsigned long *size, unsigned long *holes) #endif -/* - * Bus address is physical address - */ -#define __virt_to_bus(x) __virt_to_phys(x) -#define __bus_to_virt(x) __phys_to_virt(x) - #endif /* __ASM_ARCH_MEMORY_H */ diff --git a/arch/arm/mach-ebsa110/include/mach/memory.h b/arch/arm/mach-ebsa110/include/mach/memory.h index eea4b75b657b..0ca66d080c69 100644 --- a/arch/arm/mach-ebsa110/include/mach/memory.h +++ b/arch/arm/mach-ebsa110/include/mach/memory.h @@ -21,13 +21,6 @@ */ #define PHYS_OFFSET UL(0x00000000) -/* - * We keep this 1:1 so that we don't interfere - * with the PCMCIA memory regions - */ -#define __virt_to_bus(x) (x) -#define __bus_to_virt(x) (x) - /* * Cache flushing area - SRAM */ diff --git a/arch/arm/mach-ep93xx/include/mach/memory.h b/arch/arm/mach-ep93xx/include/mach/memory.h index f1b633590752..5c80c3c8158d 100644 --- a/arch/arm/mach-ep93xx/include/mach/memory.h +++ b/arch/arm/mach-ep93xx/include/mach/memory.h @@ -7,8 +7,4 @@ #define PHYS_OFFSET UL(0x00000000) -#define __bus_to_virt(x) __phys_to_virt(x) -#define __virt_to_bus(x) __virt_to_phys(x) - - #endif diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h index 6ae2f1a07ab9..cb16e59d87b6 100644 --- a/arch/arm/mach-footbridge/include/mach/memory.h +++ b/arch/arm/mach-footbridge/include/mach/memory.h @@ -30,9 +30,18 @@ extern unsigned long __virt_to_bus(unsigned long); extern unsigned long __bus_to_virt(unsigned long); #endif +#define __virt_to_bus __virt_to_bus +#define __bus_to_virt __bus_to_virt #elif defined(CONFIG_FOOTBRIDGE_HOST) +/* + * The footbridge is programmed to expose the system RAM at the corresponding + * address. So, if PAGE_OFFSET is 0xc0000000, RAM appears at 0xe0000000. + * If 0x80000000, then its exposed at 0xa0000000 on the bus. etc. + * The only requirement is that the RAM isn't placed at bus address 0 which + * would clash with VGA cards. + */ #define __virt_to_bus(x) ((x) - 0xe0000000) #define __bus_to_virt(x) ((x) + 0xe0000000) diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h index cb26f49cc4e1..83a2fa090e88 100644 --- a/arch/arm/mach-h720x/include/mach/memory.h +++ b/arch/arm/mach-h720x/include/mach/memory.h @@ -7,23 +7,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -/* - * Page offset: - * ( 0xc0000000UL ) - */ #define PHYS_OFFSET UL(0x40000000) -/* - * Virtual view <-> DMA view memory address translations - * virt_to_bus: Used to translate the virtual address to an - * address suitable to be passed to set_dma_addr - * bus_to_virt: Used to convert an address for DMA operations - * to an address that the kernel can use. 
- *
- * There is something to do here later !, Mar 2000, Jungjun Kim
- */
-
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
diff --git a/arch/arm/mach-imx/include/mach/memory.h b/arch/arm/mach-imx/include/mach/memory.h
index 5c453063c0ed..a93df7cba694 100644
--- a/arch/arm/mach-imx/include/mach/memory.h
+++ b/arch/arm/mach-imx/include/mach/memory.h
@@ -23,14 +23,4 @@
 #define PHYS_OFFSET	UL(0x08000000)
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	(x - PAGE_OFFSET + PHYS_OFFSET)
-#define __bus_to_virt(x)	(x - PHYS_OFFSET + PAGE_OFFSET)
-
 #endif
diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h
index be7e63c21d25..30d41d0e7d04 100644
--- a/arch/arm/mach-integrator/include/mach/memory.h
+++ b/arch/arm/mach-integrator/include/mach/memory.h
@@ -24,15 +24,8 @@
  * Physical DRAM offset.
  */
 #define PHYS_OFFSET	UL(0x00000000)
-#define BUS_OFFSET	UL(0x80000000)
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
+#define BUS_OFFSET	UL(0x80000000)
 #define __virt_to_bus(x)	(x - PAGE_OFFSET + BUS_OFFSET)
 #define __bus_to_virt(x)	(x - BUS_OFFSET + PAGE_OFFSET)
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index b82602d529bf..e012bf13c955 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -16,18 +16,6 @@
 #define IOP13XX_PMMR_P_START (IOP13XX_PMMR_PHYS_MEM_BASE)
 #define IOP13XX_PMMR_P_END (IOP13XX_PMMR_PHYS_MEM_BASE + IOP13XX_PMMR_SIZE)
-/*
- * Virtual view <-> PCI DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-
-/* RAM has 1:1 mapping on the PCIe/x Busses */
-#define __virt_to_bus(x)	(__virt_to_phys(x))
-#define __bus_to_virt(x)	(__phys_to_virt(x))
-
 static inline dma_addr_t __virt_to_lbus(unsigned long x)
 {
 	return x + IOP13XX_PMMR_PHYS_MEM_BASE - IOP13XX_PMMR_VIRT_MEM_BASE;
@@ -55,7 +43,7 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
 		if (is_lbus_device(dev) && __is_lbus_dma(__dma))	\
 			__virt = __lbus_to_virt(__dma);			\
 		else							\
-			__virt = __bus_to_virt(__dma);			\
+			__virt = __phys_to_virt(__dma);			\
 		(void *)__virt;						\
 	})
@@ -66,7 +54,7 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
 		if (is_lbus_device(dev) && __is_lbus_virt(__virt))	\
 			__dma = __virt_to_lbus(__virt);			\
 		else							\
-			__dma = __virt_to_bus(__virt);			\
+			__dma = __virt_to_phys(__virt);			\
 		__dma;							\
 	})
diff --git a/arch/arm/mach-iop32x/include/mach/memory.h b/arch/arm/mach-iop32x/include/mach/memory.h
index 42cd4bf3148c..61c7bdb26a81 100644
--- a/arch/arm/mach-iop32x/include/mach/memory.h
+++ b/arch/arm/mach-iop32x/include/mach/memory.h
@@ -12,15 +12,4 @@
  */
 #define PHYS_OFFSET	UL(0xa0000000)
-/*
- * Virtual view <-> PCI DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	(__virt_to_phys(x))
-#define __bus_to_virt(x)	(__phys_to_virt(x))
-
-
 #endif
diff --git a/arch/arm/mach-iop33x/include/mach/memory.h b/arch/arm/mach-iop33x/include/mach/memory.h
index 2cef0bbb354f..c46c6ba30186 100644
--- a/arch/arm/mach-iop33x/include/mach/memory.h
+++ b/arch/arm/mach-iop33x/include/mach/memory.h
@@ -12,15 +12,4 @@
  */
 #define PHYS_OFFSET	UL(0x00000000)
-/*
- * Virtual view <-> PCI DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	(__virt_to_phys(x))
-#define __bus_to_virt(x)	(__phys_to_virt(x))
-
-
 #endif
diff --git a/arch/arm/mach-ixp2000/include/mach/memory.h b/arch/arm/mach-ixp2000/include/mach/memory.h
index 241529a7c52d..aee7eb8a71b2 100644
--- a/arch/arm/mach-ixp2000/include/mach/memory.h
+++ b/arch/arm/mach-ixp2000/include/mach/memory.h
@@ -15,13 +15,6 @@
 #define PHYS_OFFSET	UL(0x00000000)
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
 #include
 #define __virt_to_bus(v) \
diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h
index 9d40115f7ebe..fdd138706c70 100644
--- a/arch/arm/mach-ixp23xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp23xx/include/mach/memory.h
@@ -19,16 +19,6 @@
  */
 #define PHYS_OFFSET	(0x00000000)
-
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#ifndef __ASSEMBLY__
-
 #define __virt_to_bus(v) \
 ({ unsigned int ret; \
 	ret = ((__virt_to_phys(v) - 0x00000000) + \
@@ -43,6 +33,3 @@
 #define arch_is_coherent()	1
 #endif
-
-
-#endif
diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h
index c4d2830ac987..2e481db0ca58 100644
--- a/arch/arm/mach-ixp4xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
@@ -25,16 +25,4 @@ void ixp4xx_adjust_zones(int node, unsigned long *size, unsigned long *holes);
 #endif
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- *
- * These are dummies for now.
- */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
diff --git a/arch/arm/mach-kirkwood/include/mach/memory.h b/arch/arm/mach-kirkwood/include/mach/memory.h
index b5fb34bdccd5..45431e131465 100644
--- a/arch/arm/mach-kirkwood/include/mach/memory.h
+++ b/arch/arm/mach-kirkwood/include/mach/memory.h
@@ -7,8 +7,4 @@
 #define PHYS_OFFSET	UL(0x00000000)
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
-
 #endif
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index 8fbc4c76c38b..6d5887cf5742 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -37,11 +37,6 @@ extern struct bus_type platform_bus_type;
 			(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
 #define __arch_page_to_dma(dev, x)	__arch_virt_to_dma(dev, page_address(x))
-#else
-
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
 #endif
diff --git a/arch/arm/mach-l7200/include/mach/memory.h b/arch/arm/mach-l7200/include/mach/memory.h
index f338cf3ffd93..9fb40ed2f03b 100644
--- a/arch/arm/mach-l7200/include/mach/memory.h
+++ b/arch/arm/mach-l7200/include/mach/memory.h
@@ -17,9 +17,6 @@
  */
 #define PHYS_OFFSET	UL(0xf0000000)
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 /*
  * Cache flushing area - ROM
  */
diff --git a/arch/arm/mach-lh7a40x/include/mach/memory.h b/arch/arm/mach-lh7a40x/include/mach/memory.h
index 1da14ff66c93..189d20e543e7 100644
--- a/arch/arm/mach-lh7a40x/include/mach/memory.h
+++ b/arch/arm/mach-lh7a40x/include/mach/memory.h
@@ -19,16 +19,6 @@
  */
 #define PHYS_OFFSET	UL(0xc0000000)
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #ifdef CONFIG_DISCONTIGMEM
 /*
diff --git a/arch/arm/mach-loki/include/mach/memory.h b/arch/arm/mach-loki/include/mach/memory.h
index a39533ab489d..2ed7e6e732c2 100644
--- a/arch/arm/mach-loki/include/mach/memory.h
+++ b/arch/arm/mach-loki/include/mach/memory.h
@@ -7,8 +7,4 @@
 #define PHYS_OFFSET	UL(0x00000000)
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
-
 #endif
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index 63fd47f2e62e..f4698baec976 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -19,9 +19,5 @@
 /* physical offset of RAM */
 #define PHYS_OFFSET	UL(0x10000000)
-/* bus address and physical addresses are identical */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
diff --git a/arch/arm/mach-mv78xx0/include/mach/memory.h b/arch/arm/mach-mv78xx0/include/mach/memory.h
index 9e47a140ff7a..e663042d307f 100644
--- a/arch/arm/mach-mv78xx0/include/mach/memory.h
+++ b/arch/arm/mach-mv78xx0/include/mach/memory.h
@@ -7,8 +7,4 @@
 #define PHYS_OFFSET	UL(0x00000000)
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
-
 #endif
diff --git a/arch/arm/mach-netx/include/mach/memory.h b/arch/arm/mach-netx/include/mach/memory.h
index 53745a1378de..9a363f297f90 100644
--- a/arch/arm/mach-netx/include/mach/memory.h
+++ b/arch/arm/mach-netx/include/mach/memory.h
@@ -22,15 +22,5 @@
 #define PHYS_OFFSET	UL(0x80000000)
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
diff --git a/arch/arm/mach-ns9xxx/include/mach/memory.h b/arch/arm/mach-ns9xxx/include/mach/memory.h
index 649ee6235b94..6107193adbfe 100644
--- a/arch/arm/mach-ns9xxx/include/mach/memory.h
+++ b/arch/arm/mach-ns9xxx/include/mach/memory.h
@@ -21,7 +21,4 @@
 #define PHYS_OFFSET	UL(0x00000000)
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
diff --git a/arch/arm/mach-orion5x/include/mach/memory.h b/arch/arm/mach-orion5x/include/mach/memory.h
index 54dd76b013f2..52a2955d0f87 100644
--- a/arch/arm/mach-orion5x/include/mach/memory.h
+++ b/arch/arm/mach-orion5x/include/mach/memory.h
@@ -9,8 +9,4 @@
 #define PHYS_OFFSET	UL(0x00000000)
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
-
 #endif
diff --git a/arch/arm/mach-pnx4008/include/mach/memory.h b/arch/arm/mach-pnx4008/include/mach/memory.h
index 5789a2d16f5a..b38d50c156c4 100644
--- a/arch/arm/mach-pnx4008/include/mach/memory.h
+++ b/arch/arm/mach-pnx4008/include/mach/memory.h
@@ -18,7 +18,4 @@
  */
 #define PHYS_OFFSET	(0x80000000)
-#define __virt_to_bus(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __bus_to_virt(x)	((x) + PAGE_OFFSET - PHYS_OFFSET)
-
 #endif
diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h
index 59aef89808d6..eac491c2d741 100644
--- a/arch/arm/mach-pxa/include/mach/memory.h
+++ b/arch/arm/mach-pxa/include/mach/memory.h
@@ -17,16 +17,6 @@
  */
 #define PHYS_OFFSET	UL(0xa0000000)
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 /*
  * The nodes are matched with the physical SDRAM banks as follows:
  *
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
index 0e673483a141..65a0742094f7 100644
--- a/arch/arm/mach-realview/include/mach/memory.h
+++ b/arch/arm/mach-realview/include/mach/memory.h
@@ -25,14 +25,4 @@
  */
 #define PHYS_OFFSET	UL(0x00000000)
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	((x) - PAGE_OFFSET)
-#define __bus_to_virt(x)	((x) + PAGE_OFFSET)
-
 #endif
diff --git a/arch/arm/mach-rpc/include/mach/memory.h b/arch/arm/mach-rpc/include/mach/memory.h
index 9bf7e43e2863..78191bf25192 100644
--- a/arch/arm/mach-rpc/include/mach/memory.h
+++ b/arch/arm/mach-rpc/include/mach/memory.h
@@ -23,13 +23,6 @@
  */
 #define PHYS_OFFSET	UL(0x10000000)
-/*
- * These are exactly the same on the RiscPC as the
- * physical memory view.
- */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 /*
  * Cache flushing area - ROM
  */
diff --git a/arch/arm/mach-s3c2400/include/mach/memory.h b/arch/arm/mach-s3c2400/include/mach/memory.h
index 8f4878e4f591..cf5901ffd385 100644
--- a/arch/arm/mach-s3c2400/include/mach/memory.h
+++ b/arch/arm/mach-s3c2400/include/mach/memory.h
@@ -17,7 +17,4 @@
 #define PHYS_OFFSET	UL(0x0C000000)
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
diff --git a/arch/arm/mach-s3c2410/include/mach/memory.h b/arch/arm/mach-s3c2410/include/mach/memory.h
index 93782628a786..6f1e5871ae4b 100644
--- a/arch/arm/mach-s3c2410/include/mach/memory.h
+++ b/arch/arm/mach-s3c2410/include/mach/memory.h
@@ -13,7 +13,4 @@
 #define PHYS_OFFSET	UL(0x30000000)
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 #endif
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h
index 1c127b68581d..6984034f6958 100644
--- a/arch/arm/mach-sa1100/include/mach/memory.h
+++ b/arch/arm/mach-sa1100/include/mach/memory.h
@@ -27,18 +27,6 @@ void sa1111_adjust_zones(int node, unsigned long *size, unsigned long *holes);
 #endif
 #endif
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- *
- * On the SA1100, bus addresses are equivalent to physical addresses.
- */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 /*
  * Because of the wide memory address space between physical RAM banks on the
  * SA1100, it's much convenient to use Linux's SparseMEM support to implement
diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h
index b7874ad9f9f6..d00c05eabd52 100644
--- a/arch/arm/mach-shark/include/mach/memory.h
+++ b/arch/arm/mach-shark/include/mach/memory.h
@@ -36,9 +36,6 @@ static inline void __arch_adjust_zones(int node, unsigned long *zone_size, unsig
 #endif
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
 /*
  * Cache flushing area
  */
diff --git a/arch/arm/mach-versatile/include/mach/memory.h b/arch/arm/mach-versatile/include/mach/memory.h
index b6315c0602ac..79aeab86b903 100644
--- a/arch/arm/mach-versatile/include/mach/memory.h
+++ b/arch/arm/mach-versatile/include/mach/memory.h
@@ -25,14 +25,4 @@
  */
 #define PHYS_OFFSET	UL(0x00000000)
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- *              address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- *              to an address that the kernel can use.
- */
-#define __virt_to_bus(x)	((x) - PAGE_OFFSET)
-#define __bus_to_virt(x)	((x) + PAGE_OFFSET)
-
 #endif
diff --git a/arch/arm/plat-mxc/include/mach/memory.h b/arch/arm/plat-mxc/include/mach/memory.h
index d7a8d3ebed57..203688e6164e 100644
--- a/arch/arm/plat-mxc/include/mach/memory.h
+++ b/arch/arm/plat-mxc/include/mach/memory.h
@@ -13,17 +13,4 @@
 #include
-/*
- * Virtual view <-> DMA view memory address translations
- * This macro is used to translate the virtual address to an address
- * suitable to be passed to set_dma_addr()
- */
-#define __virt_to_bus(a)	__virt_to_phys(a)
-
-/*
- * Used to convert an address for DMA operations to an address that the
- * kernel can use.
- */
-#define __bus_to_virt(a)	__phys_to_virt(a)
-
 #endif /* __ASM_ARCH_MXC_MEMORY_H__ */
diff --git a/arch/arm/plat-omap/include/mach/memory.h b/arch/arm/plat-omap/include/mach/memory.h
index d40cac60b959..211c9f6619e9 100644
--- a/arch/arm/plat-omap/include/mach/memory.h
+++ b/arch/arm/plat-omap/include/mach/memory.h
@@ -42,19 +42,8 @@
 #define PHYS_OFFSET	UL(0x80000000)
 #endif
-/*
- * Conversion between SDRAM and fake PCI bus, used by USB
- * NOTE: Physical address must be converted to Local Bus address
- *	 on OMAP-1510 only
- */
-
 /*
  * Bus address is physical address, except for OMAP-1510 Local Bus.
- */
-#define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt(x)	__phys_to_virt(x)
-
-/*
  * OMAP-1510 bus address is translated into a Local Bus address if the
  * OMAP bus type is lbus. We do the address translation based on the
  * device overriding the defaults used in the dma-mapping API.
@@ -74,16 +63,16 @@
 #define __arch_page_to_dma(dev, page)	({is_lbus_device(dev) ? \
 			(dma_addr_t)virt_to_lbus(page_address(page)) : \
-			(dma_addr_t)__virt_to_bus(page_address(page));})
+			(dma_addr_t)__virt_to_phys(page_address(page));})
 #define __arch_dma_to_virt(dev, addr)	({ (void *) (is_lbus_device(dev) ? \
 			lbus_to_virt(addr) : \
-			__bus_to_virt(addr)); })
+			__phys_to_virt(addr)); })
 #define __arch_virt_to_dma(dev, addr)	({ unsigned long __addr = (unsigned long)(addr); \
 			(dma_addr_t) (is_lbus_device(dev) ? \
 			virt_to_lbus(__addr) : \
-			__virt_to_bus(__addr)); })
+			__virt_to_phys(__addr)); })
 #endif	/* CONFIG_ARCH_OMAP15XX */