
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Conflicts:
	arch/sparc/mm/init_64.c

The conflict was simple, non-overlapping additions.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2014-08-05 18:57:18 -07:00
commit e9011d0866
173 changed files with 1478 additions and 665 deletions


@ -281,6 +281,19 @@ gestures can normally be extracted from it.
If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
device.
INPUT_PROP_TOPBUTTONPAD:
-----------------------
Some laptops, most notably the Lenovo *40 series, provide a trackstick
device but do not have physical buttons associated with the trackstick
device. Instead, the top area of the touchpad is marked to show
visual/haptic areas for left, middle, right buttons intended to be used
with the trackstick.
If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
accordingly. This property does not affect kernel behavior.
The kernel does not provide button emulation for such devices but treats
them as any other INPUT_PROP_BUTTONPAD device.
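As a brief illustration of how userspace can consume this property (an editor's sketch, not part of this patch): the property bitmap of an evdev node can be read with the EVIOCGPROP ioctl and tested bit by bit. The device path below is a placeholder; libevdev users can get the same answer from libevdev_has_property().

/*
 * Hypothetical userspace sketch: test INPUT_PROP_TOPBUTTONPAD on an
 * evdev node. Only the device path is made up; the ioctl and the
 * property constants come from <linux/input.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/input.h>

static int has_prop(const unsigned char *props, int prop)
{
	return props[prop / 8] & (1 << (prop % 8));
}

int main(void)
{
	unsigned char props[INPUT_PROP_MAX / 8 + 1];
	int fd = open("/dev/input/event4", O_RDONLY);	/* placeholder node */

	memset(props, 0, sizeof(props));
	if (fd < 0 || ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0) {
		perror("EVIOCGPROP");
		return 1;
	}
	if (has_prop(props, INPUT_PROP_TOPBUTTONPAD))
		printf("top software button area: emulate buttons in userspace\n");
	close(fd);
	return 0;
}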
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.


@ -6956,6 +6956,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/pinctrl/pinctrl-at91.c
PIN CONTROLLER - RENESAS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-sh@vger.kernel.org
S: Maintained
F: drivers/pinctrl/sh-pfc/
PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <t.figa@samsung.com>
M: Thomas Abraham <thomas.abraham@linaro.org>
@ -8019,6 +8025,16 @@ F: drivers/ata/
F: include/linux/ata.h
F: include/linux/libata.h
SERIAL ATA AHCI PLATFORM devices support
M: Hans de Goede <hdegoede@redhat.com>
M: Tejun Heo <tj@kernel.org>
L: linux-ide@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
S: Supported
F: drivers/ata/ahci_platform.c
F: drivers/ata/libahci_platform.c
F: include/linux/ahci_platform.h
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
L: linux-scsi@vger.kernel.org


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
@ -688,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
endif
endif
KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -Wa,-gdwarf-2


@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
dst += AES_BLOCK_SIZE;
} while (--blocks);
}
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
while (walk.nbytes) {
u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
} while (--blocks);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->enc, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}


@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, rounds, blocks, first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
return err;
@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_enc, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();
@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key1.key_dec, rounds, blocks,
(u8 *)ctx->key2.key_enc, walk.iv, first);
err = blkcipher_walk_done(desc, &walk, 0);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
kernel_neon_end();


@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
early_param("initrd", early_initrd);
#endif
/*
* Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
static phys_addr_t max_zone_dma_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA)) {
unsigned long max_dma_phys =
(unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
max_dma = PFN_DOWN(max_zone_dma_phys());
zone_size[ZONE_DMA] = max_dma - min;
}
zone_size[ZONE_NORMAL] = max - max_dma;
@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))
dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
dma_phys_limit = max_zone_dma_phys();
dma_contiguous_reserve(dma_phys_limit);
memblock_allow_resize();
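To make the helper's arithmetic concrete, here is a standalone sketch (an editor's illustration with made-up DRAM bounds, not kernel code) that mirrors max_zone_dma_phys():

/* Mirrors max_zone_dma_phys() for a hypothetical DRAM window above 4G. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dram_start = 0x8080000000ULL;	/* hypothetical memblock_start_of_DRAM() */
	uint64_t dram_end   = 0x8800000000ULL;	/* hypothetical memblock_end_of_DRAM() */
	/* GENMASK_ULL(63, 32): keep only the bits above 4G as the DMA offset */
	uint64_t offset = dram_start & 0xffffffff00000000ULL;
	uint64_t limit  = offset + (1ULL << 32);

	if (dram_end < limit)
		limit = dram_end;
	/* Prints 0x8100000000: the offset 0x8000000000 plus a 4G window. */
	printf("ZONE_DMA phys limit: %#llx\n", (unsigned long long)limit);
	return 0;
}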


@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
CONFIG_SPI_BFIN_V3=y
CONFIG_SPI_ADI_V3=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set


@ -145,7 +145,7 @@ SECTIONS
.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
.init.data : AT(__data_lma + __data_len)
.init.data : AT(__data_lma + __data_len + 32)
{
__sinitdata = .;
INIT_DATA


@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>


@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>


@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>


@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>


@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"),
@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"),
#endif
PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"),
};
static int __init ezkit_init(void)


@ -44,6 +44,7 @@
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/i2c-pca-platform.h>
#include <linux/delay.h>


@ -18,6 +18,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>


@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>


@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
{
#define CONFIG_SMC_GCTL_VAL 0x00000010
if (!devm_pinctrl_get_select_default(&pdev->dev))
return -EBUSY;
bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
bfin_write32(SMC_B0CTL, 0x01002011);
bfin_write32(SMC_B0TIM, 0x08170977);
@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
void bf609_nor_flash_exit(struct platform_device *pdev)
{
devm_pinctrl_put(pdev->dev.pins->p);
bfin_write32(SMC_GCTL, 0);
}
@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"),
#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
#else
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"),
#endif
PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"),


@ -10,6 +10,7 @@
#define __MACH_BF609_PM_H__
#include <linux/suspend.h>
#include <linux/platform_device.h>
extern int bfin609_pm_enter(suspend_state_t state);
extern int bf609_pm_prepare(void);
@ -19,6 +20,6 @@ void bf609_hibernate(void);
void bfin_sec_raise_irq(unsigned int sid);
void coreb_enable(void);
int bf609_nor_flash_init(void);
void bf609_nor_flash_exit(void);
int bf609_nor_flash_init(struct platform_device *pdev);
void bf609_nor_flash_exit(struct platform_device *pdev);
#endif


@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static int smc_pm_syscore_suspend(void)
{
bf609_nor_flash_exit();
bf609_nor_flash_exit(NULL);
return 0;
}
static void smc_pm_syscore_resume(void)
{
bf609_nor_flash_init();
bf609_nor_flash_init(NULL);
}
static struct syscore_ops smc_pm_syscore_ops = {


@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
/* Enable interrupts IVG7-15 */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |


@ -69,8 +69,6 @@
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192


@ -728,7 +728,6 @@ static void __init pagetable_init(void)
#endif
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
memset(empty_zero_page, 0, PAGE_SIZE);
}
static void __init gateway_init(void)


@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \


@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
return rb;
}
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
bool is_base_size)
{
int size, a_psize;
/* Look at the 8 bit LP value */
unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
continue;
a_psize = __hpte_actual_psize(lp, size);
if (a_psize != -1)
if (a_psize != -1) {
if (is_base_size)
return 1ul << mmu_psize_defs[size].shift;
return 1ul << mmu_psize_defs[a_psize].shift;
}
}
}
return 0;
}
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
return __hpte_page_size(h, l, 0);
}
static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
return __hpte_page_size(h, l, 1);
}
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;


@ -22,6 +22,7 @@
*/
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
#include <asm/processor.h>
/*
* Segment table
@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
*/
struct subpage_prot_table {
unsigned long maxaddr; /* only addresses < this are protected */
unsigned int **protptrs[2];
unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
unsigned int *low_prot[4];
};


@ -277,6 +277,8 @@ n:
.globl n; \
n:
#define _GLOBAL_TOC(name) _GLOBAL(name)
#define _KPROBE(n) \
.section ".kprobes.text","a"; \
.globl n; \


@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 DD1: Does not support doorbell IPIs */
.pvr_mask = 0xffffff00,
.pvr_value = 0x004d0100,
.cpu_name = "POWER8 (raw)",
.cpu_features = CPU_FTRS_POWER8_DD1,
.cpu_user_features = COMMON_USER_POWER8,
.cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.flush_tlb = __flush_tlb_power8,
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004d0000,


@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
for (f = flist; f; f = next) {
/* Translate data addrs to absolute */
for (i = 0; i < f->num_blocks; i++) {
f->blocks[i].data = (char *)__pa(f->blocks[i].data);
f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
image_size += f->blocks[i].length;
f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
}
next = f->next;
/* Don't translate NULL pointer for last entry */
if (f->next)
f->next = (struct flash_block_list *)__pa(f->next);
f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
else
f->next = NULL;
/* make num_blocks into the version/length field */
f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
f->num_blocks = cpu_to_be64(f->num_blocks);
}
printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);


@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
goto out;
}
if (!rma_setup && is_vrma_hpte(v)) {
unsigned long psize = hpte_page_size(v, r);
unsigned long psize = hpte_base_page_size(v, r);
unsigned long senc = slb_pgsize_encoding(psize);
unsigned long lpcr;


@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
r = hpte[i+1];
/*
* Check the HPTE again, including large page size
* Since we don't currently allow any MPSS (mixed
* page-size segment) page sizes, it is sufficient
* to check against the actual page size.
* Check the HPTE again, including base page size
*/
if ((v & valid) && (v & mask) == val &&
hpte_page_size(v, r) == (1ul << pshift))
hpte_base_page_size(v, r) == (1ul << pshift))
/* Return with the HPTE still locked */
return (hash << 3) + (i >> 1);


@ -48,7 +48,7 @@
*
* LR = return address to continue at after eventually re-enabling MMU
*/
_GLOBAL(kvmppc_hv_entry_trampoline)
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -112(r1)


@ -25,7 +25,11 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
#endif
#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
#elif defined(CONFIG_PPC_BOOK3S_32)


@ -36,7 +36,11 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
#endif
#elif defined(CONFIG_PPC_BOOK3S_32)
@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
* On entry, r4 contains the guest shadow MSR
* MSR.EE has to be 0 when calling this function
*/
_GLOBAL(kvmppc_entry_trampoline)
_GLOBAL_TOC(kvmppc_entry_trampoline)
mfmsr r5
LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
toreal(r7)


@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq, server, priority;
int rc;
if (args->nargs != 3 || args->nret != 1) {
if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
irq = args->args[0];
server = args->args[1];
priority = args->args[2];
irq = be32_to_cpu(args->args[0]);
server = be32_to_cpu(args->args[1]);
priority = be32_to_cpu(args->args[2]);
rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
if (rc)
rc = -3;
out:
args->rets[0] = rc;
args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq, server, priority;
int rc;
if (args->nargs != 1 || args->nret != 3) {
if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
rc = -3;
goto out;
}
irq = args->args[0];
irq = be32_to_cpu(args->args[0]);
server = priority = 0;
rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
goto out;
}
args->rets[1] = server;
args->rets[2] = priority;
args->rets[1] = cpu_to_be32(server);
args->rets[2] = cpu_to_be32(priority);
out:
args->rets[0] = rc;
args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq;
int rc;
if (args->nargs != 1 || args->nret != 1) {
if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
irq = args->args[0];
irq = be32_to_cpu(args->args[0]);
rc = kvmppc_xics_int_off(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
args->rets[0] = rc;
args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq;
int rc;
if (args->nargs != 1 || args->nret != 1) {
if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
irq = args->args[0];
irq = be32_to_cpu(args->args[0]);
rc = kvmppc_xics_int_on(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
args->rets[0] = rc;
args->rets[0] = cpu_to_be32(rc);
}
#endif /* CONFIG_KVM_XICS */
@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
return rc;
}
static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
{
#ifdef __LITTLE_ENDIAN__
int i;
args->token = be32_to_cpu(args->token);
args->nargs = be32_to_cpu(args->nargs);
args->nret = be32_to_cpu(args->nret);
for (i = 0; i < args->nargs; i++)
args->args[i] = be32_to_cpu(args->args[i]);
#endif
}
static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
{
#ifdef __LITTLE_ENDIAN__
int i;
for (i = 0; i < args->nret; i++)
args->args[i] = cpu_to_be32(args->args[i]);
args->token = cpu_to_be32(args->token);
args->nargs = cpu_to_be32(args->nargs);
args->nret = cpu_to_be32(args->nret);
#endif
}
int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
{
struct rtas_token_definition *d;
@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
if (rc)
goto fail;
kvmppc_rtas_swap_endian_in(&args);
/*
* args->rets is a pointer into args->args. Now that we've
* copied args we need to fix it up to point into our copy,
@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
args.rets = &args.args[args.nargs];
args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->lock);
rc = -ENOENT;
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
if (d->token == args.token) {
if (d->token == be32_to_cpu(args.token)) {
d->handler->handler(vcpu, &args);
rc = 0;
break;
@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
if (rc == 0) {
args.rets = orig_rets;
kvmppc_rtas_swap_endian_out(&args);
rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
if (rc)
goto fail;


@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (printk_ratelimit())
pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
__func__, (long)gfn, pfn);
return -EINVAL;
ret = -EINVAL;
goto out;
}
kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);


@ -77,7 +77,7 @@ _GLOBAL(memset)
stb r4,0(r6)
blr
_GLOBAL(memmove)
_GLOBAL_TOC(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
b memcpy


@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = regs->gpr[rb] & 0x3f;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = rb;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#ifdef __powerpc64__
case 27: /* sld */
sh = regs->gpr[rd] & 0x7f;
sh = regs->gpr[rb] & 0x7f;
if (sh < 64)
regs->gpr[ra] = regs->gpr[rd] << sh;
else
@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = regs->gpr[rb] & 0x7f;
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = rb | ((instr & 2) << 4);
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;


@ -1307,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
out_enable:
pmao_restore_workaround(ebb);
if (ppmu->flags & PPMU_ARCH_207S)
mtspr(SPRN_MMCR2, 0);
mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
mb();
@ -1315,9 +1318,6 @@ static void power_pmu_enable(struct pmu *pmu)
write_mmcr0(cpuhw, mmcr0);
if (ppmu->flags & PPMU_ARCH_207S)
mtspr(SPRN_MMCR2, 0);
/*
* Enable instruction sampling if necessary
*/


@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: Opal log read failed\n");
pr_err("ELOG: OPAL log info read failed\n");
return;
}
@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
log_id = be64_to_cpu(id);
elog_type = be64_to_cpu(type);
BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
elog_size = OPAL_MAX_ERRLOG_SIZE;


@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
}
of_node_set_flag(dn, OF_DYNAMIC);
of_node_init(dn);
return dn;
}


@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
np->properties = proplist;
of_node_set_flag(np, OF_DYNAMIC);
of_node_init(np);
np->parent = derive_parent(path);
if (IS_ERR(np->parent)) {


@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
return 0;
asm volatile(
"0: lfpc %1\n"
" la %0,0\n"
" lfpc %1\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));


@ -437,11 +437,11 @@ ENTRY(startup_kdump)
#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_ZEC12)
.long 3, 0xc100efea, 0xf46ce800, 0x00400000
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100efea, 0xf46c0000
.long 2, 0xc100eff2, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
.long 2, 0xc100efea, 0xf0680000
.long 2, 0xc100eff2, 0xf0680000
#elif defined(CONFIG_MARCH_Z9_109)
.long 1, 0xc100efc2
#elif defined(CONFIG_MARCH_Z990)


@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
unsigned long mask = PSW_MASK_USER;
mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
if ((data & ~mask) != PSW_USER_BITS)
if ((data ^ PSW_USER_BITS) & ~mask)
/* Invalid psw mask. */
return -EINVAL;
if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
/* Invalid address-space-control bits */
return -EINVAL;
if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
/* Invalid addressing mode bits */
return -EINVAL;
}
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
/* Build a 64 bit psw mask from 31 bit mask. */
if ((tmp & ~mask) != PSW32_USER_BITS)
if ((tmp ^ PSW32_USER_BITS) & ~mask)
/* Invalid psw mask. */
return -EINVAL;
if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
/* Invalid address-space-control bits */
return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
(__u64)(tmp & mask) << 32;


@ -48,13 +48,10 @@
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
static void zpci_enable_irq(struct irq_data *data);
static void zpci_disable_irq(struct irq_data *data);
static struct irq_chip zpci_irq_chip = {
.name = "zPCI",
.irq_unmask = zpci_enable_irq,
.irq_mask = zpci_disable_irq,
.irq_unmask = unmask_msi_irq,
.irq_mask = mask_msi_irq,
};
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
return rc;
}
static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
int offset, pos;
u32 mask_bits;
if (msi->msi_attrib.is_msix) {
offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
msi->masked = readl(msi->mask_base + offset);
writel(flag, msi->mask_base + offset);
} else if (msi->msi_attrib.maskbit) {
pos = (long) msi->mask_base;
pci_read_config_dword(msi->dev, pos, &mask_bits);
mask_bits &= ~(mask);
mask_bits |= flag & mask;
pci_write_config_dword(msi->dev, pos, mask_bits);
} else
return 0;
msi->msi_attrib.maskbit = !!flag;
return 1;
}
static void zpci_enable_irq(struct irq_data *data)
{
struct msi_desc *msi = irq_get_msi_desc(data->irq);
zpci_msi_set_mask_bits(msi, 1, 0);
}
static void zpci_disable_irq(struct irq_data *data)
{
struct msi_desc *msi = irq_get_msi_desc(data->irq);
zpci_msi_set_mask_bits(msi, 1, 1);
}
void pcibios_fixup_bus(struct pci_bus *bus)
{
}
@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
/* Release MSI interrupts */
list_for_each_entry(msi, &pdev->msi_list, list) {
zpci_msi_set_mask_bits(msi, 1, 1);
if (msi->msi_attrib.is_msix)
default_msix_mask_irq(msi, 1);
else
default_msi_mask_irq(msi, 1, 1);
irq_set_msi_desc(msi->irq, NULL);
irq_free_desc(msi->irq);
msi->msg.address_lo = 0;


@ -32,7 +32,8 @@ endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
$(call cc-option,-m2a-nofpu,)
$(call cc-option,-m2a-nofpu,) \
$(call cc-option,-m4-nofpu,)
cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
$(call cc-option,-mno-implicit-fp,-m4-nofpu)


@ -68,6 +68,9 @@ all: zImage
image zImage uImage tftpboot.img vmlinux.aout: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
install:
$(Q)$(MAKE) $(build)=$(boot) $@
archclean:
$(Q)$(MAKE) $(clean)=$(boot)


@ -69,3 +69,7 @@ $(obj)/image: vmlinux FORCE
$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
install:
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/zImage \
System.map "$(INSTALL_PATH)"


@ -0,0 +1,50 @@
#!/bin/sh
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995 by Linus Torvalds
#
# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
#
# "make install" script for SPARC architecture
#
# Arguments:
# $1 - kernel version
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
#
verify () {
if [ ! -f "$1" ]; then
echo "" 1>&2
echo " *** Missing file: $1" 1>&2
echo ' *** You need to run "make" before "make install".' 1>&2
echo "" 1>&2
exit 1
fi
}
# Make sure the files actually exist
verify "$2"
verify "$3"
# User may have a custom install script
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install - same as make zlilo
if [ -f $4/vmlinuz ]; then
mv $4/vmlinuz $4/vmlinuz.old
fi
if [ -f $4/System.map ]; then
mv $4/System.map $4/System.old
fi
cat $2 > $4/vmlinuz
cp $3 $4/System.map


@ -34,6 +34,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
{
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
void flush_tlb_pending(void);
@ -48,11 +50,6 @@ void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifndef CONFIG_SMP
#define flush_tlb_kernel_range(start,end) \
do { flush_tsb_kernel_range(start,end); \
__flush_tlb_kernel_range(start,end); \
} while (0)
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
@ -63,11 +60,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
#define flush_tlb_kernel_range(start, end) \
do { flush_tsb_kernel_range(start,end); \
smp_flush_tlb_kernel_range(start, end); \
} while (0)
#define global_flush_tlb_page(mm, vaddr) \
smp_flush_tlb_page(mm, vaddr)


@ -410,8 +410,9 @@
#define __NR_finit_module 342
#define __NR_sched_setattr 343
#define __NR_sched_getattr 344
#define __NR_renameat2 345
#define NR_syscalls 345
#define NR_syscalls 346
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001


@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
lp->hs_state != LDC_HS_OPEN)
err = -EINVAL;
err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
else
err = start_handshake(lp);


@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
.globl sys32_mmap2
sys32_mmap2:


@ -86,3 +86,4 @@ sys_call_table:
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2


@ -87,6 +87,7 @@ sys_call_table32:
/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2
#endif /* CONFIG_COMPAT */
@ -165,3 +166,4 @@ sys_call_table:
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2


@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
case 0: fsr = *pfsr;
if (IR == -1) IR = 2;
/* fcc is always fcc0 */
fsr &= ~0xc00; fsr |= (IR << 10); break;
fsr &= ~0xc00; fsr |= (IR << 10);
*pfsr = fsr;
break;
case 1: rd->s = IR; break;


@ -352,6 +352,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
mm = vma->vm_mm;
/* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
if (!pte_accessible(mm, pte))
return;
spin_lock_irqsave(&mm->context.lock, flags);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
@ -2620,6 +2624,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pte = pmd_val(entry);
/* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
if (!(pte & _PAGE_VALID))
return;
/* We are fabricating 8MB pages using 4MB real hw pages. */
pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
@ -2764,3 +2772,27 @@ static int __init report_memory(void)
return 0;
}
device_initcall(report_memory);
#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
#else
#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
#endif
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
if (start < LOW_OBP_ADDRESS) {
flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
}
if (end > HI_OBP_ADDRESS) {
flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
}
} else {
flush_tsb_kernel_range(start, end);
do_flush_tlb_kernel_range(start, end);
}
}


@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
*/
detect_extended_topology(c);
if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
/*
* let's use the legacy cpuid vector 0x1 and 0x4 for topology
* detection.
*/
c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
detect_ht(c);
#endif
}
l2 = init_intel_cacheinfo(c);
if (c->cpuid_level > 9) {
unsigned eax = cpuid_eax(10);
@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_P3);
#endif
if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
/*
* let's use the legacy cpuid vector 0x1 and 0x4 for topology
* detection.
*/
c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
detect_ht(c);
#endif
}
/* Work around errata */
srat_detect_node(c);


@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
#endif
}
#ifdef CONFIG_X86_HT
/*
* If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
* turn means that the only possibility is SMT (as indicated in
* cpuid1). Since cpuid2 doesn't specify shared caches, and we know
* that SMT shares all caches, we can unconditionally set cpu_llc_id to
* c->phys_proc_id.
*/
if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
return l2;


@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
for_each_online_cpu(i) {
err = mce_device_create(i);
if (err) {
/*
* Register notifier anyway (and do not unreg it) so
* that we don't leave undeleted timers, see notifier
* callback above.
*/
__register_hotcpu_notifier(&mce_cpu_notifier);
cpu_notifier_register_done();
goto err_device_create;
}
@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
err_register:
unregister_syscore_ops(&mce_syscore_ops);
cpu_notifier_register_begin();
__unregister_hotcpu_notifier(&mce_cpu_notifier);
cpu_notifier_register_done();
err_device_create:
/*
* We didn't keep track of which devices were created above, but


@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
/* Check if the extra MSRs can be safely accessed */
if (!er->extra_msr_access)
return -ENXIO;
reg->idx = er->idx;
reg->config = event->attr.config1;


@ -295,14 +295,16 @@ struct extra_reg {
u64 config_mask;
u64 valid_mask;
int idx; /* per_xxx->regs[] reg index */
bool extra_msr_access;
};
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
.idx = EXTRA_REG_##i, \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
.idx = EXTRA_REG_##i, \
.extra_msr_access = true, \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \


@ -2182,6 +2182,41 @@ static void intel_snb_check_microcode(void)
}
}
/*
* Under certain circumstances, accessing certain MSRs may cause a #GP.
* The function tests if the input MSR can be safely accessed.
*/
static bool check_msr(unsigned long msr, u64 mask)
{
u64 val_old, val_new, val_tmp;
/*
* Read the current value, change it and read it back to see if it
* matches, this is needed to detect certain hardware emulators
* (qemu/kvm) that don't trap on the MSR access and always return 0s.
*/
if (rdmsrl_safe(msr, &val_old))
return false;
/*
* Only change the bits which can be updated by wrmsrl.
*/
val_tmp = val_old ^ mask;
if (wrmsrl_safe(msr, val_tmp) ||
rdmsrl_safe(msr, &val_new))
return false;
if (val_new != val_tmp)
return false;
/* At this point the MSR is known to be safe to access.
* Restore the old value and return.
*/
wrmsrl(msr, val_old);
return true;
}
static __init void intel_sandybridge_quirk(void)
{
x86_pmu.check_microcode = intel_snb_check_microcode;
@ -2271,7 +2306,8 @@ __init int intel_pmu_init(void)
union cpuid10_ebx ebx;
struct event_constraint *c;
unsigned int unused;
int version;
struct extra_reg *er;
int version, i;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
@ -2474,6 +2510,9 @@ __init int intel_pmu_init(void)
case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
/* dTLB-load-misses on IVB is different from SNB */
hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
@ -2574,6 +2613,34 @@ __init int intel_pmu_init(void)
}
}
/*
* Accessing LBR MSRs may cause a #GP under certain circumstances,
* e.g. KVM doesn't support LBR MSRs.
* Check all LBR MSRs here.
* Disable LBR access if any LBR MSR cannot be accessed.
*/
if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
x86_pmu.lbr_nr = 0;
}
/*
* Accessing extra MSRs may cause a #GP under certain circumstances,
* e.g. KVM doesn't support offcore events.
* Check all extra_regs here.
*/
if (x86_pmu.extra_regs) {
for (er = x86_pmu.extra_regs; er->msr; er++) {
er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
/* Disable LBR select mapping */
if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
x86_pmu.lbr_sel_map = NULL;
}
}
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
x86_pmu.max_period = x86_pmu.cntval_mask;
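The same probe-before-use idea can be tried from userspace through the x86 msr driver. The sketch below is an editor's illustration, assuming /dev/cpu/0/msr is available (msr module loaded, root) and that 0x1c9 is the LBR TOS MSR on the machine at hand; a failed pread() plays the role of a failed rdmsrl_safe() above.

/* Hypothetical userspace probe: is a given MSR readable on this system? */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

static int msr_readable(int fd, uint32_t msr, uint64_t *val)
{
	/* The msr driver uses the file offset as the register number. */
	return pread(fd, val, sizeof(*val), msr) == (ssize_t)sizeof(*val);
}

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (msr_readable(fd, 0x1c9, &val))	/* assumed LBR TOS */
		printf("MSR 0x1c9 readable: %#llx\n", (unsigned long long)val);
	else
		printf("MSR 0x1c9 not accessible (e.g. under a hypervisor)\n");
	close(fd);
	return 0;
}

Note that the kernel's check_msr() is deliberately stricter: it also writes a toggled value back and verifies the read-back, to catch emulators that do not trap the access and silently return zeros.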


@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
if (!x86_pmu.bts)
return 0;
buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
if (unlikely(!buffer))
buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
if (unlikely(!buffer)) {
WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
return -ENOMEM;
}
max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
thresh = max / 16;


@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),


@ -425,8 +425,8 @@ sysenter_do_call:
cmpl $(NR_syscalls), %eax
jae sysenter_badsys
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp)
sysenter_after_call:
movl %eax,PT_EAX(%esp)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
@ -502,6 +502,7 @@ ENTRY(system_call)
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
syscall_after_call:
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
@ -675,12 +676,12 @@ syscall_fault:
END(syscall_fault)
syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
jmp syscall_exit
movl $-ENOSYS,%eax
jmp syscall_after_call
END(syscall_badsys)
sysenter_badsys:
movl $-ENOSYS,PT_EAX(%esp)
movl $-ENOSYS,%eax
jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC


@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
struct kprobe *p;
struct kprobe_ctlblk *kcb;
if (user_mode_vm(regs))
return 0;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
/*
* We don't want to be preempted for the entire


@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
kvm_x86_ops->set_nmi(vcpu);
}
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
/*
* Because interrupts can be injected asynchronously, we are
* calling check_nested_events again here to avoid a race condition.
* See https://lkml.org/lkml/2014/7/2/60 for discussion about this
* proposal and current concerns. Perhaps we should be setting
* KVM_REQ_EVENT only on certain events and not unconditionally?
*/
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
if (r != 0)
return r;
}
if (kvm_x86_ops->interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
false);


@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
beqz a2, 1f # if at start of vector, don't restore
addi a0, a0, -128
bbsi a0, 8, 1f # don't restore except for overflow 8 and 12
bbsi a0, 7, 2f
bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
/*
* This fixup handler is for the extremely unlikely case where the
* overflow handler's reference thru a0 gets a hardware TLB refill
* that bumps out the (distinct, aliasing) TLB entry that mapped its
* prior references thru a9/a13, and where our reference now thru
* a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
*/
movi a2, window_overflow_restore_a0_fixup
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
bbsi.l a0, 7, 2f
/*
* Restore a0 as saved by _WindowOverflow8().
*
* FIXME: we really need a fixup handler for this L32E,
* for the extremely unlikely case where the overflow handler's
* reference thru a0 gets a hardware TLB refill that bumps out
* the (distinct, aliasing) TLB entry that mapped its prior
* references thru a9, and where our reference now thru a9
* gets a 2nd-level miss exception (not hardware TLB refill).
*/
l32e a2, a9, -16
wsr a2, depc # replace the saved a0
j 1f
l32e a0, a9, -16
wsr a0, depc # replace the saved a0
j 3f
2:
/*
* Restore a0 as saved by _WindowOverflow12().
*
* FIXME: we really need a fixup handler for this L32E,
* for the extremely unlikely case where the overflow handler's
* reference thru a0 gets a hardware TLB refill that bumps out
* the (distinct, aliasing) TLB entry that mapped its prior
* references thru a13, and where our reference now thru a13
* gets a 2nd-level miss exception (not hardware TLB refill).
*/
l32e a2, a13, -16
wsr a2, depc # replace the saved a0
l32e a0, a13, -16
wsr a0, depc # replace the saved a0
3:
xsr a3, excsave1
movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1:
/*
* Restore WindowBase while leaving all address registers restored.
@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
s32i a0, a2, PT_DEPC
_DoubleExceptionVector_handle_exception:
addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER
xsr a3, excsave1
@ -464,10 +469,119 @@ _DoubleExceptionVector_WindowOverflow:
rotw -3
j 1b
.end literal_prefix
ENDPROC(_DoubleExceptionVector)
/*
* Fixup handler for TLB miss in double exception handler for window overflow.
* We get here with windowbase set to the window that was being spilled and
* a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
* (bit set) window.
*
* We do the following here:
* - go to the original window retaining a0 value;
* - set up exception stack to return back to appropriate a0 restore code
* (we'll need to rotate window back and there's no place to save this
* information, use different return address for that);
* - handle the exception;
* - go to the window that was being spilled;
* - set up window_overflow_restore_a0_fixup as a fixup routine;
* - reload a0;
* - restore the original window;
* - reset the default fixup routine;
* - return to user. By the time we get to this fixup handler all information
* about the conditions of the original double exception that happened in
* the window overflow handler is lost, so we just return to userspace to
* retry overflow from start.
*
* a0: value of depc, original value in depc
* a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
* a3: exctable, original value in excsave1
*/
ENTRY(window_overflow_restore_a0_fixup)
rsr a0, ps
extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
rsr a2, windowbase
sub a0, a2, a0
extui a0, a0, 0, 3
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
_beqi a0, 1, .Lhandle_1
_beqi a0, 3, .Lhandle_3
.macro overflow_fixup_handle_exception_pane n
rsr a0, depc
rotw -\n
xsr a3, excsave1
wsr a2, depc
l32i a2, a3, EXC_TABLE_KSTK
s32i a0, a2, PT_AREG0
movi a0, .Lrestore_\n
s32i a0, a2, PT_DEPC
rsr a0, exccause
j _DoubleExceptionVector_handle_exception
.endm
overflow_fixup_handle_exception_pane 2
.Lhandle_1:
overflow_fixup_handle_exception_pane 1
.Lhandle_3:
overflow_fixup_handle_exception_pane 3
.macro overflow_fixup_restore_a0_pane n
rotw \n
/* Need to preserve a0 value here to be able to handle exception
* that may occur on a0 reload from stack. It may occur because
* TLB miss handler may not be atomic and pointer to page table
* may be lost before we get here. There are no free registers,
* so we need to use EXC_TABLE_DOUBLE_SAVE area.
*/
xsr a3, excsave1
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
movi a2, window_overflow_restore_a0_fixup
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
bbsi.l a0, 7, 1f
l32e a0, a9, -16
j 2f
1:
l32e a0, a13, -16
2:
rotw -\n
.endm
.Lrestore_2:
overflow_fixup_restore_a0_pane 2
.Lset_default_fixup:
xsr a3, excsave1
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
movi a2, 0
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
rfe
.Lrestore_1:
overflow_fixup_restore_a0_pane 1
j .Lset_default_fixup
.Lrestore_3:
overflow_fixup_restore_a0_pane 3
j .Lset_default_fixup
ENDPROC(window_overflow_restore_a0_fixup)
.end literal_prefix
/*
* Debug interrupt vector
*


@ -269,13 +269,13 @@ SECTIONS
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
DOUBLEEXC_VECTOR_VADDR - 16,
DOUBLEEXC_VECTOR_VADDR - 40,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
32,
40,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;


@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
return -EINVAL;
}
if (it && start - it->start < bank_sz) {
if (it && start - it->start <= bank_sz) {
if (start == it->start) {
if (end - it->start < bank_sz) {
it->start = end;


@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
/*
* @q could be exiting and already have destroyed all blkgs as
* indicated by NULL root_blkg. If so, don't confuse policies.
*/
if (!q->root_blkg)
return;
blk_throtl_drain(q);
}


@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
EXPORT_SYMBOL(blk_queue_find_tag);
/**
* __blk_free_tags - release a given set of tag maintenance info
* blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
* Tries to free the specified @bqt. Returns true if it was
* actually freed and false if there are still references using it
* Drops the reference count on @bqt and frees it when the last reference
* is dropped.
*/
static int __blk_free_tags(struct blk_queue_tag *bqt)
void blk_free_tags(struct blk_queue_tag *bqt)
{
int retval;
retval = atomic_dec_and_test(&bqt->refcnt);
if (retval) {
if (atomic_dec_and_test(&bqt->refcnt)) {
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
bqt->max_depth);
@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
kfree(bqt);
}
return retval;
}
EXPORT_SYMBOL(blk_free_tags);
/**
* __blk_queue_free_tags - release tag maintenance info
@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
if (!bqt)
return;
__blk_free_tags(bqt);
blk_free_tags(bqt);
q->queue_tags = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
* blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
* For externally managed @bqt frees the map. Callers of this
* function must guarantee to have released all the queues that
* might have been using this tag map.
*/
void blk_free_tags(struct blk_queue_tag *bqt)
{
if (unlikely(!__blk_free_tags(bqt)))
BUG();
}
EXPORT_SYMBOL(blk_free_tags);
/**
* blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device


@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKROSET:
case BLKDISCARD:
case BLKSECDISCARD:
case BLKZEROOUT:
/*
* the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us


@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */


@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
* ata_qc_new - Request an available ATA command, for queueing
* @ap: target port
*
* Some ATA host controllers may implement a queue depth which is less
* than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
* the hardware limitation.
*
* LOCKING:
* None.
*/
@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
unsigned int max_queue = ap->host->n_tags;
unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
for (i = 0; i < ATA_MAX_QUEUE; i++) {
tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->n_tags = ATA_MAX_QUEUE - 1;
host->dev = dev;
host->ops = ops;
}
@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
int i, rc;
host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
dev_err(host->dev, "BUG: trying to register unstarted host\n");
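
Editor's sketch: the comment added to ata_qc_new() explains why the scan is now bounded by the host's advertised depth rather than ATA_MAX_QUEUE. A standalone version of that wrap-around search, with an illustrative reserved tag and a plain bool array standing in for the qc allocation state:

#include <stdbool.h>

#define TAG_INTERNAL 31	/* stand-in for ATA_TAG_INTERNAL */

/* Visit at most max_queue slots, wrapping past the end, starting just
 * after the last tag handed out; skip the reserved internal tag. */
static int find_free_tag(const bool *in_use, unsigned int max_queue,
			 unsigned int last_tag)
{
	unsigned int i, tag;

	for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
		tag = tag < max_queue ? tag : 0;
		if (tag == TAG_INTERNAL)
			continue;
		if (!in_use[tag])
			return (int)tag;
	}
	return -1; /* all usable tags busy */
}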

View File

@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
if (err & ATA_UNC)
if (err & (ATA_UNC | ATA_AMNF))
qc->err_mask |= AC_ERR_MEDIA;
if (err & ATA_IDNF)
qc->err_mask |= AC_ERR_INVALID;
@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
}
if (cmd->command != ATA_CMD_PACKET &&
(res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
ATA_ABORTED)))
ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
(res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
ATA_IDNF | ATA_ABORTED)))
ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
res->feature & ATA_ICRC ? "ICRC " : "",
res->feature & ATA_UNC ? "UNC " : "",
res->feature & ATA_AMNF ? "AMNF " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
res->feature & ATA_ABORTED ? "ABRT " : "");
#endif

View File

@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
struct ep93xx_pata_data *drv_data;
struct ata_host *host;
struct ata_port *ap;
unsigned int irq;
int irq;
struct resource *mem_res;
void __iomem *ide_base;
int err;

View File

@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
struct task_struct *opa;
kref_get(&connection->kref);
/* We may just have force_sig()'ed this thread
* to get it out of some blocking network function.
* Clear signals; otherwise kthread_run(), which internally uses
* wait_for_completion_killable(), will mistake our pending signal
* for a new fatal signal and fail. */
flush_signals(current);
opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) {
drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");

View File

@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
if (reset_capacity) {
if (reset_capacity)
set_capacity(zram->disk, 0);
revalidate_disk(zram->disk);
}
up_write(&zram->init_lock);
/*
* Revalidate disk out of the init_lock to avoid lockdep splat.
* It's okay because disk's capacity is protected by init_lock
* so that revalidate_disk always sees up-to-date capacity.
*/
if (reset_capacity)
revalidate_disk(zram->disk);
}
static ssize_t disksize_store(struct device *dev,
@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
revalidate_disk(zram->disk);
up_write(&zram->init_lock);
/*
* Revalidate disk out of the init_lock to avoid lockdep splat.
* It's okay because disk's capacity is protected by init_lock
* so that revalidate_disk always sees up-to-date capacity.
*/
revalidate_disk(zram->disk);
return len;
out_destroy_comp:
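
Editor's sketch: both zram hunks move revalidate_disk() outside init_lock for the reason the new comments give — calling back into code that takes other locks while holding init_lock invites a lock-order inversion. A userspace sketch of the shape of the fix, with pthread primitives and stub helpers standing in for the zram calls:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_capacity_stub(unsigned long sectors) { (void)sectors; }
static void revalidate_stub(void) { /* may take unrelated locks */ }

static void reset_device(bool reset_capacity)
{
	pthread_mutex_lock(&init_lock);
	if (reset_capacity)
		set_capacity_stub(0);	/* capacity change stays locked */
	pthread_mutex_unlock(&init_lock);

	/* Only after dropping init_lock do we call out; the capacity is
	 * still consistent because it only changes under init_lock,
	 * exactly as the zram comment argues. */
	if (reset_capacity)
		revalidate_stub();
}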

View File

@ -336,10 +336,10 @@ static const struct {
QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
0},
QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

View File

@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
.map = gpio_rcar_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
struct gpio_rcar_info {

View File

@ -1616,22 +1616,6 @@ out:
return ret;
}
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct i915_vma *vma;
/*
* Only the global gtt is relevant for gtt memory mappings, so restrict
* list traversal to objects bound into the global address space. Note
* that the active list should be empty, but better safe than sorry.
*/
WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
i915_gem_release_mmap(vma->obj);
list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
i915_gem_release_mmap(vma->obj);
}
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
i915_gem_release_mmap(obj);
}
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{

View File

@ -31,7 +31,7 @@
struct i915_render_state {
struct drm_i915_gem_object *obj;
unsigned long ggtt_offset;
void *batch;
u32 *batch;
u32 size;
u32 len;
};
@ -80,7 +80,7 @@ free:
static void render_state_free(struct i915_render_state *so)
{
kunmap(so->batch);
kunmap(kmap_to_page(so->batch));
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
kfree(so);

View File

@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
u32 seqno, ctl;
u32 seqno;
ring->hangcheck.deadlock++;
@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
/* cursory check for an unkickable deadlock */
ctl = I915_READ_CTL(signaller);
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
return -1;
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1;
if (signaller->hangcheck.deadlock)
/* cursory check for an unkickable deadlock */
if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
semaphore_passed(signaller) < 0)
return -1;
return 0;

View File

@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 8) {
@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}

View File

@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}

View File

@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}

View File

@ -449,6 +449,7 @@ struct radeon_bo_va {
/* protected by vm mutex */
struct list_head vm_list;
struct list_head vm_status;
/* constant after initialization */
struct radeon_vm *vm;
@ -867,6 +868,9 @@ struct radeon_vm {
struct list_head va;
unsigned id;
/* BOs freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
@ -875,6 +879,8 @@ struct radeon_vm {
/* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables;
struct radeon_bo_va *ib_bo_va;
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
@ -2847,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va);
void radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va);
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);

View File

@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
struct radeon_device *rdev = p->rdev;
struct radeon_bo_va *bo_va;
int i, r;
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
r = radeon_vm_clear_freed(rdev, vm);
if (r)
return r;
if (vm->ib_bo_va == NULL) {
DRM_ERROR("Tmp BO not in VM!\n");
return -EINVAL;
}
r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
continue;
bo = p->relocs[i].robj;
r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
}

View File

@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
radeon_vm_size = 4096;
radeon_vm_size = 4;
}
if (radeon_vm_size < 4) {
dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
if (radeon_vm_size < 1) {
dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
radeon_vm_size);
radeon_vm_size = 4096;
radeon_vm_size = 4;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
if (radeon_vm_size > 1024*1024) {
dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
if (radeon_vm_size > 1024) {
dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
radeon_vm_size = 4096;
radeon_vm_size = 4;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) to small\n",
dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
if (radeon_vm_block_size > 24 ||
radeon_vm_size < (1ull << radeon_vm_block_size)) {
dev_warn(rdev->dev, "VM page table size (%d) to large\n",
(radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
*/
rdev->vm_manager.max_pfn = radeon_vm_size << 8;
rdev->vm_manager.max_pfn = radeon_vm_size << 18;
/* Set asic functions */
r = radeon_asic_init(rdev);
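
Editor's note: the unit change is easy to sanity-check. With 4 KiB pages there are 2^(30-12) = 2^18 pages per GiB, so a size now given in GiB becomes max_pfn via `<< 18`, where the old MiB-based value used `<< 8` (2^(20-12)). A small check program (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int vm_size_gb = 4; /* module default after the change */
	unsigned long long max_pfn = (unsigned long long)vm_size_gb << 18;

	/* back-convert: pages -> bytes (<< 12) -> GiB (>> 30) */
	printf("max_pfn = %llu pages = %llu GiB\n",
	       max_pfn, (max_pfn << 12) >> 30);
	return 0;
}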

View File

@ -173,7 +173,7 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
int radeon_vm_size = 4096;
int radeon_vm_size = 4;
int radeon_vm_block_size = 9;
int radeon_deep_color = 0;
@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444);
MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");

View File

@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
struct radeon_bo_va *bo_va;
struct radeon_vm *vm;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
r = radeon_vm_init(rdev, &fpriv->vm);
vm = &fpriv->vm;
r = radeon_vm_init(rdev, vm);
if (r) {
kfree(fpriv);
return r;
@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
rdev->ring_tmp_bo.bo);
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
rdev->ring_tmp_bo.bo);
r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_bo_va *bo_va;
struct radeon_vm *vm = &fpriv->vm;
int r;
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
bo_va = radeon_vm_bo_find(&fpriv->vm,
rdev->ring_tmp_bo.bo);
if (bo_va)
radeon_vm_bo_rmv(rdev, bo_va);
if (vm->ib_bo_va)
radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
}
radeon_vm_fini(rdev, &fpriv->vm);
radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}

View File

@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
head = &tmp->vm_list;
}
if (bo_va->soffset) {
/* add a clone of the bo_va to clear the old address */
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
return -ENOMEM;
}
tmp->soffset = bo_va->soffset;
tmp->eoffset = bo_va->eoffset;
tmp->vm = vm;
list_add(&tmp->vm_status, &vm->freed);
}
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* Object have to be reserved and mutex must be locked!
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem)
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, ndw;
uint64_t addr;
int r;
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo, vm);
bo_va->bo, vm);
return -EINVAL;
}
@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
trace_radeon_vm_bo_update(bo_va);
nptes = radeon_bo_ngpu_pages(bo);
nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
/* padding, etc. */
ndw = 64;
@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return 0;
}
/**
* radeon_vm_clear_freed - clear freed BOs in the PT
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Make sure all freed BOs are cleared in the PT.
* Returns 0 for success.
*
* PTs have to be reserved and mutex must be locked!
*/
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
list_del(&bo_va->vm_status);
r = radeon_vm_bo_update(rdev, bo_va, NULL);
kfree(bo_va);
if (r)
return r;
}
return 0;
}
/**
* radeon_vm_bo_rmv - remove a bo to a specific vm
*
@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
* Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
* remove the ptes for @bo_va in the page table.
* Returns 0 for success.
*
* Object have to be reserved!
*/
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
void radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
int r = 0;
struct radeon_vm *vm = bo_va->vm;
mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset)
r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
kfree(bo_va);
return r;
mutex_lock(&vm->mutex);
list_del(&bo_va->vm_list);
if (bo_va->soffset) {
bo_va->bo = NULL;
list_add(&bo_va->vm_status, &vm->freed);
} else {
kfree(bo_va);
}
mutex_unlock(&vm->mutex);
}
/**
@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
int r;
vm->id = 0;
vm->ib_bo_va = NULL;
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->va);
INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
pd_entries = radeon_vm_num_pdes(rdev);
@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
kfree(bo_va);
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
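
Editor's sketch: several hunks in this file implement one pattern — a mapping whose page-table entries may still exist is never freed directly; it (or a clone of its address range, in the set_addr case) is parked on vm->freed, and radeon_vm_clear_freed() later writes empty PTEs before releasing the memory. A compact userspace rendition of that deferred-free list (illustrative types, not the radeon structures):

#include <stdlib.h>

struct mapping {
	struct mapping *next;
	unsigned long soffset;	/* nonzero while PTEs may still exist */
};

struct vm {
	struct mapping *freed;	/* unmapped, PTEs not yet cleared */
};

/* Removal: with live PTEs, defer; otherwise free immediately. */
static void mapping_remove(struct vm *vm, struct mapping *m)
{
	if (m->soffset) {
		m->next = vm->freed;
		vm->freed = m;
	} else {
		free(m);
	}
}

/* Later, with the page tables safe to touch: clear, then free. */
static void vm_clear_freed(struct vm *vm)
{
	struct mapping *m;

	while ((m = vm->freed) != NULL) {
		vm->freed = m->next;
		/* ...write empty PTEs for the mapping's range... */
		free(m);
	}
}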

View File

@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}

View File

@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
/* There are stability issues reported on laptops with
* bapm installed when switching between AC and battery
* power. At the same time, some desktop boards hang
* if it's not enabled and dpm is enabled.
/* There are stability issues reported with
* bapm enabled when switching between AC and battery
* power. At the same time, some MSI boards hang
* if it's not enabled and dpm is enabled. Just enable
* it for MSI boards right now.
*/
if (rdev->flags & RADEON_IS_MOBILITY)
pi->enable_bapm = false;
else
if (rdev->pdev->subsystem_vendor == 0x1462)
pi->enable_bapm = true;
else
pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;

View File

@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
*/
static inline s8 TEMP_TO_REG(int val)
{
return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
}
static inline int TEMP_FROM_REG(s8 val)
@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val > 255)
return -EINVAL;
data->vrm = val;
return count;
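
Editor's note: the TEMP_TO_REG change reorders clamp and scale. Clamping the already-scaled value against the millidegree limits is a no-op, so e.g. 130000 m°C scales to 130 and then wraps when truncated to s8; clamping the input first yields 127. A hedged sketch, assuming a SCALE that divides with rounding (the driver's exact macro may differ slightly):

#include <stdio.h>

/* assumed rounding scale, in the spirit of the driver's SCALE */
#define SCALE(val, mul, div) (((val) * (mul) + (div) / 2) / (div))

static long clampl(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Fixed order: clamp the millidegree input, then scale to degrees. */
static signed char temp_to_reg(long mdeg)
{
	return (signed char)SCALE(clampl(mdeg, -128000L, 127000L), 1L, 1000L);
}

int main(void)
{
	/* old order: SCALE(130000) = 130, clamp was a no-op, and
	 * (s8)130 wraps to -126; the fixed order yields 127 */
	printf("%d\n", temp_to_reg(130000));
	return 0;
}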

View File

@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
config BLK_DEV_CS5520
tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@ -426,6 +427,7 @@ config BLK_DEV_CS5520
config BLK_DEV_CS5530
tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@ -435,7 +437,7 @@ config BLK_DEV_CS5530
config BLK_DEV_CS5535
tristate "AMD CS5535 chipset support"
depends on X86 && !X86_64
depends on X86_32
select BLK_DEV_IDEDMA_PCI
help
Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
config BLK_DEV_SC1200
tristate "National SCx200 chipset support"
depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
This driver adds support for the on-board IDE controller on the

View File

@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
if (irq_handler == NULL)
irq_handler = ide_intr;
if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
goto out_up;
if (!host->get_lock)
if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
goto out_up;
#if !defined(__mc68000__)
printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
ide_proc_unregister_port(hwif);
free_irq(hwif->irq, hwif);
if (!hwif->host->get_lock)
free_irq(hwif->irq, hwif);
device_unregister(hwif->portdev);
device_unregister(&hwif->gendev);
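
Editor's sketch: the two ide hunks are mirror images — when the host supplies its own get_lock handler the core must neither request nor free the IRQ, and the condition guarding acquisition has to be repeated verbatim at teardown. A generic sketch of that symmetry (stubbed resource calls, illustrative field name):

#include <stdbool.h>
#include <stdio.h>

struct host { bool manages_own_irq; };

static int setup(struct host *h)
{
	if (!h->manages_own_irq)
		printf("request_irq()\n"); /* acquire only in this case */
	return 0;
}

static void teardown(struct host *h)
{
	/* Mirror the acquisition condition exactly; freeing an IRQ the
	 * core never requested is as wrong as leaking one it did. */
	if (!h->manages_own_irq)
		printf("free_irq()\n");
}

int main(void)
{
	struct host h = { .manages_own_irq = true };
	setup(&h);
	teardown(&h);
	return 0;
}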

View File

@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
}
static int input_get_disposition(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
unsigned int type, unsigned int code, int *pval)
{
int disposition = INPUT_IGNORE_EVENT;
int value = *pval;
switch (type) {
@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
break;
}
*pval = value;
return disposition;
}
@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
{
int disposition;
disposition = input_get_disposition(dev, type, code, value);
disposition = input_get_disposition(dev, type, code, &value);
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
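
Editor's sketch: this interface change fixes a silent value loss — input_get_disposition() may rewrite the event value while classifying it, and with pass-by-value the caller kept feeding handlers the original. Passing a pointer publishes the adjustment. A minimal sketch of the out-parameter shape (illustrative event handling, not the input core's tables):

#include <stdio.h>

/* Classify an event and, as a side effect, normalize its value; the
 * caller sees the normalized copy because it is returned via *pval. */
static int get_disposition(int *pval)
{
	int value = *pval;

	if (value > 1)
		value = 1; /* e.g. collapse autorepeat to a plain press */

	*pval = value;
	return 1; /* "pass to handlers" */
}

int main(void)
{
	int value = 2;

	if (get_disposition(&value))
		printf("handlers see value %d\n", value); /* 1, not 2 */
	return 0;
}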

Some files were not shown because too many files have changed in this diff