
Merge branch 'sched/urgent' into sched/core, to merge fixes before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2014-07-28 10:03:00 +02:00
commit ca5bc6cd5d
390 changed files with 3594 additions and 2047 deletions

View File

@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
configuring GPIOs it can get its ACPI handle and extract this information
from ACPI tables.
Currently the kernel is not able to automatically determine from which ACPI
device it should make the corresponding platform device, so we need to add
the ACPI device explicitly to the acpi_platform_device_ids list defined in
drivers/acpi/acpi_platform.c. This limitation applies only to platform
devices; SPI and I2C devices are created automatically as described below.
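For illustration, such an entry is just an ACPI _HID match added to that table. A minimal sketch (the "ABCD0001" ID is hypothetical and not part of this commit):

/* drivers/acpi/acpi_platform.c -- sketch only, hypothetical device ID */
static const struct acpi_device_id acpi_platform_device_ids[] = {
	{ "PNP0D40" },		/* existing entry */
	{ "ABCD0001" },		/* hypothetical device to enumerate as a platform device */
	{ }
};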
DMA support
~~~~~~~~~~~
DMA controllers enumerated via ACPI should be registered in the system to

View File

@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
under node /cpus/cpu@0.
Required properties:
- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
for details
- None
Optional properties:
- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
details. OPPs *must* be supplied either via DT, i.e. this property, or
populated at runtime.
- clock-latency: Specify the possible maximum transition latency for clock,
in unit of nanoseconds.
- voltage-tolerance: Specify the CPU voltage tolerance in percentage.

View File

@ -281,6 +281,19 @@ gestures can normally be extracted from it.
If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
device.
INPUT_PROP_TOPBUTTONPAD:
-----------------------
Some laptops, most notably the Lenovo *40 series, provide a trackstick
device but do not have physical buttons associated with the trackstick
device. Instead, the top area of the touchpad is marked to show
visual/haptic areas for left, middle, right buttons intended to be used
with the trackstick.
If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
accordingly. This property does not affect kernel behavior.
The kernel does not provide button emulation for such devices but treats
them as any other INPUT_PROP_BUTTONPAD device.
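For illustration only, a minimal userspace sketch that queries this property over evdev (the device path is hypothetical, and headers new enough to define INPUT_PROP_TOPBUTTONPAD are assumed):

/* Sketch: detect INPUT_PROP_TOPBUTTONPAD on an evdev node. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	unsigned char props[INPUT_PROP_MAX / 8 + 1];
	int fd = open("/dev/input/event5", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	memset(props, 0, sizeof(props));
	if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) >= 0 &&
	    (props[INPUT_PROP_TOPBUTTONPAD / 8] &
	     (1 << (INPUT_PROP_TOPBUTTONPAD % 8))))
		printf("top software button area present\n");
	close(fd);
	return 0;
}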
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.

View File

@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
leaf rcu_node structure. Useful for very large
systems.
rcutree.jiffies_till_sched_qs= [KNL]
Set required age in jiffies for a
given grace period before RCU starts
soliciting quiescent-state help from
rcu_note_context_switch().
rcutree.jiffies_till_first_fqs= [KNL]
Set delay from grace-period initialization to
first attempt to force quiescent states.
@ -3526,7 +3532,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the allocated input device; If set to 0, video driver
will only send out the event without touching backlight
brightness level.
default: 0
default: 1
virtio_mmio.device=
[VMMIO] Memory mapped virtio (platform) device.

View File

@ -156,7 +156,6 @@ F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/realtek/r8169.c
@ -4511,8 +4510,7 @@ S: Supported
F: drivers/idle/i7300_idle.c
IEEE 802.15.4 SUBSYSTEM
M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
M: Alexander Aring <alex.aring@gmail.com>
L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@ -6958,6 +6956,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/pinctrl/pinctrl-at91.c
PIN CONTROLLER - RENESAS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-sh@vger.kernel.org
S: Maintained
F: drivers/pinctrl/sh-pfc/
PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <t.figa@samsung.com>
M: Thomas Abraham <thomas.abraham@linaro.org>
@ -8021,6 +8025,16 @@ F: drivers/ata/
F: include/linux/ata.h
F: include/linux/libata.h
SERIAL ATA AHCI PLATFORM devices support
M: Hans de Goede <hdegoede@redhat.com>
M: Tejun Heo <tj@kernel.org>
L: linux-ide@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
S: Supported
F: drivers/ata/ahci_platform.c
F: drivers/ata/libahci_platform.c
F: include/linux/ahci_platform.h
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
L: linux-scsi@vger.kernel.org

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
@ -688,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
endif
endif
KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
ifdef CONFIG_DEBUG_INFO
KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -Wa,-gdwarf-2

View File

@ -6,6 +6,7 @@ config ARM
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION

View File

@ -925,7 +925,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x00100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
<&uhpck>;
clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
status = "disabled";

View File

@ -1124,6 +1124,7 @@
compatible = "atmel,at91sam9rl-pwm";
reg = <0xf8034000 0x300>;
interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
clocks = <&pwm_clk>;
#pwm-cells = <3>;
status = "disabled";
};
@ -1155,8 +1156,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
<&uhpck>;
clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
status = "disabled";
};

View File

@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
static inline const int cpu_corepower_flags(void)
static inline int cpu_corepower_flags(void)
{
return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

View File

@ -40,15 +40,17 @@ static inline void cpu_leave_lowpower(void)
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
u32 mpidr = cpu_logical_map(cpu);
u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
for (;;) {
/* make cpu1 to be turned off at next WFI command */
if (cpu == 1)
exynos_cpu_power_down(cpu);
/* Turn the CPU off on next WFI instruction. */
exynos_cpu_power_down(core_id);
wfi();
if (pen_release == cpu_logical_map(cpu)) {
if (pen_release == core_id) {
/*
* OK, proper wakeup, we're done
*/

View File

@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
unsigned long phys_cpu = cpu_logical_map(cpu);
u32 mpidr = cpu_logical_map(cpu);
u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
int ret = -ENOSYS;
/*
@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* the holding pen - release it, then wait for it to flag
* that it has been released by resetting pen_release.
*
* Note that "pen_release" is the hardware CPU ID, whereas
* Note that "pen_release" is the hardware CPU core ID, whereas
* "cpu" is Linux's internal ID.
*/
write_pen_release(phys_cpu);
write_pen_release(core_id);
if (!exynos_cpu_power_state(cpu)) {
exynos_cpu_power_up(cpu);
if (!exynos_cpu_power_state(core_id)) {
exynos_cpu_power_up(core_id);
timeout = 10;
/* wait max 10 ms until cpu1 is on */
while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) {
while (exynos_cpu_power_state(core_id)
!= S5P_CORE_LOCAL_PWR_EN) {
if (timeout-- == 0)
break;
@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Try to set boot address using firmware first
* and fall back to boot register if it fails.
*/
ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
if (ret && ret != -ENOSYS)
goto fail;
if (ret == -ENOSYS) {
void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
void __iomem *boot_reg = cpu_boot_reg(core_id);
if (IS_ERR(boot_reg)) {
ret = PTR_ERR(boot_reg);
goto fail;
}
__raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
__raw_writel(boot_addr, cpu_boot_reg(core_id));
}
call_firmware_op(cpu_boot, phys_cpu);
call_firmware_op(cpu_boot, core_id);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
* boot register if it fails.
*/
for (i = 1; i < max_cpus; ++i) {
unsigned long phys_cpu;
unsigned long boot_addr;
u32 mpidr;
u32 core_id;
int ret;
phys_cpu = cpu_logical_map(i);
mpidr = cpu_logical_map(i);
core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
boot_addr = virt_to_phys(exynos4_secondary_startup);
ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
if (ret && ret != -ENOSYS)
break;
if (ret == -ENOSYS) {
void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
void __iomem *boot_reg = cpu_boot_reg(core_id);
if (IS_ERR(boot_reg))
break;
__raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
__raw_writel(boot_addr, cpu_boot_reg(core_id));
}
}
}

View File

@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
static const char *lvds_sels[] = {
"dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
"pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
"pcie_ref", "sata_ref",
"pcie_ref_125m", "sata_ref_100m",
};
enum mx6q_clks {
@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
/* All existing boards with PCIe use LVDS1 */
if (IS_ENABLED(CONFIG_PCI_IMX6))
clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
/* Set initial power mode */
imx6q_set_lpm(WAIT_CLOCKED);

View File

@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
.notifier_call = mvebu_hwcc_notifier,
};
static struct notifier_block mvebu_hwcc_pci_nb = {
.notifier_call = mvebu_hwcc_notifier,
};
static void __init armada_370_coherency_init(struct device_node *np)
{
struct resource res;
@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
{
if (coherency_available())
bus_register_notifier(&pci_bus_type,
&mvebu_hwcc_nb);
&mvebu_hwcc_pci_nb);
return 0;
}

View File

@ -15,6 +15,8 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
__CPUINIT
#define CPU_RESUME_ADDR_REG 0xf10182d4
@ -22,13 +24,18 @@
.global armada_375_smp_cpu1_enable_code_end
armada_375_smp_cpu1_enable_code_start:
ldr r0, [pc, #4]
ARM_BE8(setend be)
adr r0, 1f
ldr r0, [r0]
ldr r1, [r0]
ARM_BE8(rev r1, r1)
mov pc, r1
1:
.word CPU_RESUME_ADDR_REG
armada_375_smp_cpu1_enable_code_end:
ENTRY(mvebu_cortex_a9_secondary_startup)
ARM_BE8(setend be)
bl v7_invalidate_l1
b secondary_startup
ENDPROC(mvebu_cortex_a9_secondary_startup)

View File

@ -201,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
/* Test the CR_C bit and set it if it was cleared */
asm volatile(
"mrc p15, 0, %0, c1, c0, 0 \n\t"
"tst %0, #(1 << 2) \n\t"
"orreq %0, %0, #(1 << 2) \n\t"
"mcreq p15, 0, %0, c1, c0, 0 \n\t"
"mrc p15, 0, r0, c1, c0, 0 \n\t"
"tst r0, #(1 << 2) \n\t"
"orreq r0, r0, #(1 << 2) \n\t"
"mcreq p15, 0, r0, c1, c0, 0 \n\t"
"isb "
: : "r" (0));
: : : "r0");
pr_warn("Failed to suspend the system\n");

View File

@ -4,6 +4,7 @@ config ARM64
select ARCH_HAS_OPP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS

View File

@ -12,8 +12,6 @@
#include <linux/efi.h>
#include <linux/libfdt.h>
#include <asm/sections.h>
#include <generated/compile.h>
#include <generated/utsrelease.h>
/*
* AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from

View File

@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
early_param("initrd", early_initrd);
#endif
/*
* Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
static phys_addr_t max_zone_dma_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA)) {
unsigned long max_dma_phys =
(unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
max_dma = PFN_DOWN(max_zone_dma_phys());
zone_size[ZONE_DMA] = max_dma - min;
}
zone_size[ZONE_NORMAL] = max - max_dma;
@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))
dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
dma_phys_limit = max_zone_dma_phys();
dma_contiguous_reserve(dma_phys_limit);
memblock_allow_resize();

View File

@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
CONFIG_SPI_BFIN_V3=y
CONFIG_SPI_ADI_V3=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set

View File

@ -145,7 +145,7 @@ SECTIONS
.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
.init.data : AT(__data_lma + __data_len)
.init.data : AT(__data_lma + __data_len + 32)
{
__sinitdata = .;
INIT_DATA

View File

@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>

View File

@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>

View File

@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>

View File

@ -21,6 +21,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>

View File

@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"),
@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"),
#endif
PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"),
PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"),
};
static int __init ezkit_init(void)

View File

@ -44,6 +44,7 @@
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/i2c-pca-platform.h>
#include <linux/delay.h>

View File

@ -18,6 +18,7 @@
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>

View File

@ -14,6 +14,7 @@
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>

View File

@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
{
#define CONFIG_SMC_GCTL_VAL 0x00000010
if (!devm_pinctrl_get_select_default(&pdev->dev))
return -EBUSY;
bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
bfin_write32(SMC_B0CTL, 0x01002011);
bfin_write32(SMC_B0TIM, 0x08170977);
@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
void bf609_nor_flash_exit(struct platform_device *pdev)
{
devm_pinctrl_put(pdev->dev.pins->p);
bfin_write32(SMC_GCTL, 0);
}
@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"),
#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
#else
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"),
#endif
PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"),

View File

@ -10,6 +10,7 @@
#define __MACH_BF609_PM_H__
#include <linux/suspend.h>
#include <linux/platform_device.h>
extern int bfin609_pm_enter(suspend_state_t state);
extern int bf609_pm_prepare(void);
@ -19,6 +20,6 @@ void bf609_hibernate(void);
void bfin_sec_raise_irq(unsigned int sid);
void coreb_enable(void);
int bf609_nor_flash_init(void);
void bf609_nor_flash_exit(void);
int bf609_nor_flash_init(struct platform_device *pdev);
void bf609_nor_flash_exit(struct platform_device *pdev);
#endif

View File

@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static int smc_pm_syscore_suspend(void)
{
bf609_nor_flash_exit();
bf609_nor_flash_exit(NULL);
return 0;
}
static void smc_pm_syscore_resume(void)
{
bf609_nor_flash_init();
bf609_nor_flash_init(NULL);
}
static struct syscore_ops smc_pm_syscore_ops = {

View File

@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
/* Enable interrupts IVG7-15 */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |

View File

@ -69,8 +69,6 @@
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192

View File

@ -728,7 +728,6 @@ static void __init pagetable_init(void)
#endif
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
memset(empty_zero_page, 0, PAGE_SIZE);
}
static void __init gateway_init(void)

View File

@ -145,6 +145,7 @@ config PPC
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN

View File

@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \

View File

@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
return rb;
}
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
bool is_base_size)
{
int size, a_psize;
/* Look at the 8 bit LP value */
unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
continue;
a_psize = __hpte_actual_psize(lp, size);
if (a_psize != -1)
if (a_psize != -1) {
if (is_base_size)
return 1ul << mmu_psize_defs[size].shift;
return 1ul << mmu_psize_defs[a_psize].shift;
}
}
}
return 0;
}
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
return __hpte_page_size(h, l, 0);
}
static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
return __hpte_page_size(h, l, 1);
}
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;

View File

@ -22,6 +22,7 @@
*/
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
#include <asm/processor.h>
/*
* Segment table
@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
*/
struct subpage_prot_table {
unsigned long maxaddr; /* only addresses < this are protected */
unsigned int **protptrs[2];
unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
unsigned int *low_prot[4];
};

View File

@ -277,6 +277,8 @@ n:
.globl n; \
n:
#define _GLOBAL_TOC(name) _GLOBAL(name)
#define _KPROBE(n) \
.section ".kprobes.text","a"; \
.globl n; \

View File

@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 DD1: Does not support doorbell IPIs */
.pvr_mask = 0xffffff00,
.pvr_value = 0x004d0100,
.cpu_name = "POWER8 (raw)",
.cpu_features = CPU_FTRS_POWER8_DD1,
.cpu_user_features = COMMON_USER_POWER8,
.cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.flush_tlb = __flush_tlb_power8,
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004d0000,

View File

@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static const int powerpc_smt_flags(void)
static int powerpc_smt_flags(void)
{
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

View File

@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
goto out;
}
if (!rma_setup && is_vrma_hpte(v)) {
unsigned long psize = hpte_page_size(v, r);
unsigned long psize = hpte_base_page_size(v, r);
unsigned long senc = slb_pgsize_encoding(psize);
unsigned long lpcr;

View File

@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
r = hpte[i+1];
/*
* Check the HPTE again, including large page size
* Since we don't currently allow any MPSS (mixed
* page-size segment) page sizes, it is sufficient
* to check against the actual page size.
* Check the HPTE again, including base page size
*/
if ((v & valid) && (v & mask) == val &&
hpte_page_size(v, r) == (1ul << pshift))
hpte_base_page_size(v, r) == (1ul << pshift))
/* Return with the HPTE still locked */
return (hash << 3) + (i >> 1);

View File

@ -48,7 +48,7 @@
*
* LR = return address to continue at after eventually re-enabling MMU
*/
_GLOBAL(kvmppc_hv_entry_trampoline)
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -112(r1)

View File

@ -25,7 +25,11 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
#endif
#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
#elif defined(CONFIG_PPC_BOOK3S_32)

View File

@ -36,7 +36,11 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
#endif
#elif defined(CONFIG_PPC_BOOK3S_32)
@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
* On entry, r4 contains the guest shadow MSR
* MSR.EE has to be 0 when calling this function
*/
_GLOBAL(kvmppc_entry_trampoline)
_GLOBAL_TOC(kvmppc_entry_trampoline)
mfmsr r5
LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
toreal(r7)

View File

@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq, server, priority;
int rc;
if (args->nargs != 3 || args->nret != 1) {
if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
irq = args->args[0];
server = args->args[1];
priority = args->args[2];
irq = be32_to_cpu(args->args[0]);
server = be32_to_cpu(args->args[1]);
priority = be32_to_cpu(args->args[2]);
rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
if (rc)
rc = -3;
out:
args->rets[0] = rc;
args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq, server, priority;
int rc;
if (args->nargs != 1 || args->nret != 3) {
if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
rc = -3;
goto out;
}
irq = args->args[0];
irq = be32_to_cpu(args->args[0]);
server = priority = 0;
rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
goto out;
}
args->rets[1] = server;
args->rets[2] = priority;
args->rets[1] = cpu_to_be32(server);
args->rets[2] = cpu_to_be32(priority);
out:
args->rets[0] = rc;
args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq;
int rc;
if (args->nargs != 1 || args->nret != 1) {
if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
irq = args->args[0];
irq = be32_to_cpu(args->args[0]);
rc = kvmppc_xics_int_off(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
args->rets[0] = rc;
args->rets[0] = cpu_to_be32(rc);
}
static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
u32 irq;
int rc;
if (args->nargs != 1 || args->nret != 1) {
if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
irq = args->args[0];
irq = be32_to_cpu(args->args[0]);
rc = kvmppc_xics_int_on(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
args->rets[0] = rc;
args->rets[0] = cpu_to_be32(rc);
}
#endif /* CONFIG_KVM_XICS */
@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
return rc;
}
static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
{
#ifdef __LITTLE_ENDIAN__
int i;
args->token = be32_to_cpu(args->token);
args->nargs = be32_to_cpu(args->nargs);
args->nret = be32_to_cpu(args->nret);
for (i = 0; i < args->nargs; i++)
args->args[i] = be32_to_cpu(args->args[i]);
#endif
}
static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
{
#ifdef __LITTLE_ENDIAN__
int i;
for (i = 0; i < args->nret; i++)
args->args[i] = cpu_to_be32(args->args[i]);
args->token = cpu_to_be32(args->token);
args->nargs = cpu_to_be32(args->nargs);
args->nret = cpu_to_be32(args->nret);
#endif
}
int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
{
struct rtas_token_definition *d;
@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
if (rc)
goto fail;
kvmppc_rtas_swap_endian_in(&args);
/*
* args->rets is a pointer into args->args. Now that we've
* copied args we need to fix it up to point into our copy,
@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
args.rets = &args.args[args.nargs];
args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->lock);
rc = -ENOENT;
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
if (d->token == args.token) {
if (d->token == be32_to_cpu(args.token)) {
d->handler->handler(vcpu, &args);
rc = 0;
break;
@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
if (rc == 0) {
args.rets = orig_rets;
kvmppc_rtas_swap_endian_out(&args);
rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
if (rc)
goto fail;

View File

@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (printk_ratelimit())
pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
__func__, (long)gfn, pfn);
return -EINVAL;
ret = -EINVAL;
goto out;
}
kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

View File

@ -77,7 +77,7 @@ _GLOBAL(memset)
stb r4,0(r6)
blr
_GLOBAL(memmove)
_GLOBAL_TOC(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
b memcpy

View File

@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = regs->gpr[rb] & 0x3f;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = rb;
ival = (signed int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#ifdef __powerpc64__
case 27: /* sld */
sh = regs->gpr[rd] & 0x7f;
sh = regs->gpr[rb] & 0x7f;
if (sh < 64)
regs->gpr[ra] = regs->gpr[rd] << sh;
else
@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = regs->gpr[rb] & 0x7f;
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;
@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
sh = rb | ((instr & 2) << 4);
ival = (signed long int) regs->gpr[rd];
regs->gpr[ra] = ival >> sh;
if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
regs->xer |= XER_CA;
else
regs->xer &= ~XER_CA;

View File

@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
case BPF_ANC | SKF_AD_VLAN_TAG:
case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
vlan_tci));
if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
else
if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
} else {
PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
PPC_SRWI(r_A, r_A, 12);
}
break;
case BPF_ANC | SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,

View File

@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
}
of_node_set_flag(dn, OF_DYNAMIC);
of_node_init(dn);
return dn;
}

View File

@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
np->properties = proplist;
of_node_set_flag(np, OF_DYNAMIC);
of_node_init(np);
np->parent = derive_parent(path);
if (IS_ERR(np->parent)) {

View File

@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
return 0;
asm volatile(
"0: lfpc %1\n"
" la %0,0\n"
" lfpc %1\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));

View File

@ -437,11 +437,11 @@ ENTRY(startup_kdump)
#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_ZEC12)
.long 3, 0xc100efea, 0xf46ce800, 0x00400000
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100efea, 0xf46c0000
.long 2, 0xc100eff2, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
.long 2, 0xc100efea, 0xf0680000
.long 2, 0xc100eff2, 0xf0680000
#elif defined(CONFIG_MARCH_Z9_109)
.long 1, 0xc100efc2
#elif defined(CONFIG_MARCH_Z990)

View File

@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
unsigned long mask = PSW_MASK_USER;
mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
if ((data & ~mask) != PSW_USER_BITS)
if ((data ^ PSW_USER_BITS) & ~mask)
/* Invalid psw mask. */
return -EINVAL;
if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
/* Invalid address-space-control bits */
return -EINVAL;
if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
/* Invalid addressing mode bits */
return -EINVAL;
}
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
/* Build a 64 bit psw mask from 31 bit mask. */
if ((tmp & ~mask) != PSW32_USER_BITS)
if ((tmp ^ PSW32_USER_BITS) & ~mask)
/* Invalid psw mask. */
return -EINVAL;
if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
/* Invalid address-space-control bits */
return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
(__u64)(tmp & mask) << 32;

View File

@ -48,13 +48,10 @@
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
static void zpci_enable_irq(struct irq_data *data);
static void zpci_disable_irq(struct irq_data *data);
static struct irq_chip zpci_irq_chip = {
.name = "zPCI",
.irq_unmask = zpci_enable_irq,
.irq_mask = zpci_disable_irq,
.irq_unmask = unmask_msi_irq,
.irq_mask = mask_msi_irq,
};
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
return rc;
}
static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
int offset, pos;
u32 mask_bits;
if (msi->msi_attrib.is_msix) {
offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
msi->masked = readl(msi->mask_base + offset);
writel(flag, msi->mask_base + offset);
} else if (msi->msi_attrib.maskbit) {
pos = (long) msi->mask_base;
pci_read_config_dword(msi->dev, pos, &mask_bits);
mask_bits &= ~(mask);
mask_bits |= flag & mask;
pci_write_config_dword(msi->dev, pos, mask_bits);
} else
return 0;
msi->msi_attrib.maskbit = !!flag;
return 1;
}
static void zpci_enable_irq(struct irq_data *data)
{
struct msi_desc *msi = irq_get_msi_desc(data->irq);
zpci_msi_set_mask_bits(msi, 1, 0);
}
static void zpci_disable_irq(struct irq_data *data)
{
struct msi_desc *msi = irq_get_msi_desc(data->irq);
zpci_msi_set_mask_bits(msi, 1, 1);
}
void pcibios_fixup_bus(struct pci_bus *bus)
{
}
@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
/* Release MSI interrupts */
list_for_each_entry(msi, &pdev->msi_list, list) {
zpci_msi_set_mask_bits(msi, 1, 1);
if (msi->msi_attrib.is_msix)
default_msix_mask_irq(msi, 1);
else
default_msi_mask_irq(msi, 1, 1);
irq_set_msi_desc(msi->irq, NULL);
irq_free_desc(msi->irq);
msi->msg.address_lo = 0;

View File

@ -32,7 +32,8 @@ endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
$(call cc-option,-m2a-nofpu,)
$(call cc-option,-m2a-nofpu,) \
$(call cc-option,-m4-nofpu,)
cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
$(call cc-option,-mno-implicit-fp,-m4-nofpu)

View File

@ -78,6 +78,7 @@ config SPARC64
select HAVE_C_RECORDMCOUNT
select NO_BOOTMEM
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
config ARCH_DEFCONFIG
string

View File

@ -410,8 +410,9 @@
#define __NR_finit_module 342
#define __NR_sched_setattr 343
#define __NR_sched_getattr 344
#define __NR_renameat2 345
#define NR_syscalls 345
#define NR_syscalls 346
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001

View File

@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
.globl sys32_mmap2
sys32_mmap2:

View File

@ -86,3 +86,4 @@ sys_call_table:
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2

View File

@ -87,6 +87,7 @@ sys_call_table32:
/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2
#endif /* CONFIG_COMPAT */
@ -165,3 +166,4 @@ sys_call_table:
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2

View File

@ -12,6 +12,7 @@
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
struct host_vm_change {
struct host_vm_op {
@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
if ((addr >= STUB_START) && (addr < STUB_END))
return -EINVAL;
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MUNMAP) &&
@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
/* This is not an else because ret is modified above */
if (ret) {
printk(KERN_ERR "fix_range_common: failed, killing current "
"process\n");
"process: %d\n", task_tgid_vnr(current));
/* We are under mmap_sem, release it such that current can terminate */
up_write(&current->mm->mmap_sem);
force_sig(SIGKILL, current);
do_signal();
}
}

View File

@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
int is_write = FAULT_WRITE(fi);
unsigned long address = FAULT_ADDRESS(fi);
if (regs)
if (!is_user && regs)
current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
if (!is_user && (address >= start_vm) && (address < end_vm)) {

View File

@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
void wait_stub_done(int pid)
{
int n, status, err, bad_stop = 0;
int n, status, err;
while (1) {
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@ -74,8 +74,6 @@ void wait_stub_done(int pid)
if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
return;
else
bad_stop = 1;
bad_wait:
err = ptrace_dump_regs(pid);
@ -85,10 +83,7 @@ bad_wait:
printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
"pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
status);
if (bad_stop)
kill(pid, SIGKILL);
else
fatal_sigsegv();
fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);

View File

@ -131,6 +131,7 @@ config X86
select HAVE_CC_STACKPROTECTOR
select GENERIC_CPU_AUTOPROBE
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
config INSTRUCTION_DECODER
def_bool y

View File

@ -91,10 +91,9 @@ bs_die:
.section ".bsdata", "a"
bugger_off_msg:
.ascii "Direct floppy boot is not supported. "
.ascii "Use a boot loader program instead.\r\n"
.ascii "Use a boot loader.\r\n"
.ascii "\n"
.ascii "Remove disk and press any key to reboot ...\r\n"
.ascii "Remove disk and press any key to reboot...\r\n"
.byte 0
#ifdef CONFIG_EFI_STUB
@ -108,7 +107,7 @@ coff_header:
#else
.word 0x8664 # x86-64
#endif
.word 3 # nr_sections
.word 4 # nr_sections
.long 0 # TimeDateStamp
.long 0 # PointerToSymbolTable
.long 1 # NumberOfSymbols
@ -250,6 +249,25 @@ section_table:
.word 0 # NumberOfLineNumbers
.long 0x60500020 # Characteristics (section flags)
#
# The offset & size fields are filled in by build.c.
#
.ascii ".bss"
.byte 0
.byte 0
.byte 0
.byte 0
.long 0
.long 0x0
.long 0 # Size of initialized data
# on disk
.long 0x0
.long 0 # PointerToRelocations
.long 0 # PointerToLineNumbers
.word 0 # NumberOfRelocations
.word 0 # NumberOfLineNumbers
.long 0xc8000080 # Characteristics (section flags)
#endif /* CONFIG_EFI_STUB */
# Kernel attributes; used by setup. This is part 1 of the

View File

@ -143,7 +143,7 @@ static void usage(void)
#ifdef CONFIG_EFI_STUB
static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
{
unsigned int pe_header;
unsigned short num_sections;
@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
put_unaligned_le32(size, section + 0x8);
/* section header vma field */
put_unaligned_le32(offset, section + 0xc);
put_unaligned_le32(vma, section + 0xc);
/* section header 'size of initialised data' field */
put_unaligned_le32(size, section + 0x10);
put_unaligned_le32(datasz, section + 0x10);
/* section header 'file offset' field */
put_unaligned_le32(offset, section + 0x14);
@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
}
}
static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
{
update_pecoff_section_header_fields(section_name, offset, size, size, offset);
}
static void update_pecoff_setup_and_reloc(unsigned int size)
{
u32 setup_offset = 0x200;
@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
pe_header = get_unaligned_le32(&buf[0x3c]);
/* Size of image */
put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
/*
* Size of code: Subtract the size of the first sector (512 bytes)
* which includes the header.
@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
update_pecoff_section_header(".text", text_start, text_sz);
}
static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
{
unsigned int pe_header;
unsigned int bss_sz = init_sz - file_sz;
pe_header = get_unaligned_le32(&buf[0x3c]);
/* Size of uninitialized data */
put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
/* Size of image */
put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
}
static int reserve_pecoff_reloc_section(int c)
{
/* Reserve 0x20 bytes for .reloc section */
@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
static inline void update_pecoff_text(unsigned int text_start,
unsigned int file_sz) {}
static inline void update_pecoff_bss(unsigned int file_sz,
unsigned int init_sz) {}
static inline void efi_stub_defaults(void) {}
static inline void efi_stub_entry_update(void) {}
@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
int main(int argc, char ** argv)
{
unsigned int i, sz, setup_sectors;
unsigned int i, sz, setup_sectors, init_sz;
int c;
u32 sys_size;
struct stat sb;
@ -376,7 +396,9 @@ int main(int argc, char ** argv)
buf[0x1f1] = setup_sectors-1;
put_unaligned_le32(sys_size, &buf[0x1f4]);
update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
init_sz = get_unaligned_le32(&buf[0x260]);
update_pecoff_bss(i + (sys_size * 16), init_sz);
efi_stub_entry_update();

View File

@ -841,7 +841,6 @@ static int apm_do_idle(void)
u32 eax;
u8 ret = 0;
int idled = 0;
int polling;
int err = 0;
if (!need_resched()) {

View File

@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
*/
detect_extended_topology(c);
if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
/*
* let's use the legacy cpuid vector 0x1 and 0x4 for topology
* detection.
*/
c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
detect_ht(c);
#endif
}
l2 = init_intel_cacheinfo(c);
if (c->cpuid_level > 9) {
unsigned eax = cpuid_eax(10);
@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_P3);
#endif
if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
/*
* let's use the legacy cpuid vector 0x1 and 0x4 for topology
* detection.
*/
c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
detect_ht(c);
#endif
}
/* Work around errata */
srat_detect_node(c);

View File

@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
#endif
}
#ifdef CONFIG_X86_HT
/*
* If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
* turn means that the only possibility is SMT (as indicated in
* cpuid1). Since cpuid2 doesn't specify shared caches, and we know
* that SMT shares all caches, we can unconditionally set cpu_llc_id to
* c->phys_proc_id.
*/
if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
return l2;

View File

@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
for_each_online_cpu(i) {
err = mce_device_create(i);
if (err) {
/*
* Register notifier anyway (and do not unreg it) so
* that we don't leave undeleted timers, see notifier
* callback above.
*/
__register_hotcpu_notifier(&mce_cpu_notifier);
cpu_notifier_register_done();
goto err_device_create;
}
@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
err_register:
unregister_syscore_ops(&mce_syscore_ops);
cpu_notifier_register_begin();
__unregister_hotcpu_notifier(&mce_cpu_notifier);
cpu_notifier_register_done();
err_device_create:
/*
* We didn't keep track of which devices were created above, but

View File

@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
/* Check if the extra MSRs can be safely accessed */
if (!er->extra_msr_access)
return -ENXIO;
reg->idx = er->idx;
reg->config = event->attr.config1;

View File

@ -295,14 +295,16 @@ struct extra_reg {
u64 config_mask;
u64 valid_mask;
int idx; /* per_xxx->regs[] reg index */
bool extra_msr_access;
};
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
.idx = EXTRA_REG_##i, \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
.idx = EXTRA_REG_##i, \
.extra_msr_access = true, \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \

View File

@ -1381,6 +1381,15 @@ again:
intel_pmu_lbr_read();
/*
* CondChgd bit 63 doesn't mean any overflow status. Ignore
* and clear the bit.
*/
if (__test_and_clear_bit(63, (unsigned long *)&status)) {
if (!status)
goto done;
}
/*
* PEBS overflow sets bit 62 in the global status register
*/
@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void)
}
}
/*
* Under certain circumstances, accessing certain MSRs may cause a #GP.
* This function tests whether the given MSR can be safely accessed.
*/
static bool check_msr(unsigned long msr, u64 mask)
{
u64 val_old, val_new, val_tmp;
/*
* Read the current value, change it and read it back to see if it
* matches, this is needed to detect certain hardware emulators
* (qemu/kvm) that don't trap on the MSR access and always return 0s.
*/
if (rdmsrl_safe(msr, &val_old))
return false;
/*
* Only change the bits which can be updated by wrmsrl.
*/
val_tmp = val_old ^ mask;
if (wrmsrl_safe(msr, val_tmp) ||
rdmsrl_safe(msr, &val_new))
return false;
if (val_new != val_tmp)
return false;
/* At this point the MSR is known to be safely accessible.
* Restore the old value and return.
*/
wrmsrl(msr, val_old);
return true;
}
static __init void intel_sandybridge_quirk(void)
{
x86_pmu.check_microcode = intel_snb_check_microcode;
@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void)
union cpuid10_ebx ebx;
struct event_constraint *c;
unsigned int unused;
int version;
struct extra_reg *er;
int version, i;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void)
case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
/* dTLB-load-misses on IVB is different than SNB */
hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void)
}
}
/*
* Accessing the LBR MSRs may cause a #GP under certain circumstances,
* e.g. KVM doesn't support the LBR MSRs.
* Check all LBR MSRs here.
* Disable LBR access if any LBR MSR cannot be accessed.
*/
if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
x86_pmu.lbr_nr = 0;
}
/*
* Accessing an extra MSR may cause a #GP under certain circumstances,
* e.g. KVM doesn't support offcore events.
* Check all extra_regs here.
*/
if (x86_pmu.extra_regs) {
for (er = x86_pmu.extra_regs; er->msr; er++) {
er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
/* Disable LBR select mapping */
if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
x86_pmu.lbr_sel_map = NULL;
}
}
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
x86_pmu.max_period = x86_pmu.cntval_mask;

View File

@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
if (!x86_pmu.bts)
return 0;
buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
if (unlikely(!buffer))
buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
if (unlikely(!buffer)) {
WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
return -ENOMEM;
}
max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
thresh = max / 16;

View File

@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),

View File

@ -425,8 +425,8 @@ sysenter_do_call:
cmpl $(NR_syscalls), %eax
jae sysenter_badsys
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp)
sysenter_after_call:
movl %eax,PT_EAX(%esp)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
@ -502,6 +502,7 @@ ENTRY(system_call)
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
syscall_after_call:
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
@ -675,12 +676,12 @@ syscall_fault:
END(syscall_fault)
syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
jmp syscall_exit
movl $-ENOSYS,%eax
jmp syscall_after_call
END(syscall_badsys)
sysenter_badsys:
movl $-ENOSYS,PT_EAX(%esp)
movl $-ENOSYS,%eax
jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC

View File

@ -175,7 +175,7 @@ void init_espfix_ap(void)
if (!pud_present(pud)) {
pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PUD_CLONES; n++)
set_pud(&pud_p[n], pud);
}
@ -185,7 +185,7 @@ void init_espfix_ap(void)
if (!pmd_present(pmd)) {
pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PMD_CLONES; n++)
set_pmd(&pmd_p[n], pmd);
}
@ -193,7 +193,6 @@ void init_espfix_ap(void)
pte_p = pte_offset_kernel(&pmd, addr);
stack_page = (void *)__get_free_page(GFP_KERNEL);
pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PTE_CLONES; n++)
set_pte(&pte_p[n*PTE_STRIDE], pte);

View File

@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
struct kprobe *p;
struct kprobe_ctlblk *kcb;
if (user_mode_vm(regs))
return 0;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
/*
* We don't want to be preempted for the entire

View File

@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
mark_tsc_unstable("cpufreq changes");
}
set_cyc2ns_scale(tsc_khz, freq->cpu);
set_cyc2ns_scale(tsc_khz, freq->cpu);
}
return 0;
}

View File

@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
kvm_x86_ops->set_nmi(vcpu);
}
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
/*
* Because interrupts can be injected asynchronously, we are
* calling check_nested_events again here to avoid a race condition.
* See https://lkml.org/lkml/2014/7/2/60 for discussion about this
* proposal and current concerns. Perhaps we should be setting
* KVM_REQ_EVENT only on certain events and not unconditionally?
*/
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
if (r != 0)
return r;
}
if (kvm_x86_ops->interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
false);

View File

@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
beqz a2, 1f # if at start of vector, don't restore
addi a0, a0, -128
bbsi a0, 8, 1f # don't restore except for overflow 8 and 12
bbsi a0, 7, 2f
bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
/*
* This fixup handler is for the extremely unlikely case where the
* overflow handler's reference thru a0 gets a hardware TLB refill
* that bumps out the (distinct, aliasing) TLB entry that mapped its
* prior references thru a9/a13, and where our reference now thru
* a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
*/
movi a2, window_overflow_restore_a0_fixup
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
bbsi.l a0, 7, 2f
/*
* Restore a0 as saved by _WindowOverflow8().
*
* FIXME: we really need a fixup handler for this L32E,
* for the extremely unlikely case where the overflow handler's
* reference thru a0 gets a hardware TLB refill that bumps out
* the (distinct, aliasing) TLB entry that mapped its prior
* references thru a9, and where our reference now thru a9
* gets a 2nd-level miss exception (not hardware TLB refill).
*/
l32e a2, a9, -16
wsr a2, depc # replace the saved a0
j 1f
l32e a0, a9, -16
wsr a0, depc # replace the saved a0
j 3f
2:
/*
* Restore a0 as saved by _WindowOverflow12().
*
* FIXME: we really need a fixup handler for this L32E,
* for the extremely unlikely case where the overflow handler's
* reference thru a0 gets a hardware TLB refill that bumps out
* the (distinct, aliasing) TLB entry that mapped its prior
* references thru a13, and where our reference now thru a13
* gets a 2nd-level miss exception (not hardware TLB refill).
*/
l32e a2, a13, -16
wsr a2, depc # replace the saved a0
l32e a0, a13, -16
wsr a0, depc # replace the saved a0
3:
xsr a3, excsave1
movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1:
/*
* Restore WindowBase while leaving all address registers restored.
@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
s32i a0, a2, PT_DEPC
_DoubleExceptionVector_handle_exception:
addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER
xsr a3, excsave1
@ -464,10 +469,119 @@ _DoubleExceptionVector_WindowOverflow:
rotw -3
j 1b
.end literal_prefix
ENDPROC(_DoubleExceptionVector)
/*
* Fixup handler for TLB miss in double exception handler for window overflow.
* We get here with windowbase set to the window that was being spilled and
* a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
* (bit set) window.
*
* We do the following here:
* - go to the original window retaining a0 value;
* - set up exception stack to return back to appropriate a0 restore code
* (we'll need to rotate window back and there's no place to save this
* information, use different return address for that);
* - handle the exception;
* - go to the window that was being spilled;
* - set up window_overflow_restore_a0_fixup as a fixup routine;
* - reload a0;
* - restore the original window;
* - reset the default fixup routine;
* - return to user. By the time we get to this fixup handler all information
* about the conditions of the original double exception that happened in
* the window overflow handler is lost, so we just return to userspace to
* retry overflow from start.
*
* a0: value of depc, original value in depc
* a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
* a3: exctable, original value in excsave1
*/
ENTRY(window_overflow_restore_a0_fixup)
rsr a0, ps
extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
rsr a2, windowbase
sub a0, a2, a0
extui a0, a0, 0, 3
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
_beqi a0, 1, .Lhandle_1
_beqi a0, 3, .Lhandle_3
.macro overflow_fixup_handle_exception_pane n
rsr a0, depc
rotw -\n
xsr a3, excsave1
wsr a2, depc
l32i a2, a3, EXC_TABLE_KSTK
s32i a0, a2, PT_AREG0
movi a0, .Lrestore_\n
s32i a0, a2, PT_DEPC
rsr a0, exccause
j _DoubleExceptionVector_handle_exception
.endm
overflow_fixup_handle_exception_pane 2
.Lhandle_1:
overflow_fixup_handle_exception_pane 1
.Lhandle_3:
overflow_fixup_handle_exception_pane 3
.macro overflow_fixup_restore_a0_pane n
rotw \n
/* Need to preserve the a0 value here to be able to handle an exception
* that may occur on the a0 reload from the stack. It may occur because
* the TLB miss handler may not be atomic and the pointer to the page table
* may be lost before we get here. There are no free registers,
* so we need to use the EXC_TABLE_DOUBLE_SAVE area.
*/
xsr a3, excsave1
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
movi a2, window_overflow_restore_a0_fixup
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
bbsi.l a0, 7, 1f
l32e a0, a9, -16
j 2f
1:
l32e a0, a13, -16
2:
rotw -\n
.endm
.Lrestore_2:
overflow_fixup_restore_a0_pane 2
.Lset_default_fixup:
xsr a3, excsave1
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
movi a2, 0
s32i a2, a3, EXC_TABLE_FIXUP
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
xsr a3, excsave1
rfe
.Lrestore_1:
overflow_fixup_restore_a0_pane 1
j .Lset_default_fixup
.Lrestore_3:
overflow_fixup_restore_a0_pane 3
j .Lset_default_fixup
ENDPROC(window_overflow_restore_a0_fixup)
.end literal_prefix
/*
* Debug interrupt vector
*

View File

@ -269,13 +269,13 @@ SECTIONS
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
DOUBLEEXC_VECTOR_VADDR - 16,
DOUBLEEXC_VECTOR_VADDR - 40,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
32,
40,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;

View File

@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
return -EINVAL;
}
if (it && start - it->start < bank_sz) {
if (it && start - it->start <= bank_sz) {
if (start == it->start) {
if (end - it->start < bank_sz) {
it->start = end;

View File

@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
/*
* @q could be exiting and already have destroyed all blkgs as
* indicated by NULL root_blkg. If so, don't confuse policies.
*/
if (!q->root_blkg)
return;
blk_throtl_drain(q);
}

View File

@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
EXPORT_SYMBOL(blk_queue_find_tag);
/**
* __blk_free_tags - release a given set of tag maintenance info
* blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
* Tries to free the specified @bqt. Returns true if it was
* actually freed and false if there are still references using it
* Drop the reference count on @bqt and frees it when the last reference
* is dropped.
*/
static int __blk_free_tags(struct blk_queue_tag *bqt)
void blk_free_tags(struct blk_queue_tag *bqt)
{
int retval;
retval = atomic_dec_and_test(&bqt->refcnt);
if (retval) {
if (atomic_dec_and_test(&bqt->refcnt)) {
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
bqt->max_depth);
@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
kfree(bqt);
}
return retval;
}
EXPORT_SYMBOL(blk_free_tags);
/**
* __blk_queue_free_tags - release tag maintenance info
@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
if (!bqt)
return;
__blk_free_tags(bqt);
blk_free_tags(bqt);
q->queue_tags = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
* blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
* For externally managed @bqt frees the map. Callers of this
* function must guarantee to have released all the queues that
* might have been using this tag map.
*/
void blk_free_tags(struct blk_queue_tag *bqt)
{
if (unlikely(!__blk_free_tags(bqt)))
BUG();
}
EXPORT_SYMBOL(blk_free_tags);
/**
* blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device
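
The reworked blk_free_tags() kernel-doc above boils down to a drop-the-last-reference-then-free pattern. A minimal, self-contained sketch of that pattern in plain C (illustrative only, not block-layer code; struct tag_map and tag_map_put are made-up names):

#include <stdatomic.h>
#include <stdlib.h>

struct tag_map {
	atomic_int refcnt;
	/* ...tag bookkeeping would live here... */
};

/* Drop one reference; free the map only when the last user is gone. */
static void tag_map_put(struct tag_map *map)
{
	if (atomic_fetch_sub_explicit(&map->refcnt, 1,
				      memory_order_acq_rel) == 1)
		free(map);
}

int main(void)
{
	struct tag_map *map = malloc(sizeof(*map));

	atomic_init(&map->refcnt, 2);
	tag_map_put(map);	/* one reference still outstanding */
	tag_map_put(map);	/* last put frees the map */
	return 0;
}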

View File

@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKROSET:
case BLKDISCARD:
case BLKSECDISCARD:
case BLKZEROOUT:
/*
* the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us

View File

@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
static bool brightness_switch_enabled;
static bool brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
/*
@ -581,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_set_use_native_backlight,
.ident = "HP ProBook 4540s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "HP ProBook 2013 models",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),

View File

@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */

View File

@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
* ata_qc_new - Request an available ATA command, for queueing
* @ap: target port
*
* Some ATA host controllers may implement a queue depth which is less
* than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
* the hardware limitation.
*
* LOCKING:
* None.
*/
@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
unsigned int max_queue = ap->host->n_tags;
unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
for (i = 0; i < ATA_MAX_QUEUE; i++) {
tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->n_tags = ATA_MAX_QUEUE - 1;
host->dev = dev;
host->ops = ops;
}
@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
int i, rc;
host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
dev_err(host->dev, "BUG: trying to register unstarted host\n");
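
The ata_qc_new() comment above explains why tag allocation must respect the controller-advertised queue depth rather than ATA_MAX_QUEUE. Here is a small stand-alone C sketch of the same bounded round-robin search; MAX_QUEUE, INTERNAL_TAG and last_tag are assumed demo values, not libata's:

#include <stdbool.h>
#include <stdio.h>

#define MAX_QUEUE	32			/* assumed hardware queue depth */
#define INTERNAL_TAG	(MAX_QUEUE - 1)		/* reserved, never handed out */

static bool tag_in_use[MAX_QUEUE];
static unsigned int last_tag;

/* Start just after the last allocated tag and wrap at max_queue,
 * mirroring the "tag = tag < max_queue ? tag : 0" wrap in the patch. */
static int alloc_tag(unsigned int max_queue)
{
	unsigned int i, tag;

	for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
		tag = tag < max_queue ? tag : 0;
		if (tag == INTERNAL_TAG || tag_in_use[tag])
			continue;
		tag_in_use[tag] = true;
		last_tag = tag;
		return (int)tag;
	}
	return -1;	/* queue full */
}

int main(void)
{
	for (int n = 0; n < 5; n++)
		printf("got tag %d\n", alloc_tag(MAX_QUEUE));
	return 0;
}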

View File

@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
if (err & ATA_UNC)
if (err & (ATA_UNC | ATA_AMNF))
qc->err_mask |= AC_ERR_MEDIA;
if (err & ATA_IDNF)
qc->err_mask |= AC_ERR_INVALID;
@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
}
if (cmd->command != ATA_CMD_PACKET &&
(res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
ATA_ABORTED)))
ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
(res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
ATA_IDNF | ATA_ABORTED)))
ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
res->feature & ATA_ICRC ? "ICRC " : "",
res->feature & ATA_UNC ? "UNC " : "",
res->feature & ATA_AMNF ? "AMNF " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
res->feature & ATA_ABORTED ? "ABRT " : "");
#endif

View File

@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
struct ep93xx_pata_data *drv_data;
struct ata_host *host;
struct ata_port *ap;
unsigned int irq;
int irq;
struct resource *mem_res;
void __iomem *ide_base;
int err;

View File

@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
return dev->archdata.irqs[num];
#else
struct resource *r;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
return of_irq_get(dev->dev.of_node, num);
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
int ret;
ret = of_irq_get(dev->dev.of_node, num);
if (ret >= 0 || ret == -EPROBE_DEFER)
return ret;
}
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
struct resource *r;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
return of_irq_get_byname(dev->dev.of_node, name);
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
int ret;
ret = of_irq_get_byname(dev->dev.of_node, name);
if (ret >= 0 || ret == -EPROBE_DEFER)
return ret;
}
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
return r ? r->start : -ENXIO;
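
Both hunks above make platform_get_irq() and platform_get_irq_byname() propagate -EPROBE_DEFER from of_irq_get() to the caller while still falling back to the resource table on other failures. A hedged sketch of how a driver is expected to consume that return value; foo_probe() is a stand-in, not a driver touched by this series:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	/* irq may be -EPROBE_DEFER if the interrupt controller is not
	 * ready yet; returning it unchanged lets the driver core retry
	 * the probe later. */
	if (irq < 0)
		return irq;

	dev_info(&pdev->dev, "using IRQ %d\n", irq);
	return 0;
}

Storing the result in a signed int matters for the same reason, which is what the pata_ep93xx hunk above changes.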

View File

@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
struct task_struct *opa;
kref_get(&connection->kref);
/* We may just have force_sig()'ed this thread
* to get it out of some blocking network function.
* Clear signals; otherwise kthread_run(), which internally uses
* wait_on_completion_killable(), will mistake our pending signal
* for a new fatal signal and fail. */
flush_signals(current);
opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) {
drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");

View File

@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
if (reset_capacity) {
if (reset_capacity)
set_capacity(zram->disk, 0);
revalidate_disk(zram->disk);
}
up_write(&zram->init_lock);
/*
* Revalidate disk out of the init_lock to avoid lockdep splat.
* It's okay because disk's capacity is protected by init_lock
* so that revalidate_disk always sees up-to-date capacity.
*/
if (reset_capacity)
revalidate_disk(zram->disk);
}
static ssize_t disksize_store(struct device *dev,
@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
revalidate_disk(zram->disk);
up_write(&zram->init_lock);
/*
* Revalidate disk out of the init_lock to avoid lockdep splat.
* It's okay because disk's capacity is protected by init_lock
* so that revalidate_disk always sees up-to-date capacity.
*/
revalidate_disk(zram->disk);
return len;
out_destroy_comp:
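
The two comments added above state the same rule: change the capacity under init_lock, but call revalidate_disk() only after dropping it, so that locks taken inside revalidate_disk() never nest inside init_lock. A minimal sketch of that ordering with made-up names (example_dev, example_reset), not zram's actual structures:

#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/rwsem.h>

struct example_dev {
	struct rw_semaphore init_lock;
	struct gendisk *disk;
	u64 disksize;
};

static void example_reset(struct example_dev *ed)
{
	down_write(&ed->init_lock);
	ed->disksize = 0;
	set_capacity(ed->disk, 0);
	up_write(&ed->init_lock);

	/* revalidate_disk() may take other locks; calling it here keeps
	 * them out of init_lock's lockdep dependency chain, and the
	 * capacity it sees is already up to date. */
	revalidate_disk(ed->disk);
}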

View File

@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0b05, 0x17d0) },
{ USB_DEVICE(0x0CF3, 0x0036) },
{ USB_DEVICE(0x0CF3, 0x3004) },
{ USB_DEVICE(0x0CF3, 0x3005) },
{ USB_DEVICE(0x0CF3, 0x3008) },
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x0CF3, 0x311E) },
@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },

View File

@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },

Some files were not shown because too many files have changed in this diff