This is the 5.4.73 stable release
Merge tag 'v5.4.73' into 5.4-2.2.x-imx

This is the 5.4.73 stable release

Conflicts:
- arch/arm/boot/dts/imx6sl.dtsi:
  Commit [a1767c9019] in NXP tree is now covered with commit
  [5c4c2f437c] from upstream.
- drivers/gpu/drm/mxsfb/mxsfb_drv.c:
  Resolve merge hunk for patch [ed8b90d303] from upstream.
- drivers/media/i2c/ov5640.c:
  Patch [aa4bb8b883] in NXP tree is now covered by patches
  [79ec0578c7] and [b2f8546056] from upstream. Changes from NXP
  patch [99aa4c8c18] are covered in upstream version as well.
- drivers/net/ethernet/freescale/fec_main.c:
  Fix merge fuzz for patch [9e70485b40] from upstream.
- drivers/usb/cdns3/gadget.c:
  Keep NXP version of the file, upstream version is not compatible.
- drivers/usb/dwc3/core.c:
- drivers/usb/dwc3/core.h:
  Fix merge fuzz of patch [08045050c6] together with NXP patch
  [b30e41dc1e].
- sound/soc/fsl/fsl_sai.c:
- sound/soc/fsl/fsl_sai.h:
  Commit [2ea70e51eb72a] in NXP tree is now covered with commit
  [1ad7f52fe6] from upstream.

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
commit 4c7342a1d4
@@ -567,7 +567,7 @@
 	loops can be debugged more effectively on production
 	systems.
 
-	clearcpuid=BITNUM [X86]
+	clearcpuid=BITNUM[,BITNUM...] [X86]
 			Disable CPUID feature X for the kernel. See
 			arch/x86/include/asm/cpufeatures.h for the valid bit
 			numbers. Note the Linux specific bits are not necessarily
@@ -1000,12 +1000,14 @@ icmp_ratelimit - INTEGER
 icmp_msgs_per_sec - INTEGER
 	Limit maximal number of ICMP packets sent per second from this host.
 	Only messages whose type matches icmp_ratemask (see below) are
-	controlled by this limit.
+	controlled by this limit. For security reasons, the precise count
+	of messages per second is randomized.
 	Default: 1000
 
 icmp_msgs_burst - INTEGER
 	icmp_msgs_per_sec controls number of ICMP packets sent per second,
 	while icmp_msgs_burst controls the burst size of these packets.
+	For security reasons, the precise burst size is randomized.
 	Default: 50
 
 icmp_ratemask - INTEGER
Makefile | 2 +-

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 72
+SUBLEVEL = 73
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
@@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK
 	select ARC_HAS_ACCL_REGS
 	select ARC_IRQ_NO_AUTOSAVE
 	select CLK_HSDK
+	select RESET_CONTROLLER
 	select RESET_HSDK
 	select HAVE_PCI
@@ -997,7 +997,7 @@
 			};
 
 			rngb: rngb@21b4000 {
-				compatible = "fsl,imx25-rngb";
+				compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
 				reg = <0x021b4000 0x4000>;
 				interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6SL_CLK_DUMMY>;
@@ -230,8 +230,6 @@
	<GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
	<GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
	<GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
	<GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
	<GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
	<GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
	<GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
	<GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
@@ -84,21 +84,21 @@
 		global_timer: timer@b0020200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0xb0020200 0x100>;
-			interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+			interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
 			status = "disabled";
 		};
 
 		twd_timer: timer@b0020600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0xb0020600 0x20>;
-			interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
 			status = "disabled";
 		};
 
 		twd_wdt: wdt@b0020620 {
 			compatible = "arm,cortex-a9-twd-wdt";
 			reg = <0xb0020620 0xe0>;
-			interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+			interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
 			status = "disabled";
 		};
 
@@ -223,16 +223,16 @@
 };
 
 &reg_dc1sw {
-	regulator-min-microvolt = <3000000>;
-	regulator-max-microvolt = <3000000>;
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
 	regulator-name = "vcc-gmac-phy";
 };
 
 &reg_dcdc1 {
 	regulator-always-on;
-	regulator-min-microvolt = <3000000>;
-	regulator-max-microvolt = <3000000>;
-	regulator-name = "vcc-3v0";
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-3v3";
 };
 
 &reg_dcdc2 {
@@ -777,6 +777,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
 
 	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
 	soc_pm.data.pmc = of_iomap(pmc_np, 0);
+	of_node_put(pmc_np);
 	if (!soc_pm.data.pmc) {
 		pr_err("AT91: PM not supported, PMC not found\n");
 		return;
@@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	 */
 	if (mpuss_can_lose_context) {
 		error = cpu_cluster_pm_enter();
-		if (error)
+		if (error) {
+			omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
 			goto cpu_cluster_pm_out;
+		}
 	}
 }
 
@@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = {
 	.dev_id = "s3c2410-sdi",
 	.table = {
 		/* Card detect S3C2410_GPG(10) */
-		GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW),
 		{ },
 	},
 };
@@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = {
 	.dev_id = "s3c2410-sdi",
 	.table = {
 		/* Card detect S3C2410_GPF(5) */
-		GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
 		/* Write protect S3C2410_GPH(8) */
-		GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
 		{ },
 	},
 };
@@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = {
 	.dev_id = "s3c2410-sdi",
 	.table = {
 		/* Card detect S3C2410_GPG(8) */
-		GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW),
 		/* Write protect S3C2410_GPH(8) */
-		GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH),
 		{ },
 	},
 };
@@ -359,9 +359,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = {
 	.dev_id = "s3c2410-sdi",
 	.table = {
 		/* Card detect S3C2410_GPF(1) */
-		GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW),
 		/* Write protect S3C2410_GPG(10) */
-		GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW),
 		{ },
 	},
 };
@@ -567,9 +567,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = {
 	.dev_id = "s3c2410-sdi",
 	.table = {
 		/* Card detect S3C2410_GPF(5) */
-		GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
 		/* Write protect S3C2410_GPH(8) */
-		GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
 		{ },
 	},
 };
@@ -1254,20 +1254,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
 
 	ret = of_property_read_u32(np, "prefetch-data", &val);
 	if (ret == 0) {
-		if (val)
+		if (val) {
 			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
-		else
+			*aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
+		} else {
 			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
+			*aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
+		}
+		*aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
 	} else if (ret != -EINVAL) {
 		pr_err("L2C-310 OF prefetch-data property value is missing\n");
 	}
 
 	ret = of_property_read_u32(np, "prefetch-instr", &val);
 	if (ret == 0) {
-		if (val)
+		if (val) {
 			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
-		else
+			*aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
+		} else {
 			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
+			*aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
+		}
+		*aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
 	} else if (ret != -EINVAL) {
 		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
 	}
@@ -231,7 +231,7 @@
 
 		pinctrl: pinctrl@e01b0000 {
 			compatible = "actions,s700-pinctrl";
-			reg = <0x0 0xe01b0000 0x0 0x1000>;
+			reg = <0x0 0xe01b0000 0x0 0x100>;
 			clocks = <&cmu CLK_GPIO>;
 			gpio-controller;
 			gpio-ranges = <&pinctrl 0 0 136>;
@@ -155,8 +155,7 @@
 			     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "gp",
 				  "gpmmu",
 				  "pp",
@@ -167,8 +166,7 @@
 				  "pp2",
 				  "ppmmu2",
 				  "pp3",
-				  "ppmmu3",
-				  "pmu";
+				  "ppmmu3";
 		clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
 		clock-names = "bus", "core";
 		resets = <&ccu RST_BUS_GPU>;
@@ -43,13 +43,13 @@
 
 		white {
 			label = "vim3:white:sys";
-			gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
 			linux,default-trigger = "heartbeat";
 		};
 
 		red {
 			label = "vim3:red";
-			gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
 		};
 	};
 
@@ -651,6 +651,7 @@
 		gpc: gpc@303a0000 {
 			compatible = "fsl,imx8mq-gpc";
 			reg = <0x303a0000 0x10000>;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-parent = <&gic>;
 			interrupt-controller;
 			broken-wake-request-signals;
@@ -175,14 +175,14 @@
 	};
 
 	thermal-zones {
-		cpu0_1-thermal {
+		cpu0-1-thermal {
 			polling-delay-passive = <250>;
 			polling-delay = <1000>;
 
 			thermal-sensors = <&tsens 4>;
 
 			trips {
-				cpu0_1_alert0: trip-point@0 {
+				cpu0_1_alert0: trip-point0 {
 					temperature = <75000>;
 					hysteresis = <2000>;
 					type = "passive";
@@ -205,7 +205,7 @@
 			};
 		};
 
-		cpu2_3-thermal {
+		cpu2-3-thermal {
 			polling-delay-passive = <250>;
 			polling-delay = <1000>;
 
@@ -934,7 +934,7 @@
 				reg-names = "mdp_phys";
 
 				interrupt-parent = <&mdss>;
-				interrupts = <0 0>;
+				interrupts = <0>;
 
 				clocks = <&gcc GCC_MDSS_AHB_CLK>,
 					 <&gcc GCC_MDSS_AXI_CLK>,
@@ -966,7 +966,7 @@
 				reg-names = "dsi_ctrl";
 
 				interrupt-parent = <&mdss>;
-				interrupts = <4 0>;
+				interrupts = <4>;
 
 				assigned-clocks = <&gcc BYTE0_CLK_SRC>,
 						  <&gcc PCLK0_CLK_SRC>;
@@ -113,7 +113,7 @@
 
 		wcd_codec: codec@f000 {
 			compatible = "qcom,pm8916-wcd-analog-codec";
-			reg = <0xf000 0x200>;
+			reg = <0xf000>;
 			reg-names = "pmic-codec-core";
 			clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
 			clock-names = "mclk";
@@ -1212,9 +1212,8 @@
 			reg = <0 0xe6ea0000 0 0x0064>;
 			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 210>;
-			dmas = <&dmac1 0x43>, <&dmac1 0x42>,
-			       <&dmac2 0x43>, <&dmac2 0x42>;
-			dma-names = "tx", "rx", "tx", "rx";
+			dmas = <&dmac0 0x43>, <&dmac0 0x42>;
+			dma-names = "tx", "rx";
 			power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
 			resets = <&cpg 210>;
 			#address-cells = <1>;
@@ -1168,9 +1168,8 @@
 			reg = <0 0xe6ea0000 0 0x0064>;
 			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 210>;
-			dmas = <&dmac1 0x43>, <&dmac1 0x42>,
-			       <&dmac2 0x43>, <&dmac2 0x42>;
-			dma-names = "tx", "rx", "tx", "rx";
+			dmas = <&dmac0 0x43>, <&dmac0 0x42>;
+			dma-names = "tx", "rx";
 			power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
 			resets = <&cpg 210>;
 			#address-cells = <1>;
@@ -419,7 +419,7 @@
 		};
 
 		i2c0: i2c@ff020000 {
-			compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
+			compatible = "cdns,i2c-r1p14";
 			status = "disabled";
 			interrupt-parent = <&gic>;
 			interrupts = <0 17 4>;
@@ -429,7 +429,7 @@
 		};
 
 		i2c1: i2c@ff030000 {
-			compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
+			compatible = "cdns,i2c-r1p14";
 			status = "disabled";
 			interrupt-parent = <&gic>;
 			interrupts = <0 18 4>;
@@ -13,20 +13,19 @@
  */
 #define MAX_EA_BITS_PER_CONTEXT		46
 
-#define REGION_SHIFT		(MAX_EA_BITS_PER_CONTEXT - 2)
-
 /*
- * Our page table limit us to 64TB. Hence for the kernel mapping,
- * each MAP area is limited to 16 TB.
- * The four map areas are:  linear mapping, vmap, IO and vmemmap
+ * Our page table limit us to 64TB. For 64TB physical memory, we only need 64GB
+ * of vmemmap space. To better support sparse memory layout, we use 61TB
+ * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmememmap.
  */
+#define REGION_SHIFT		(40)
 #define H_KERN_MAP_SIZE		(ASM_CONST(1) << REGION_SHIFT)
 
 /*
- * Define the address range of the kernel non-linear virtual area
- * 16TB
+ * Define the address range of the kernel non-linear virtual area (61TB)
  */
-#define H_KERN_VIRT_START	ASM_CONST(0xc000100000000000)
+#define H_KERN_VIRT_START	ASM_CONST(0xc0003d0000000000)
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
@@ -8,14 +8,13 @@
 #ifndef _ASM_POWERPC_LMB_H
 #define _ASM_POWERPC_LMB_H
 
+#include <linux/sched.h>
+
 struct drmem_lmb {
 	u64     base_addr;
 	u32     drc_index;
 	u32     aa_index;
 	u32     flags;
-#ifdef CONFIG_MEMORY_HOTPLUG
-	int	nid;
-#endif
 };
 
 struct drmem_lmb_info {
@@ -26,8 +25,22 @@ struct drmem_lmb_info {
 
 extern struct drmem_lmb_info *drmem_info;
 
+static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
+					       const struct drmem_lmb *start)
+{
+	/*
+	 * DLPAR code paths can take several milliseconds per element
+	 * when interacting with firmware. Ensure that we don't
+	 * unfairly monopolize the CPU.
+	 */
+	if (((++lmb - start) % 16) == 0)
+		cond_resched();
+
+	return lmb;
+}
+
 #define for_each_drmem_lmb_in_range(lmb, start, end)		\
-	for ((lmb) = (start); (lmb) < (end); (lmb)++)
+	for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
 
 #define for_each_drmem_lmb(lmb)					\
 	for_each_drmem_lmb_in_range((lmb),			\
@@ -103,22 +116,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
 	lmb->aa_index = 0xffffffff;
 }
 
-#ifdef CONFIG_MEMORY_HOTPLUG
-static inline void lmb_set_nid(struct drmem_lmb *lmb)
-{
-	lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
-}
-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
-{
-	lmb->nid = -1;
-}
-#else
-static inline void lmb_set_nid(struct drmem_lmb *lmb)
-{
-}
-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
-{
-}
-#endif
-
 #endif /* _ASM_POWERPC_LMB_H */
@@ -796,7 +796,7 @@
 #define THRM1_TIN	(1 << 31)
 #define THRM1_TIV	(1 << 30)
 #define THRM1_THRES(x)	((x&0x7f)<<23)
-#define THRM3_SITV(x)	((x&0x3fff)<<1)
+#define THRM3_SITV(x)	((x & 0x1fff) << 1)
 #define THRM1_TID	(1<<2)
 #define THRM1_TIE	(1<<1)
 #define THRM1_V		(1<<0)
@@ -67,19 +67,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
 		return false;
 	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
 }
-static inline void mm_reset_thread_local(struct mm_struct *mm)
-{
-	WARN_ON(atomic_read(&mm->context.copros) > 0);
-	/*
-	 * It's possible for mm_access to take a reference on mm_users to
-	 * access the remote mm from another thread, but it's not allowed
-	 * to set mm_cpumask, so mm_users may be > 1 here.
-	 */
-	WARN_ON(current->mm != mm);
-	atomic_set(&mm->context.active_cpus, 1);
-	cpumask_clear(mm_cpumask(mm));
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-}
 #else /* CONFIG_PPC_BOOK3S_64 */
 static inline int mm_is_thread_local(struct mm_struct *mm)
 {
@@ -13,13 +13,14 @@
  */
 
 #include <linux/errno.h>
-#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/param.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
 
 #include <asm/io.h>
 #include <asm/reg.h>
@@ -39,9 +40,7 @@ static struct tau_temp
 	unsigned char grew;
 } tau[NR_CPUS];
 
-struct timer_list tau_timer;
-
-#undef DEBUG
+static bool tau_int_enable;
 
 /* TODO: put these in a /proc interface, with some sanity checks, and maybe
  * dynamic adjustment to minimize # of interrupts */
@@ -50,72 +49,49 @@ struct timer_list tau_timer;
 #define step_size		2	/* step size when temp goes out of range */
 #define window_expand		1	/* expand the window by this much */
 /* configurable values for shrinking the window */
-#define shrink_timer	2*HZ	/* period between shrinking the window */
+#define shrink_timer	2000	/* period between shrinking the window */
 #define min_window	2	/* minimum window size, degrees C */
 
 static void set_thresholds(unsigned long cpu)
 {
-#ifdef CONFIG_TAU_INT
-	/*
-	 * setup THRM1,
-	 * threshold, valid bit, enable interrupts, interrupt when below threshold
-	 */
-	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
+	u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
 
-	/* setup THRM2,
-	 * threshold, valid bit, enable interrupts, interrupt when above threshold
-	 */
-	mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
-#else
-	/* same thing but don't enable interrupts */
-	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
-	mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
-#endif
+	/* setup THRM1, threshold, valid bit, interrupt when below threshold */
+	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
+
+	/* setup THRM2, threshold, valid bit, interrupt when above threshold */
+	mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
 }
 
 static void TAUupdate(int cpu)
 {
-	unsigned thrm;
-
-#ifdef DEBUG
-	printk("TAUupdate ");
-#endif
+	u32 thrm;
+	u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
 
 	/* if both thresholds are crossed, the step_sizes cancel out
 	 * and the window winds up getting expanded twice. */
-	if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
-		if(thrm & THRM1_TIN){ /* crossed low threshold */
-			if (tau[cpu].low >= step_size){
-				tau[cpu].low -= step_size;
-				tau[cpu].high -= (step_size - window_expand);
-			}
-			tau[cpu].grew = 1;
-#ifdef DEBUG
-			printk("low threshold crossed ");
-#endif
+	thrm = mfspr(SPRN_THRM1);
+	if ((thrm & bits) == bits) {
+		mtspr(SPRN_THRM1, 0);
+
+		if (tau[cpu].low >= step_size) {
+			tau[cpu].low -= step_size;
+			tau[cpu].high -= (step_size - window_expand);
 		}
+		tau[cpu].grew = 1;
+		pr_debug("%s: low threshold crossed\n", __func__);
 	}
-	if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
-		if(thrm & THRM1_TIN){ /* crossed high threshold */
-			if (tau[cpu].high <= 127-step_size){
-				tau[cpu].low += (step_size - window_expand);
-				tau[cpu].high += step_size;
-			}
-			tau[cpu].grew = 1;
-#ifdef DEBUG
-			printk("high threshold crossed ");
-#endif
+	thrm = mfspr(SPRN_THRM2);
+	if ((thrm & bits) == bits) {
+		mtspr(SPRN_THRM2, 0);
+
+		if (tau[cpu].high <= 127 - step_size) {
+			tau[cpu].low += (step_size - window_expand);
+			tau[cpu].high += step_size;
 		}
+		tau[cpu].grew = 1;
+		pr_debug("%s: high threshold crossed\n", __func__);
 	}
-
-#ifdef DEBUG
-	printk("grew = %d\n", tau[cpu].grew);
-#endif
-
-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
-	set_thresholds(cpu);
-#endif
-
 }
 
 #ifdef CONFIG_TAU_INT
@@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
 static void tau_timeout(void * info)
 {
 	int cpu;
-	unsigned long flags;
 	int size;
 	int shrink;
 
-	/* disabling interrupts *should* be okay */
-	local_irq_save(flags);
 	cpu = smp_processor_id();
 
-#ifndef CONFIG_TAU_INT
-	TAUupdate(cpu);
-#endif
+	if (!tau_int_enable)
+		TAUupdate(cpu);
+
+	/* Stop thermal sensor comparisons and interrupts */
+	mtspr(SPRN_THRM3, 0);
 
 	size = tau[cpu].high - tau[cpu].low;
 	if (size > min_window && ! tau[cpu].grew) {
@@ -173,32 +148,26 @@ static void tau_timeout(void * info)
 
 	set_thresholds(cpu);
 
-	/*
-	 * Do the enable every time, since otherwise a bunch of (relatively)
-	 * complex sleep code needs to be added. One mtspr every time
-	 * tau_timeout is called is probably not a big deal.
-	 *
-	 * Enable thermal sensor and set up sample interval timer
-	 * need 20 us to do the compare.. until a nice 'cpu_speed' function
-	 * call is implemented, just assume a 500 mhz clock. It doesn't really
-	 * matter if we take too long for a compare since it's all interrupt
-	 * driven anyway.
-	 *
-	 * use a extra long time.. (60 us @ 500 mhz)
+	/* Restart thermal sensor comparisons and interrupts.
+	 * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
+	 * recommends that "the maximum value be set in THRM3 under all
+	 * conditions."
 	 */
-	mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
-
-	local_irq_restore(flags);
+	mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
 }
 
-static void tau_timeout_smp(struct timer_list *unused)
+static struct workqueue_struct *tau_workq;
+
+static void tau_work_func(struct work_struct *work)
 {
-	/* schedule ourselves to be run again */
-	mod_timer(&tau_timer, jiffies + shrink_timer) ;
+	msleep(shrink_timer);
 	on_each_cpu(tau_timeout, NULL, 0);
+	/* schedule ourselves to be run again */
+	queue_work(tau_workq, work);
 }
 
+DECLARE_WORK(tau_work, tau_work_func);
+
 /*
  * setup the TAU
  *
@@ -231,21 +200,19 @@ static int __init TAU_init(void)
 		return 1;
 	}
 
+	tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
+			 !strcmp(cur_cpu_spec->platform, "ppc750");
+
-	/* first, set up the window shrinking timer */
-	timer_setup(&tau_timer, tau_timeout_smp, 0);
-	tau_timer.expires = jiffies + shrink_timer;
-	add_timer(&tau_timer);
+	tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
+	if (!tau_workq)
+		return -ENOMEM;
 
 	on_each_cpu(TAU_init_smp, NULL, 0);
 
-	printk("Thermal assist unit ");
-#ifdef CONFIG_TAU_INT
-	printk("using interrupts, ");
-#else
-	printk("using timers, ");
-#endif
-	printk("shrink_timer: %d jiffies\n", shrink_timer);
+	queue_work(tau_workq, &tau_work);
+
+	pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
+		tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
 	tau_initialized = 1;
 
 	return 0;
@@ -639,19 +639,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
 	struct mm_struct *mm = arg;
 	unsigned long pid = mm->context.id;
 
+	/*
+	 * A kthread could have done a mmget_not_zero() after the flushing CPU
+	 * checked mm_is_singlethreaded, and be in the process of
+	 * kthread_use_mm when interrupted here. In that case, current->mm will
+	 * be set to mm, because kthread_use_mm() setting ->mm and switching to
+	 * the mm is done with interrupts off.
+	 */
 	if (current->mm == mm)
-		return; /* Local CPU */
+		goto out_flush;
 
 	if (current->active_mm == mm) {
-		/*
-		 * Must be a kernel thread because sender is single-threaded.
-		 */
-		BUG_ON(current->mm);
+		WARN_ON_ONCE(current->mm != NULL);
+		/* Is a kernel thread and is using mm as the lazy tlb */
 		mmgrab(&init_mm);
-		switch_mm(mm, &init_mm, current);
 		current->active_mm = &init_mm;
+		switch_mm_irqs_off(mm, &init_mm, current);
 		mmdrop(mm);
 	}
 
+	atomic_dec(&mm->context.active_cpus);
+	cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
+
+out_flush:
 	_tlbiel_pid(pid, RIC_FLUSH_ALL);
 }
 
@@ -666,7 +676,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
 	 */
 	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
 				(void *)mm, 1);
-	mm_reset_thread_local(mm);
 }
 
 void radix__flush_tlb_mm(struct mm_struct *mm)
@@ -362,10 +362,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
 	if (!drmem_info->lmbs)
 		return;
 
-	for_each_drmem_lmb(lmb) {
+	for_each_drmem_lmb(lmb)
 		read_drconf_v1_cell(lmb, &prop);
-		lmb_set_nid(lmb);
-	}
 }
 
 static void __init init_drmem_v2_lmbs(const __be32 *prop)
@@ -410,8 +408,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
 
 			lmb->aa_index = dr_cell.aa_index;
 			lmb->flags = dr_cell.flags;
-
-			lmb_set_nid(lmb);
 		}
 	}
 }
@@ -95,7 +95,7 @@ REQUEST(__field(0,	8,	partition_id)
 
 #define REQUEST_NAME system_performance_capabilities
 #define REQUEST_NUM 0x40
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
 #include I(REQUEST_BEGIN)
 REQUEST(__field(0,	1,	perf_collect_privileged)
 	__field(0x1,	1,	capability_mask)
@@ -223,7 +223,7 @@ REQUEST(__field(0,	2,	partition_id)
 
 #define REQUEST_NAME system_hypervisor_times
 #define REQUEST_NUM 0xF0
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
 #include I(REQUEST_BEGIN)
 REQUEST(__count(0,	8,	time_spent_to_dispatch_virtual_processors)
 	__count(0x8,	8,	time_spent_processing_virtual_processor_timers)
@@ -234,7 +234,7 @@ REQUEST(__count(0,	8,	time_spent_to_dispatch_virtual_processors)
 
 #define REQUEST_NAME system_tlbie_count_and_time
 #define REQUEST_NUM 0xF4
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
 #include I(REQUEST_BEGIN)
 REQUEST(__count(0,	8,	tlbie_instructions_issued)
 	/*
@@ -269,6 +269,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 
 		mask  |= CNST_PMC_MASK(pmc);
 		value |= CNST_PMC_VAL(pmc);
+
+		/*
+		 * PMC5 and PMC6 are used to count cycles and instructions and
+		 * they do not support most of the constraint bits. Add a check
+		 * to exclude PMC5/6 from most of the constraints except for
+		 * EBB/BHRB.
+		 */
+		if (pmc >= 5)
+			goto ebb_bhrb;
 	}
 
 	if (pmc <= 4) {
@@ -335,6 +344,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 		}
 	}
 
+ebb_bhrb:
 	if (!pmc && ebb)
 		/* EBB events must specify the PMC */
 		return -1;
@@ -219,12 +219,11 @@ config TAU
 	  temperature within 2-4 degrees Celsius. This option shows the current
 	  on-die temperature in /proc/cpuinfo if the cpu supports it.
 
-	  Unfortunately, on some chip revisions, this sensor is very inaccurate
-	  and in many cases, does not work at all, so don't assume the cpu
-	  temp is actually what /proc/cpuinfo says it is.
+	  Unfortunately, this sensor is very inaccurate when uncalibrated, so
+	  don't assume the cpu temp is actually what /proc/cpuinfo says it is.
 
 config TAU_INT
-	bool "Interrupt driven TAU driver (DANGEROUS)"
+	bool "Interrupt driven TAU driver (EXPERIMENTAL)"
 	depends on TAU
 	---help---
 	  The TAU supports an interrupt driven mode which causes an interrupt
@@ -232,12 +231,7 @@ config TAU_INT
 	  to get notified the temp has exceeded a range. With this option off,
 	  a timer is used to re-check the temperature periodically.
 
-	  However, on some cpus it appears that the TAU interrupt hardware
-	  is buggy and can cause a situation which would lead unexplained hard
-	  lockups.
-
-	  Unless you are extending the TAU driver, or enjoy kernel/hardware
-	  debugging, leave this option off.
+	  If in doubt, say N here.
 
 config TAU_AVERAGE
 	bool "Average high and low temp"
@@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
 	return count;
 }
 
-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
-					uint32_t type)
+static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
 {
 	struct dump_obj *dump;
 	int rc;
 
 	dump = kzalloc(sizeof(*dump), GFP_KERNEL);
 	if (!dump)
-		return NULL;
+		return;
 
 	dump->kobj.kset = dump_kset;
 
@@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
 	rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
 	if (rc) {
 		kobject_put(&dump->kobj);
-		return NULL;
+		return;
 	}
 
+	/*
+	 * As soon as the sysfs file for this dump is created/activated there is
+	 * a chance the opal_errd daemon (or any userspace) might read and
+	 * acknowledge the dump before kobject_uevent() is called. If that
+	 * happens then there is a potential race between
+	 * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
+	 * use-after-free of a kernfs object resulting in a kernel crash.
+	 *
+	 * To avoid that, we need to take a reference on behalf of the bin file,
+	 * so that our reference remains valid while we call kobject_uevent().
+	 * We then drop our reference before exiting the function, leaving the
+	 * bin file to drop the last reference (if it hasn't already).
+	 */
+
+	/* Take a reference for the bin file */
+	kobject_get(&dump->kobj);
 	rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
-	if (rc) {
+	if (rc == 0) {
+		kobject_uevent(&dump->kobj, KOBJ_ADD);
+
+		pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
+			__func__, dump->id, dump->size);
+	} else {
+		/* Drop reference count taken for bin file */
 		kobject_put(&dump->kobj);
-		return NULL;
 	}
 
-	pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
-		__func__, dump->id, dump->size);
-
-	kobject_uevent(&dump->kobj, KOBJ_ADD);
-
-	return dump;
+	/* Drop our reference */
+	kobject_put(&dump->kobj);
+	return;
 }
 
 static irqreturn_t process_dump(int irq, void *data)
@@ -376,25 +376,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
 
 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
 {
+	struct memory_block *mem_block;
 	unsigned long block_sz;
 	int rc;
 
 	if (!lmb_is_removable(lmb))
 		return -EINVAL;
 
+	mem_block = lmb_to_memblock(lmb);
+	if (mem_block == NULL)
+		return -EINVAL;
+
 	rc = dlpar_offline_lmb(lmb);
-	if (rc)
+	if (rc) {
+		put_device(&mem_block->dev);
 		return rc;
+	}
 
 	block_sz = pseries_memory_block_size();
 
-	__remove_memory(lmb->nid, lmb->base_addr, block_sz);
+	__remove_memory(mem_block->nid, lmb->base_addr, block_sz);
+	put_device(&mem_block->dev);
 
 	/* Update memory regions for memory remove */
 	memblock_remove(lmb->base_addr, block_sz);
 
 	invalidate_lmb_associativity_index(lmb);
-	lmb_clear_nid(lmb);
 	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
 
 	return 0;
@@ -651,7 +658,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
 static int dlpar_add_lmb(struct drmem_lmb *lmb)
 {
 	unsigned long block_sz;
-	int rc;
+	int nid, rc;
 
 	if (lmb->flags & DRCONF_MEM_ASSIGNED)
 		return -EINVAL;
@@ -662,11 +669,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
 		return rc;
 	}
 
-	lmb_set_nid(lmb);
 	block_sz = memory_block_size_bytes();
 
+	/* Find the node id for this address. */
+	nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
 	/* Add the memory */
-	rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
+	rc = __add_memory(nid, lmb->base_addr, block_sz);
 	if (rc) {
 		invalidate_lmb_associativity_index(lmb);
 		return rc;
@@ -674,9 +683,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
 
 	rc = dlpar_online_lmb(lmb);
 	if (rc) {
-		__remove_memory(lmb->nid, lmb->base_addr, block_sz);
+		__remove_memory(nid, lmb->base_addr, block_sz);
 		invalidate_lmb_associativity_index(lmb);
-		lmb_clear_nid(lmb);
 	} else {
 		lmb->flags |= DRCONF_MEM_ASSIGNED;
 	}
@@ -494,18 +494,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
 	return 0; /* need to perform reset */
 }
 
+static int mce_handle_err_realmode(int disposition, u8 error_type)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (disposition == RTAS_DISP_NOT_RECOVERED) {
+		switch (error_type) {
+		case	MC_ERROR_TYPE_SLB:
+		case	MC_ERROR_TYPE_ERAT:
+			/*
+			 * Store the old slb content in paca before flushing.
+			 * Print this when we go to virtual mode.
+			 * There are chances that we may hit MCE again if there
+			 * is a parity error on the SLB entry we trying to read
+			 * for saving. Hence limit the slb saving to single
+			 * level of recursion.
+			 */
+			if (local_paca->in_mce == 1)
+				slb_save_contents(local_paca->mce_faulty_slbs);
+			flush_and_reload_slb();
+			disposition = RTAS_DISP_FULLY_RECOVERED;
+			break;
+		default:
+			break;
+		}
+	} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
+		/* Platform corrected itself but could be degraded */
+		pr_err("MCE: limited recovery, system may be degraded\n");
+		disposition = RTAS_DISP_FULLY_RECOVERED;
+	}
+#endif
+	return disposition;
+}
+
-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+static int mce_handle_err_virtmode(struct pt_regs *regs,
+				   struct rtas_error_log *errp,
+				   struct pseries_mc_errorlog *mce_log,
+				   int disposition)
 {
 	struct mce_error_info mce_err = { 0 };
-	unsigned long eaddr = 0, paddr = 0;
-	struct pseries_errorlog *pseries_log;
-	struct pseries_mc_errorlog *mce_log;
-	int disposition = rtas_error_disposition(errp);
 	int initiator = rtas_error_initiator(errp);
 	int severity = rtas_error_severity(errp);
+	unsigned long eaddr = 0, paddr = 0;
 	u8 error_type, err_sub_type;
 
+	if (!mce_log)
+		goto out;
+
+	error_type = mce_log->error_type;
+	err_sub_type = rtas_mc_error_sub_type(mce_log);
+
 	if (initiator == RTAS_INITIATOR_UNKNOWN)
 		mce_err.initiator = MCE_INITIATOR_UNKNOWN;
 	else if (initiator == RTAS_INITIATOR_CPU)
@@ -544,18 +581,7 @@ static int mce_handle_err_virtmode(struct pt_regs *regs,
 		mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
 	mce_err.error_class = MCE_ECLASS_UNKNOWN;
 
-	if (!rtas_error_extended(errp))
-		goto out;
-
-	pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
-	if (pseries_log == NULL)
-		goto out;
-
-	mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
-	error_type = mce_log->error_type;
-	err_sub_type = rtas_mc_error_sub_type(mce_log);
-
-	switch (mce_log->error_type) {
+	switch (error_type) {
 	case MC_ERROR_TYPE_UE:
 		mce_err.error_type = MCE_ERROR_TYPE_UE;
 		switch (err_sub_type) {
@@ -652,37 +678,31 @@ static int mce_handle_err_virtmode(struct pt_regs *regs,
 		mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
 		break;
 	}
 
-#ifdef CONFIG_PPC_BOOK3S_64
-	if (disposition == RTAS_DISP_NOT_RECOVERED) {
-		switch (error_type) {
-		case	MC_ERROR_TYPE_SLB:
-		case	MC_ERROR_TYPE_ERAT:
-			/*
-			 * Store the old slb content in paca before flushing.
-			 * Print this when we go to virtual mode.
-			 * There are chances that we may hit MCE again if there
-			 * is a parity error on the SLB entry we trying to read
-			 * for saving. Hence limit the slb saving to single
-			 * level of recursion.
-			 */
-			if (local_paca->in_mce == 1)
-				slb_save_contents(local_paca->mce_faulty_slbs);
-			flush_and_reload_slb();
-			disposition = RTAS_DISP_FULLY_RECOVERED;
-			break;
-		default:
-			break;
-		}
-	} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
-		/* Platform corrected itself but could be degraded */
-		printk(KERN_ERR "MCE: limited recovery, system may "
-		       "be degraded\n");
-		disposition = RTAS_DISP_FULLY_RECOVERED;
-	}
-#endif
-
+out:
+	save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
+		       &mce_err, regs->nip, eaddr, paddr);
+	return disposition;
+}
+
+static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+{
+	struct pseries_errorlog *pseries_log;
+	struct pseries_mc_errorlog *mce_log = NULL;
+	int disposition = rtas_error_disposition(errp);
+	u8 error_type;
+
+	if (!rtas_error_extended(errp))
+		goto out;
+
+	pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+	if (!pseries_log)
+		goto out;
+
+	mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+	error_type = mce_log->error_type;
+
+	disposition = mce_handle_err_realmode(disposition, error_type);
+
 	/*
 	 * Enable translation as we will be accessing per-cpu variables
 	 * in save_mce_event() which may fall outside RMO region, also
@@ -693,10 +713,10 @@ out:
 	 * Note: All the realmode handling like flushing SLB entries for
 	 * SLB multihit is done by now.
 	 */
 out:
 	mtmsr(mfmsr() | MSR_IR | MSR_DR);
-	save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
-		       &mce_err, regs->nip, eaddr, paddr);
-
+	disposition = mce_handle_err_virtmode(regs, errp, mce_log,
+					      disposition);
 	return disposition;
 }
 
@@ -36,6 +36,7 @@ static __init int rng_init(void)
 
 	ppc_md.get_random_seed = pseries_get_random_long;
 
+	of_node_put(dn);
 	return 0;
 }
 machine_subsys_initcall(pseries, rng_init);
@@ -174,6 +174,7 @@ int icp_hv_init(void)
 
 	icp_ops = &icp_hv_ops;
 
+	of_node_put(np);
 	return 0;
 }
 
@@ -5,15 +5,6 @@
 #include "pgtable.h"
 #include "../string.h"
 
-/*
- * __force_order is used by special_insns.h asm code to force instruction
- * serialization.
- *
- * It is not referenced from the code, but GCC < 5 with -fPIE would fail
- * due to an undefined symbol. Define it to make these ancient GCCs work.
- */
-unsigned long __force_order;
-
 #define BIOS_START_MIN		0x20000U	/* 128K, less than this is insane */
 #define BIOS_START_MAX		0x9f000U	/* 640K, absolute maximum */
 
@@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
 	while (amd_iommu_v2_event_descs[i].attr.attr.name)
 		i++;
 
-	attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
+	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
 	if (!attrs)
 		return -ENOMEM;
 
@@ -669,9 +669,7 @@ unlock:
 
 static inline void intel_pmu_drain_pebs_buffer(void)
 {
-	struct pt_regs regs;
-
-	x86_pmu.drain_pebs(&regs);
+	x86_pmu.drain_pebs(NULL);
 }
 
 /*
@@ -1736,6 +1734,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	struct x86_perf_regs perf_regs;
 	struct pt_regs *regs = &perf_regs.regs;
 	void *at = get_next_pebs_record_by_bit(base, top, bit);
+	struct pt_regs dummy_iregs;
 
 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
 		/*
@@ -1748,6 +1747,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	} else if (!intel_pmu_save_and_restart(event))
 		return;
 
+	if (!iregs)
+		iregs = &dummy_iregs;
+
 	while (count > 1) {
 		setup_sample(event, iregs, at, &data, regs);
 		perf_event_output(event, &data, regs);
@@ -1757,16 +1759,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	}
 
 	setup_sample(event, iregs, at, &data, regs);
-
-	/*
-	 * All but the last records are processed.
-	 * The last one is left to be able to call the overflow handler.
-	 */
-	if (perf_event_overflow(event, &data, regs)) {
-		x86_pmu_stop(event, 0);
-		return;
+	if (iregs == &dummy_iregs) {
+		/*
+		 * The PEBS records may be drained in the non-overflow context,
+		 * e.g., large PEBS + context switch. Perf should treat the
+		 * last record the same as other PEBS records, and doesn't
+		 * invoke the generic overflow handler.
+		 */
+		perf_event_output(event, &data, regs);
+	} else {
+		/*
+		 * All but the last records are processed.
+		 * The last one is left to be able to call the overflow handler.
+		 */
+		if (perf_event_overflow(event, &data, regs))
+			x86_pmu_stop(event, 0);
 	}
 }
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
@@ -110,6 +110,10 @@
 #define ICL_UNC_CBO_0_PER_CTR0			0x702
 #define ICL_UNC_CBO_MSR_OFFSET			0x8
 
+/* ICL ARB register */
+#define ICL_UNC_ARB_PER_CTR			0x3b1
+#define ICL_UNC_ARB_PERFEVTSEL			0x3b3
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
@@ -297,15 +301,21 @@ void skl_uncore_cpu_init(void)
 	snb_uncore_arb.ops = &skl_uncore_msr_ops;
 }
 
+static struct intel_uncore_ops icl_uncore_msr_ops = {
+	.disable_event	= snb_uncore_msr_disable_event,
+	.enable_event	= snb_uncore_msr_enable_event,
+	.read_counter	= uncore_msr_read_counter,
+};
+
 static struct intel_uncore_type icl_uncore_cbox = {
 	.name		= "cbox",
-	.num_counters   = 4,
+	.num_counters   = 2,
 	.perf_ctr_bits	= 44,
 	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
 	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
 	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
-	.ops		= &skl_uncore_msr_ops,
+	.ops		= &icl_uncore_msr_ops,
 	.format_group	= &snb_uncore_format_group,
 };
 
@@ -334,13 +344,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
 	.single_fixed	= 1,
 	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
 	.format_group	= &icl_uncore_clock_format_group,
-	.ops		= &skl_uncore_msr_ops,
+	.ops		= &icl_uncore_msr_ops,
 	.event_descs	= icl_uncore_events,
 };
 
+static struct intel_uncore_type icl_uncore_arb = {
+	.name		= "arb",
+	.num_counters	= 1,
+	.num_boxes	= 1,
+	.perf_ctr_bits	= 44,
+	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
+	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
+	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
+	.ops		= &icl_uncore_msr_ops,
+	.format_group	= &snb_uncore_format_group,
+};
+
 static struct intel_uncore_type *icl_msr_uncores[] = {
 	&icl_uncore_cbox,
-	&snb_uncore_arb,
+	&icl_uncore_arb,
 	&icl_uncore_clockbox,
 	NULL,
 };
@@ -358,7 +380,6 @@ void icl_uncore_cpu_init(void)
 {
 	uncore_msr_uncores = icl_msr_uncores;
 	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
-	snb_uncore_arb.ops = &skl_uncore_msr_ops;
 }
 
 enum {
@@ -10,45 +10,47 @@
 #include <linux/jump_label.h>
 
 /*
- * Volatile isn't enough to prevent the compiler from reordering the
- * read/write functions for the control registers and messing everything up.
- * A memory clobber would solve the problem, but would prevent reordering of
- * all loads stores around it, which can hurt performance. Solution is to
- * use a variable and mimic reads and writes to it to enforce serialization
+ * The compiler should not reorder volatile asm statements with respect to each
+ * other: they should execute in program order. However GCC 4.9.x and 5.x have
+ * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
+ * volatile asm. The write functions are not affected since they have memory
+ * clobbers preventing reordering. To prevent reads from being reordered with
+ * respect to writes, use a dummy memory operand.
  */
-extern unsigned long __force_order;
+
+#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
 
 void native_write_cr0(unsigned long val);
 
 static inline unsigned long native_read_cr0(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
 	return val;
 }
 
 static inline unsigned long native_read_cr2(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
 	return val;
 }
 
 static inline void native_write_cr2(unsigned long val)
 {
-	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
 }
 
 static inline unsigned long __native_read_cr3(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
 	return val;
 }
 
 static inline void native_write_cr3(unsigned long val)
 {
-	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
 }
 
 static inline unsigned long native_read_cr4(void)
@@ -63,10 +65,10 @@ static inline unsigned long native_read_cr4(void)
 	asm volatile("1: mov %%cr4, %0\n"
 		     "2:\n"
 		     _ASM_EXTABLE(1b, 2b)
-		     : "=r" (val), "=m" (__force_order) : "0" (0));
+		     : "=r" (val) : "0" (0), __FORCE_ORDER);
 #else
 	/* CR4 always exists on x86_64. */
-	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
 #endif
 	return val;
 }
@@ -377,7 +377,7 @@ void native_write_cr0(unsigned long val)
 	unsigned long bits_missing = 0;
 
 set_register:
-	asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
+	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
 
 	if (static_branch_likely(&cr_pinning)) {
 		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
@@ -396,7 +396,7 @@ void native_write_cr4(unsigned long val)
 	unsigned long bits_changed = 0;
 
 set_register:
-	asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
+	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
 
 	if (static_branch_likely(&cr_pinning)) {
 		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
@@ -388,10 +388,28 @@ static int msr_to_offset(u32 msr)
 	return -1;
 }
 
+__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
+				      struct pt_regs *regs, int trapnr,
+				      unsigned long error_code,
+				      unsigned long fault_addr)
+{
+	pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
+		 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
+
+	show_stack_regs(regs);
+
+	panic("MCA architectural violation!\n");
+
+	while (true)
+		cpu_relax();
+
+	return true;
+}
+
 /* MSR access wrappers used for error injection */
 static u64 mce_rdmsrl(u32 msr)
 {
-	u64 v;
+	DECLARE_ARGS(val, low, high);
 
 	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
@@ -401,21 +419,43 @@ static u64 mce_rdmsrl(u32 msr)
 		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 	}
 
-	if (rdmsrl_safe(msr, &v)) {
-		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
-		/*
-		 * Return zero in case the access faulted. This should
-		 * not happen normally but can happen if the CPU does
-		 * something weird, or if the code is buggy.
-		 */
-		v = 0;
-	}
+	/*
+	 * RDMSR on MCA MSRs should not fault. If they do, this is very much an
+	 * architectural violation and needs to be reported to hw vendor. Panic
+	 * the box to not allow any further progress.
+	 */
+	asm volatile("1: rdmsr\n"
+		     "2:\n"
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
+		     : EAX_EDX_RET(val, low, high) : "c" (msr));
 
-	return v;
+	return EAX_EDX_VAL(val, low, high);
+}
+
+__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
+				      struct pt_regs *regs, int trapnr,
+				      unsigned long error_code,
+				      unsigned long fault_addr)
+{
+	pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
+		 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
+		 regs->ip, (void *)regs->ip);
+
+	show_stack_regs(regs);
+
+	panic("MCA architectural violation!\n");
+
+	while (true)
+		cpu_relax();
+
+	return true;
 }
 
 static void mce_wrmsrl(u32 msr, u64 v)
 {
+	u32 low, high;
+
 	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 
@@ -423,7 +463,15 @@ static void mce_wrmsrl(u32 msr, u64 v)
 		*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 		return;
 	}
-	wrmsrl(msr, v);
+
+	low  = (u32)v;
+	high = (u32)(v >> 32);
+
+	/* See comment in mce_rdmsrl() */
+	asm volatile("1: wrmsr\n"
+		     "2:\n"
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
+		     : : "c" (msr), "a"(low), "d" (high) : "memory");
 }
 
 /*
@@ -172,4 +172,14 @@ extern bool amd_filter_mce(struct mce *m);
 static inline bool amd_filter_mce(struct mce *m) { return false; };
 #endif
 
+__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
+				      struct pt_regs *regs, int trapnr,
+				      unsigned long error_code,
+				      unsigned long fault_addr);
+
+__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
+				      struct pt_regs *regs, int trapnr,
+				      unsigned long error_code,
+				      unsigned long fault_addr);
+
 #endif /* __X86_MCE_INTERNAL_H__ */
@@ -9,9 +9,11 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/debugfs.h>
-#include <asm/mce.h>
 #include <linux/uaccess.h>
 
+#include <asm/mce.h>
+#include <asm/intel-family.h>
+
 #include "internal.h"
 
 /*
@@ -40,9 +42,14 @@ static struct severity {
 	unsigned char context;
 	unsigned char excp;
 	unsigned char covered;
+	unsigned char cpu_model;
+	unsigned char cpu_minstepping;
+	unsigned char bank_lo, bank_hi;
 	char *msg;
 } severities[] = {
 #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
+#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
+#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
 #define  KERNEL		.context = IN_KERNEL
 #define  USER		.context = IN_USER
 #define  KERNEL_RECOV	.context = IN_KERNEL_RECOV
@@ -97,7 +104,6 @@ static struct severity {
 		KEEP, "Corrected error",
 		NOSER, BITCLR(MCI_STATUS_UC)
 		),
-
 	/*
 	 * known AO MCACODs reported via MCE or CMC:
 	 *
@@ -113,6 +119,18 @@ static struct severity {
 		AO, "Action optional: last level cache writeback error",
 		SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
 		),
+	/*
+	 * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
+	 * to report uncorrected errors using CMCI with a special signature.
+	 * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
+	 * in one of the memory controller banks.
+	 * Set severity to "AO" for same action as normal patrol scrub error.
+	 */
+	MCESEV(
+		AO, "Uncorrected Patrol Scrub Error",
+		SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
+		MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
+	),
 
 	/* ignore OVER for UCNA */
 	MCESEV(
@@ -320,6 +338,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
 			continue;
 		if (s->excp && excp != s->excp)
 			continue;
+		if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
+			continue;
+		if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
+			continue;
+		if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
+			continue;
 		if (msg)
 			*msg = s->msg;
 		s->covered = 1;
@@ -242,9 +242,9 @@ static void __init fpu__init_system_ctx_switch(void)
  */
 static void __init fpu__init_parse_early_param(void)
 {
-	char arg[32];
+	char arg[128];
 	char *argptr = arg;
-	int bit;
+	int arglen, res, bit;
 
 #ifdef CONFIG_X86_32
 	if (cmdline_find_option_bool(boot_command_line, "no387"))
@@ -267,12 +267,26 @@ static void __init fpu__init_parse_early_param(void)
 	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
 		setup_clear_cpu_cap(X86_FEATURE_XSAVES);
 
-	if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
-				sizeof(arg)) &&
-	    get_option(&argptr, &bit) &&
-	    bit >= 0 &&
-	    bit < NCAPINTS * 32)
-		setup_clear_cpu_cap(bit);
+	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
+	if (arglen <= 0)
+		return;
+
+	pr_info("Clearing CPUID bits:");
+	do {
+		res = get_option(&argptr, &bit);
+		if (res == 0 || res == 3)
+			break;
+
+		/* If the argument was too long, the last bit may be cut off */
+		if (res == 1 && arglen >= sizeof(arg))
+			break;
+
+		if (bit >= 0 && bit < NCAPINTS * 32) {
+			pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
+			setup_clear_cpu_cap(bit);
+		}
+	} while (res == 2);
+	pr_cont("\n");
 }
 
 /*
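The rewritten loop leans on get_option()'s return convention (see
lib/cmdline.c): 0 means no integer was parsed, 1 means an integer was
parsed and the string is exhausted, 2 means an integer was parsed and
a comma follows (so the loop continues), 3 means a range hyphen
follows. A simplified standalone re-implementation, for illustration
only (this is not the kernel's code):

#include <stdio.h>
#include <stdlib.h>

static int get_option_sketch(char **str, int *val)
{
	char *end;

	if (!*str || !**str)
		return 0;
	*val = (int)strtol(*str, &end, 0);
	if (end == *str)
		return 0;		/* nothing parsed */
	*str = end;
	if (**str == ',') {
		(*str)++;
		return 2;		/* more values follow */
	}
	if (**str == '-')
		return 3;		/* start of a range */
	return 1;			/* last value */
}

int main(void)
{
	char buf[] = "120,130,155";	/* e.g. clearcpuid=120,130,155 */
	char *p = buf;
	int bit, res;

	do {
		res = get_option_sketch(&p, &bit);
		if (res == 0 || res == 3)
			break;
		printf("clearing bit %d\n", bit);
	} while (res == 2);
	return 0;
}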
@@ -106,7 +106,6 @@ fs_initcall(nmi_warning_debugfs);
 
 static void nmi_check_duration(struct nmiaction *action, u64 duration)
 {
-	u64 whole_msecs = READ_ONCE(action->max_duration);
 	int remainder_ns, decimal_msecs;
 
 	if (duration < nmi_longest_ns || duration < action->max_duration)
@@ -114,12 +113,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
 
 	action->max_duration = duration;
 
-	remainder_ns = do_div(whole_msecs, (1000 * 1000));
+	remainder_ns = do_div(duration, (1000 * 1000));
 	decimal_msecs = remainder_ns / 1000;
 
 	printk_ratelimited(KERN_INFO
 		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
-		action->handler, whole_msecs, decimal_msecs);
+		action->handler, duration, decimal_msecs);
 }
 
 static int nmi_handle(unsigned int type, struct pt_regs *regs)
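The subtlety here is that do_div(n, base) divides n in place and
returns the remainder. The old code divided whole_msecs, a copy of
the previous maximum taken before the new duration was stored, so the
message could print a stale value. A plain-C model of the fixed
calculation (the 12345678 ns input is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t duration = 12345678;	/* ns */
	uint32_t remainder_ns, decimal_msecs;

	remainder_ns = duration % (1000 * 1000);	/* what do_div() returns */
	duration /= 1000 * 1000;			/* do_div() updates in place */
	decimal_msecs = remainder_ns / 1000;

	/* prints "12.345 msecs", matching the printk format above */
	printf("%llu.%03u msecs\n", (unsigned long long)duration, decimal_msecs);
	return 0;
}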
@@ -3617,7 +3617,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
 	u64 tsc_aux = 0;
 
 	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
-		return emulate_gp(ctxt, 0);
+		return emulate_ud(ctxt);
 	ctxt->dst.val = tsc_aux;
 	return X86EMUL_CONTINUE;
 }
@@ -6453,6 +6453,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 			cond_resched_lock(&kvm->mmu_lock);
 		}
 	}
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, rcu_idx);
@@ -5383,6 +5383,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 		 * - Tell IOMMU to use legacy mode for this interrupt.
 		 * - Retrieve ga_tag of prior interrupt remapping data.
 		 */
+		pi.prev_ga_tag = 0;
 		pi.is_guest_mode = false;
 		ret = irq_set_vcpu_affinity(host_irq, &pi);
@@ -2231,6 +2231,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 		vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
 		vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
 		vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
+
+		vmx->segment_cache.bitmask = 0;
 	}
 
 	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
@@ -3094,8 +3096,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	prepare_vmcs02_early(vmx, vmcs12);
 
 	if (from_vmentry) {
-		if (unlikely(!nested_get_vmcs12_pages(vcpu)))
+		if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
+			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 			return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
+		}
 
 		if (nested_vmx_check_vmentry_hw(vcpu)) {
 			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
@@ -743,11 +743,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
 {
 	char b[BDEVNAME_SIZE];
 
-	printk(KERN_INFO "attempt to access beyond end of device\n");
-	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
-			bio_devname(bio, b), bio->bi_opf,
-			(unsigned long long)bio_end_sector(bio),
-			(long long)maxsector);
+	pr_info_ratelimited("attempt to access beyond end of device\n"
+			    "%s: rw=%d, want=%llu, limit=%llu\n",
+			    bio_devname(bio, b), bio->bi_opf,
+			    bio_end_sector(bio), maxsector);
 }
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
 	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
 						  kobj);
 
-	cancel_delayed_work_sync(&hctx->run_work);
-
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		cleanup_srcu_struct(hctx->srcu);
 	blk_free_flush_queue(hctx->fq);
@@ -891,9 +891,16 @@ static void __blk_release_queue(struct work_struct *work)
 
 	blk_free_queue_stats(q->stats);
 
-	if (queue_is_mq(q))
+	if (queue_is_mq(q)) {
+		struct blk_mq_hw_ctx *hctx;
+		int i;
+
 		cancel_delayed_work_sync(&q->requeue_work);
 
+		queue_for_each_hw_ctx(q, hctx, i)
+			cancel_delayed_work_sync(&hctx->run_work);
+	}
+
 	blk_exit_queue(q);
 
 	blk_queue_free_zone_bitmaps(q);
@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
 	SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
 
 	skcipher_request_set_sync_tfm(skreq, null_tfm);
-	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 				      NULL, NULL);
 	skcipher_request_set_crypt(skreq, src, dst, len, NULL);
 
@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		areq->outlen = outlen;
 
 		aead_request_set_callback(&areq->cra_u.aead_req,
-					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  CRYPTO_TFM_REQ_MAY_SLEEP,
 					  af_alg_async_cb, areq);
 		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
 				 crypto_aead_decrypt(&areq->cra_u.aead_req);
 
 		/* AIO operation in progress */
-		if (err == -EINPROGRESS || err == -EBUSY)
+		if (err == -EINPROGRESS)
 			return -EIOCBQUEUED;
 
 		sock_put(sk);
 	} else {
 		/* Synchronous operation */
 		aead_request_set_callback(&areq->cra_u.aead_req,
+					  CRYPTO_TFM_REQ_MAY_SLEEP |
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
 					  crypto_req_done, &ctx->wait);
 		err = crypto_wait_req(ctx->enc ?
@@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
 
 		/* AIO operation in progress */
-		if (err == -EINPROGRESS || err == -EBUSY)
+		if (err == -EINPROGRESS)
 			return -EIOCBQUEUED;
 
 		sock_put(sk);
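Context for the algif hunks above: a driver may return -EBUSY with the
meaning "request queued on the backlog" only when the caller set
CRYPTO_TFM_REQ_MAY_BACKLOG. The AIO paths no longer set that flag, so
-EBUSY there is a genuine error and must not be treated like
-EINPROGRESS. For synchronous callers the usual shape is the
crypto_wait_req() pattern, sketched below (kernel context assumed;
this is the generic idiom, not a quote from algif):

#include <crypto/skcipher.h>
#include <linux/crypto.h>

static int encrypt_sync(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	/* folds -EINPROGRESS and backlogged -EBUSY into the final status */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}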
@@ -227,7 +227,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
 struct binder_work {
 	struct list_head entry;
 
-	enum {
+	enum binder_work_type {
 		BINDER_WORK_TRANSACTION = 1,
 		BINDER_WORK_TRANSACTION_COMPLETE,
 		BINDER_WORK_RETURN_ERROR,
@@ -889,27 +889,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
 	return w;
 }
 
-/**
- * binder_dequeue_work_head() - Dequeues the item at head of list
- * @proc:         binder_proc associated with list
- * @list:         list to dequeue head
- *
- * Removes the head of the list if there are items on the list
- *
- * Return: pointer dequeued binder_work, NULL if list was empty
- */
-static struct binder_work *binder_dequeue_work_head(
-					struct binder_proc *proc,
-					struct list_head *list)
-{
-	struct binder_work *w;
-
-	binder_inner_proc_lock(proc);
-	w = binder_dequeue_work_head_ilocked(list);
-	binder_inner_proc_unlock(proc);
-	return w;
-}
-
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
 static void binder_free_thread(struct binder_thread *thread);
@@ -2347,8 +2326,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			 * file is done when the transaction is torn
 			 * down.
 			 */
-			WARN_ON(failed_at &&
-				proc->tsk == current->group_leader);
 		} break;
 		case BINDER_TYPE_PTR:
 			/*
@@ -4591,13 +4568,17 @@ static void binder_release_work(struct binder_proc *proc,
 				struct list_head *list)
 {
 	struct binder_work *w;
+	enum binder_work_type wtype;
 
 	while (1) {
-		w = binder_dequeue_work_head(proc, list);
+		binder_inner_proc_lock(proc);
+		w = binder_dequeue_work_head_ilocked(list);
+		wtype = w ? w->type : 0;
+		binder_inner_proc_unlock(proc);
 		if (!w)
 			return;
 
-		switch (w->type) {
+		switch (wtype) {
 		case BINDER_WORK_TRANSACTION: {
 			struct binder_transaction *t;
 
@@ -4631,9 +4612,11 @@ static void binder_release_work(struct binder_proc *proc,
 			kfree(death);
 			binder_stats_deleted(BINDER_STAT_DEATH);
 		} break;
+		case BINDER_WORK_NODE:
+			break;
 		default:
 			pr_err("unexpected work type, %d, not freed\n",
-			       w->type);
+			       wtype);
 			break;
 		}
 	}
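The binder change above closes a use-after-free: the old helper
dropped the inner lock before callers dereferenced w->type, by which
time another thread could already have freed the work item. The fix
snapshots the type while the lock is still held. The same shape in
generic kernel code (types and names invented for this sketch):

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {			/* invented for this sketch */
	struct list_head entry;
	int type;
};

static int dequeue_and_get_type(spinlock_t *lock, struct list_head *list,
				struct work_item **out)
{
	struct work_item *w;
	int type = 0;

	spin_lock(lock);
	w = list_first_entry_or_null(list, struct work_item, entry);
	if (w) {
		list_del_init(&w->entry);
		type = w->type;		/* copy while still protected */
	}
	spin_unlock(lock);

	*out = w;
	return type;			/* valid even if *w is freed later */
}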
@@ -2664,6 +2664,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
 	buf = kmalloc(size, GFP_KERNEL);
 	if (!buf) {
+		kfree(dr);
 		usb_free_urb(urb);
 		return -ENOMEM;
 	}
 
@@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
 		clear_bit(HCI_UART_PROTO_READY, &hu->flags);
 		percpu_up_write(&hu->proto_lock);
 
+		cancel_work_sync(&hu->init_ready);
 		cancel_work_sync(&hu->write_work);
 
 		if (hdev) {
@@ -357,6 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
 	struct hci_dev *hdev = hu->hdev;
 
 	clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+
+	cancel_work_sync(&hu->init_ready);
 	if (test_bit(HCI_UART_REGISTERED, &hu->flags))
 		hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
@@ -1977,7 +1977,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	/* Do this early so it's available for logs. */
 	if (!new_smi->io.dev) {
 		pr_err("IPMI interface added with no device\n");
-		rv = EIO;
+		rv = -EIO;
 		goto out_err;
 	}
 
@@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
 		return -EINVAL;
 
 	regmap_read(regmap, AT91_CKGR_MOR, &tmp);
-	tmp &= ~MOR_KEY_MASK;
 
 	if (index && !(tmp & AT91_PMC_MOSCSEL))
-		regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
+		tmp = AT91_PMC_MOSCSEL;
 	else if (!index && (tmp & AT91_PMC_MOSCSEL))
-		regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
+		tmp = 0;
+	else
+		return 0;
+
+	regmap_update_bits(regmap, AT91_CKGR_MOR,
+			   AT91_PMC_MOSCSEL | MOR_KEY_MASK,
+			   tmp | AT91_PMC_KEY);
 
 	while (!clk_sam9x5_main_ready(regmap))
 		cpu_relax();
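regmap_update_bits(map, reg, mask, val) performs a locked
read-modify-write: new = (old & ~mask) | (val & mask). Putting
MOR_KEY_MASK in the mask and AT91_PMC_KEY in the value is what lets
the patch present the write key in the same access that flips
MOSCSEL. A plain-C model of the bit arithmetic (the register and key
values below are placeholders, not the real AT91 definitions):

#include <stdint.h>
#include <stdio.h>

static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t MOSCSEL  = 1u << 24;		/* placeholder bit */
	uint32_t KEY_MASK = 0xffu << 16;	/* placeholder field */
	uint32_t KEY      = 0x37u << 16;	/* placeholder key */
	uint32_t mor      = 0x00aa0000;		/* stale key bits set */

	mor = update_bits(mor, MOSCSEL | KEY_MASK, MOSCSEL | KEY);
	printf("MOR = 0x%08x\n", mor);	/* key replaced, MOSCSEL set */
	return 0;
}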
@@ -1336,8 +1336,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
 	pll->hw.init = &init;
 
 	ret = devm_clk_hw_register(cprman->dev, &pll->hw);
-	if (ret)
+	if (ret) {
+		kfree(pll);
 		return NULL;
+	}
 	return &pll->hw;
 }
 
@@ -162,10 +162,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
 						 "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
 
 static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
-						  "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
+						  "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
 
 static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
-						  "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
+						  "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
 
 static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
 						"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
@@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
 		np = of_find_node_with_property(np, *clk_name);
 		if (!np) {
 			clk_name++;
-			break;
+			continue;
 		}
 
 		if (!of_device_is_available(np))
@@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
 		    "pwm_sel", 19),
+	GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
+		    "pwm_sel", 21),
 	GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
 		    "uart_sel", 22),
 	GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
 		    "uart_sel", 23),
 	GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
@@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
 			&g12a_fclk_div2_div.hw
 		},
 		.num_parents = 1,
+		/*
+		 * Similar to fclk_div3, it seems that this clock is used by
+		 * the resident firmware and is required by the platform to
+		 * operate correctly.
+		 * Until the following condition are met, we need this clock to
+		 * be marked as critical:
+		 * a) Mark the clock used by a firmware resource, if possible
+		 * b) CCF has a clock hand-off mechanism to make the sure the
+		 *    clock stays on until the proper driver comes along
+		 */
+		.flags = CLK_IS_CRITICAL,
 	},
 };
 
@@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
 	.cmd_rcgr = 0x48044,
 	.mnd_width = 0,
 	.hid_width = 5,
-	.parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+	.parent_map = gcc_parent_map_xo_gpll0,
 	.freq_tbl = ftbl_hmss_rbcpr_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "hmss_rbcpr_clk_src",
@@ -167,7 +167,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
 					  unsigned long flags,
 					  spinlock_t *lock)
 {
-	struct clk *clk;
+	struct clk *clk = ERR_PTR(-ENOMEM);
 	struct clk_mux *mux = NULL;
 	struct clk_gate *gate = NULL;
 	struct clk_divider *div = NULL;
@@ -484,6 +484,12 @@ remove_opp:
 /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
 late_initcall(armada37xx_cpufreq_driver_init);
 
+static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
+	{ .compatible = "marvell,armada-3700-nb-pm" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
+
 MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
 MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
 MODULE_LICENSE("GPL");
@@ -884,12 +884,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
 					unsigned long action, void *unused)
 {
 	int cpu;
-	struct cpufreq_policy cpu_policy;
+	struct cpufreq_policy *cpu_policy;
 
 	rebooting = true;
 	for_each_online_cpu(cpu) {
-		cpufreq_get_policy(&cpu_policy, cpu);
-		powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
+		cpu_policy = cpufreq_cpu_get(cpu);
+		if (!cpu_policy)
+			continue;
+		powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
+		cpufreq_cpu_put(cpu_policy);
 	}
 
 	return NOTIFY_DONE;
@@ -124,6 +124,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_DES
+	select CRYPTO_XTS
 	help
 	  Selecting this will use CAAM Queue Interface (QI) for sending
 	  & receiving crypto jobs to/from CAAM. This gives better performance
@@ -18,6 +18,7 @@
 #include "qi.h"
 #include "jr.h"
 #include "caamalg_desc.h"
+#include <asm/unaligned.h>
 
 /*
  * crypto alg
@@ -67,6 +68,11 @@ struct caam_ctx {
 	struct device *qidev;
 	spinlock_t lock;	/* Protects multiple init of driver context */
 	struct caam_drv_ctx *drv_ctx[NUM_OP];
+	struct crypto_skcipher *fallback;
 };
 
+struct caam_skcipher_req_ctx {
+	struct skcipher_request fallback_req;
+};
+
 static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -906,12 +912,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	int ret = 0;
+	int err;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
 		dev_err(jrdev, "key size mismatch\n");
 		goto badkey;
 	}
 
+	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (err)
+		return err;
+
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
@@ -1828,6 +1839,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	return edesc;
 }
 
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 {
 	struct skcipher_edesc *edesc;
@@ -1838,6 +1857,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (!req->cryptlen)
 		return 0;
 
+	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+
+		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+				 crypto_skcipher_decrypt(&rctx->fallback_req);
+	}
+
 	if (unlikely(caam_congested))
 		return -EAGAIN;
 
@@ -1962,6 +1996,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 		.base = {
 			.cra_name = "xts(aes)",
 			.cra_driver_name = "xts-aes-caam-qi",
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 			.cra_blocksize = AES_BLOCK_SIZE,
 		},
 		.setkey = xts_skcipher_setkey,
@@ -2940,9 +2975,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct caam_skcipher_alg *caam_alg =
 		container_of(alg, typeof(*caam_alg), skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+	int ret = 0;
 
-	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
-				false);
+	if (alg_aai == OP_ALG_AAI_XTS) {
+		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+		struct crypto_skcipher *fallback;
+
+		fallback = crypto_alloc_skcipher(tfm_name, 0,
+						 CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(fallback)) {
+			dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
+				tfm_name, PTR_ERR(fallback));
+			return PTR_ERR(fallback);
+		}
+
+		ctx->fallback = fallback;
+		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+					    crypto_skcipher_reqsize(fallback));
+	}
+
+	ret = caam_init_common(ctx, &caam_alg->caam, false);
+	if (ret && ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+
+	return ret;
 }
 
 static int caam_aead_init(struct crypto_aead *tfm)
@@ -2968,7 +3026,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
 
 static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	caam_exit_common(crypto_skcipher_ctx(tfm));
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+	caam_exit_common(ctx);
 }
 
 static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3002,7 +3064,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
 
 	alg->init = caam_cra_init;
 	alg->exit = caam_cra_exit;
@@ -1746,7 +1746,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			break;
 		default:
 			ret = -EINVAL;
-			goto e_ctx;
+			goto e_data;
 		}
 	} else {
 		/* Stash the context */
@@ -1053,6 +1053,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	ndev = n->dev;
 	if (!ndev)
 		goto free_dst;
+	if (is_vlan_dev(ndev))
+		ndev = vlan_dev_real_dev(ndev);
+
 	port_id = cxgb4_port_idx(ndev);
 
 	csk = chtls_sock_create(cdev);
@@ -910,9 +910,9 @@ static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
 	return (__force int)cpu_to_be16(thdr->length);
 }
 
-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
 {
-	return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
+	return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
 }
 
 static int csk_wait_memory(struct chtls_dev *cdev,
@@ -1210,6 +1210,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
 	copied = 0;
 	csk = rcu_dereference_sk_user_data(sk);
 	cdev = csk->cdev;
+	lock_sock(sk);
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 
 	err = sk_stream_wait_connect(sk, &timeo);
@@ -527,7 +527,7 @@ static void release_ixp_crypto(struct device *dev)
 
 	if (crypt_virt) {
 		dma_free_coherent(dev,
-				  NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
+				  NPE_QLEN * sizeof(struct crypt_ctl),
 				  crypt_virt, crypt_phys);
 	}
 }
@@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
 static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
 {
 	struct mtk_ring **ring = cryp->ring;
-	int i, err = ENOMEM;
+	int i;
 
 	for (i = 0; i < MTK_RING_MAX; i++) {
 		ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
@@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
 	return 0;
 
 err_cleanup:
-	for (; i--; ) {
+	do {
 		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
 				  ring[i]->res_base, ring[i]->res_dma);
 		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
 				  ring[i]->cmd_base, ring[i]->cmd_dma);
 		kfree(ring[i]);
-	}
-	return err;
+	} while (i--);
+	return -ENOMEM;
 }
 
 static int mtk_crypto_probe(struct platform_device *pdev)
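Two things are fixed in the mediatek hunks: the function returned
+ENOMEM (a positive value) instead of -ENOMEM, and the `for (; i--; )`
cleanup skipped the entry whose setup had just failed. The loop-shape
difference in isolation, as a runnable demonstration:

#include <stdio.h>

int main(void)
{
	int j;

	/* suppose the failure happened while setting up index 2 */
	j = 2;
	printf("for (; j--; ) frees:");
	for (; j--; )
		printf(" %d", j);	/* 1 0  - index 2 leaks */

	j = 2;
	printf("\ndo { } while (j--) frees:");
	do
		printf(" %d", j);	/* 2 1 0 - current index included */
	while (j--);
	printf("\n");
	return 0;
}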
@@ -453,6 +453,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	u32 val, mask;
 
+	if (likely(ctx->digcnt))
+		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
+
 	/*
 	 * Setting ALGO_CONST only for the first iteration and
 	 * CLOSE_HASH only for the last one. Note that flags mode bits
@@ -1697,11 +1697,6 @@ static int spacc_probe(struct platform_device *pdev)
 		goto err_clk_put;
 	}
 
-	ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
-	if (ret)
-		goto err_clk_disable;
-
-
 	/*
 	 * Use an IRQ threshold of 50% as a default. This seems to be a
 	 * reasonable trade off of latency against throughput but can be
@@ -1709,6 +1704,10 @@ static int spacc_probe(struct platform_device *pdev)
 	 */
 	engine->stat_irq_thresh = (engine->fifo_sz / 2);
 
+	ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+	if (ret)
+		goto err_clk_disable;
+
 	/*
 	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
 	 * only submit a new packet for processing when we complete another in
@@ -1218,15 +1218,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
 	add_threaded_test(info);
 
 	/* Check if channel was added successfully */
-	dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
-
-	if (dtc->chan) {
+	if (!list_empty(&info->channels)) {
 		/*
 		 * if new channel was not successfully added, revert the
 		 * "test_channel" string to the name of the last successfully
 		 * added channel. exception for when users issues empty string
 		 * to channel parameter.
 		 */
+		dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
 		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
 		    && (strcmp("", strim(test_channel)) != 0)) {
 			ret = -EINVAL;
@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
 	if (dws->dma_dev != chan->device->dev)
 		return false;
 
+	/* permit channels in accordance with the channels mask */
+	if (dws->channels && !(dws->channels & dwc->mask))
+		return false;
+
 	/* We have to copy data since dws can be temporary storage */
 	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
 
@@ -14,7 +14,7 @@
 static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	u32 cfghi = DWC_CFGH_FIFO_MODE;
+	u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 	bool hs_polarity = dwc->dws.hs_polarity;
 
@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
 	};
 	dma_cap_mask_t cap;
 
-	if (dma_spec->args_count != 3)
+	if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
 		return NULL;
 
 	slave.src_id = dma_spec->args[0];
 	slave.dst_id = dma_spec->args[0];
 	slave.m_master = dma_spec->args[1];
 	slave.p_master = dma_spec->args[2];
+	if (dma_spec->args_count >= 4)
+		slave.channels = dma_spec->args[3];
 
 	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
 		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
 		    slave.m_master >= dw->pdata->nr_masters ||
-		    slave.p_master >= dw->pdata->nr_masters))
+		    slave.p_master >= dw->pdata->nr_masters ||
+		    slave.channels >= BIT(dw->pdata->nr_channels)))
 		return NULL;
 
 	dma_cap_zero(cap);
@@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
 	/* register interrupt handler */
 	irq = platform_get_irq(pdev, 0);
 	dev_dbg(&pdev->dev, "got irq %d\n", irq);
-	if (!irq)
-		return -ENODEV;
+	if (irq < 0)
+		return irq;
 
 	rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
 			      DRV_NAME, ctx);
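platform_get_irq() returns a negative errno on failure (including
-EPROBE_DEFER), so `if (!irq)` both misses real errors and tests a
case that effectively does not occur. The canonical probe-time
pattern, as a sketch (the example_* names are invented; kernel
context assumed):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagate -EPROBE_DEFER, -ENXIO, ... */

	return devm_request_irq(&pdev->dev, irq, example_isr, 0,
				dev_name(&pdev->dev), NULL);
}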
@@ -1074,16 +1074,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 			    PCI_DEVICE_ID_INTEL_5100_19, 0);
 	if (!einj) {
 		ret = -ENODEV;
-		goto bail_einj;
+		goto bail_mc_free;
 	}
 
 	rc = pci_enable_device(einj);
 	if (rc < 0) {
 		ret = rc;
-		goto bail_disable_einj;
+		goto bail_einj;
 	}
 
-
 	mci->pdev = &pdev->dev;
 
 	priv = mci->pvt_info;
@@ -1149,14 +1148,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 bail_scrub:
 	priv->scrub_enable = 0;
 	cancel_delayed_work_sync(&(priv->i5100_scrubbing));
-	edac_mc_free(mci);
-
-bail_disable_einj:
 	pci_disable_device(einj);
 
 bail_einj:
 	pci_dev_put(einj);
 
+bail_mc_free:
+	edac_mc_free(mci);
+
 bail_disable_ch1:
 	pci_disable_device(ch1mm);
 
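The i5100 fix is the classic goto-unwind rule: cleanup labels must
release resources in exactly the reverse order of acquisition, and
each failure site must jump to the label that frees only what was
already acquired. The shape it restores, reduced to a runnable sketch
with malloc/free standing in for the PCI and EDAC resources:

#include <stdlib.h>

static int setup(void)
{
	void *a, *b, *c;
	int err = -1;

	a = malloc(16);
	if (!a)
		return -1;
	b = malloc(16);
	if (!b)
		goto free_a;		/* only a exists yet */
	c = malloc(16);
	if (!c)
		goto free_b;		/* a and b exist */

	free(c);			/* demo only: release on success too */
	free(b);
	free(a);
	return 0;

free_b:
	free(b);			/* reverse order of acquisition */
free_a:
	free(a);
	return err;
}

int main(void)
{
	return setup();
}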
@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
 
 	/* add EMIF ECC error handler */
 	error_irq = platform_get_irq(pdev, 0);
-	if (!error_irq) {
+	if (error_irq < 0) {
+		ret = error_irq;
 		edac_printk(KERN_ERR, EDAC_MOD_NAME,
 			    "EMIF irq number not defined.\n");
 		goto err;
@@ -6984,8 +6984,7 @@ static int dm_update_plane_state(struct dc *dc,
 				dm_old_plane_state->dc_state,
 				dm_state->context)) {
 
-			ret = EINVAL;
-			return ret;
+			return -EINVAL;
 		}
 
 
@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
 	source[len - 1] = '\0';
 
 	ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
-	if (ret)
+	if (ret) {
+		kfree(source);
 		return ret;
+	}
 
 	spin_lock_irq(&crc->lock);
 
@@ -2120,7 +2120,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
 				intel_dp->dpcd,
 				sizeof(intel_dp->dpcd));
 	cdv_intel_edp_panel_vdd_off(gma_encoder);
-	if (ret == 0) {
+	if (ret <= 0) {
 		/* if this fails, presume the device is a ghost */
 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
 		cdv_intel_dp_encoder_destroy(encoder);
@@ -834,7 +834,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
 	int i;
 
 	a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
-		sizeof(a6xx_state->indexed_regs));
+		sizeof(*a6xx_state->indexed_regs));
 	if (!a6xx_state->indexed_regs)
 		return;
 
@@ -819,7 +819,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	struct drm_plane *plane;
 	struct drm_display_mode *mode;
 
-	int cnt = 0, rc = 0, mixer_width, i, z_pos;
+	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
 
 	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
 	int multirect_count = 0;
@@ -852,9 +852,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 
 	memset(pipe_staged, 0, sizeof(pipe_staged));
 
-	mixer_width = mode->hdisplay / cstate->num_mixers;
+	if (cstate->num_mixers) {
+		mixer_width = mode->hdisplay / cstate->num_mixers;
 
-	_dpu_crtc_setup_lm_bounds(crtc, state);
+		_dpu_crtc_setup_lm_bounds(crtc, state);
+	}
 
 	crtc_rect.x2 = mode->hdisplay;
 	crtc_rect.y2 = mode->vdisplay;
@@ -137,8 +137,26 @@ static int mxsfb_atomic_helper_check(struct drm_device *dev,
 	return ret;
 }
 
+static struct drm_framebuffer *
+mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+		const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	const struct drm_format_info *info;
+
+	info = drm_get_format_info(dev, mode_cmd);
+	if (!info)
+		return ERR_PTR(-EINVAL);
+
+	if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
+		dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return drm_gem_fb_create(dev, file_priv, mode_cmd);
+}
+
 static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
-	.fb_create		= drm_gem_fb_create,
+	.fb_create		= mxsfb_fb_create,
 	.atomic_check		= mxsfb_atomic_helper_check,
 	.atomic_commit		= drm_atomic_helper_commit,
 };
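The pitch check above is simple arithmetic: for a linear framebuffer,
pitches[0] is the byte stride between rows, and mxsfb's scanout
engine cannot skip row padding, so the stride must equal width times
bytes per pixel exactly. Worked numbers, as an illustration (XRGB8888,
cpp = 4):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t width = 1024, cpp = 4;	/* bytes per pixel */
	uint32_t tight = width * cpp;	/* 4096 */
	uint32_t padded = 4160;		/* stride with row padding */

	printf("pitch %u: %s\n", tight,
	       tight == width * cpp ? "accepted" : "rejected");
	printf("pitch %u: %s\n", padded,
	       padded == width * cpp ? "accepted" : "rejected");
	return 0;
}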
@@ -2418,12 +2418,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
 static const struct panel_desc ortustech_com43h4m85ulc = {
 	.modes = &ortustech_com43h4m85ulc_mode,
 	.num_modes = 1,
-	.bpc = 8,
+	.bpc = 6,
 	.size = {
 		.width = 56,
 		.height = 93,
 	},
-	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
 };
 
@@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
 	return 0;
 }
 
+void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
+{
+	/*
+	 * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 needs
+	 * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
+	 * to operate correctly.
+	 */
+	gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
+	gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
+}
+
 static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
 {
 	u32 quirks = 0;
@@ -304,6 +315,8 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
 	int ret;
 	u32 val;
 
+	panfrost_gpu_init_quirks(pfdev);
+
 	/* Just turn on everything for now */
 	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
 	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
@@ -357,7 +370,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
 		return err;
 	}
 
-	panfrost_gpu_init_quirks(pfdev);
 	panfrost_gpu_power_on(pfdev);
 
 	return 0;
@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
 void panfrost_gpu_power_on(struct panfrost_device *pfdev);
 void panfrost_gpu_power_off(struct panfrost_device *pfdev);
 
+void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
+
 #endif
Some files were not shown because too many files have changed in this diff.