
This is the 5.4.75 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl+j2NoACgkQONu9yGCS
 aT5J8RAAnGBQDsYW32cIwCnEMAAzderx88BD1LWUh3VUzYkUkR/BZVzfR9oBCNNo
 3JnCXiJGAmxQqNmM4U4oll1qtDKSnkEL0d3x0dzoPU0jAK2DneZZeUceGXrCsACD
 07UmEJ+pg8YwHIDWjPl/XzsOFNDZGIri2ioW5sw0UMg5oSgei+pJsnr999198KoI
 3xae/qnkBgWhD2pghHQKHHXJnQTe9+JD+g+cMEWFK36Uxgpc2bpPeZtEaQBRaZZY
 CdPPwWuzWY6fBPOYCvrYHG86oPGuR3n03AM0nXWwRPkK2fAxsodr6UpZhQ+tjxJD
 gHGsQH1PrgH+wmiAgyaASdl+kvP0B2sRykeLcGLHT1t9Z3OGU3Jo0hxW6pGb85Da
 loF+OzhrI7Jlpk9XBJq81GdDGyNvco43gLGID2gTrNpLQ/WMW0slrCYE1WvLQ0ZQ
 3u9iUXd3X330W7cZTQgap03MNFrvruwdHnQL6uO73TKCofX15QWX86Vn16GLLlna
 etA2le4GSm0yTC9G4Z5YnAIUZdvcrT6iYbmsQ/NoGhu4aJSiQe5O9j+Tb9LoCZYs
 V2fYpUYUCsWoq+6KsPHKM0wOAHQkxkCPWDjIrqzWwrRBLfjytVEnc6JQS/ZIdap6
 GejH8CeiAnvOpD5MyVGCCih+7slQadG//+z3h7WNugZkVGqFDWc=
 =2fiZ
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAl+kaQwACgkQ7G51OISz
 Hs1Zhg/+OKG0w+i1rkz8nkzqQHj8FMKL5urxngxWiT8hq8VY7rXDE9Ibk2asa23a
 NNX+En/wkSl7xPB1H+HWvvMbMkhpf7XagiSS/6Qx6q7+1nBblqMizd1/oCZcGo4q
 zwXfQRSJp7auiYuHkDxvkhVXZXXQqGCK5Hj4/6gHwpPG3/W7FIFyhIjzXxdAziA/
 8p5txOsIKyxGf6uBsHQFOxpxylkc1vKBckV/2dAGOswN86CsvGd5+8KJ/Vw4qs0R
 5EsrY22VEK2E1H3ygr5Yqg4NgXkHoSYJQUeqLZGrq2Usaeg4H/vfWQJ144Hfdr4s
 S/8csXhnLVl+fAKPrHVhifC/CZTwxQGPTkK3O2Ewy9co7kQdmiSwmlztGCqXPmHa
 cBmo7mbuoGTVvLEA0jqXOhU5sJMNp1gTHYkrdIQKu0iRF4fK4WUZxO1Jh5bTwo4j
 iuaQQi/EaT1uYoibjdonT2Blk8YqVxlZpS4fiCpS+FDW3+DysiTaHKvMHE6wXG5s
 Zj6IUlgckKZbZJLgV6k28ENs17xelj75sLlOWHMl+56MLdFiDR3HiM6h4Q62G5s6
 9TnVBQpAf+pPAs3v9FV2W8dyZTPUcrrzCt9V0cJccwBleWB14q73KSVk4ScaZ2Hz
 tRfjbtvtTsAalPdqqDic7uz8OzEHR3Mv5tB9i1/ItYJ1FtVj8vQ=
 =p+Ga
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.75' into 5.4-2.2.x-imx

This is the 5.4.75 stable release

Conflicts:
- drivers/i2c/busses/i2c-imx.c:
Drop NXP changes, which are covered by commit [2c58d5e0c7] from
upstream.

- drivers/net/can/flexcan.c:
Keep the NXP implementation; upstream patch [ca10989632] is already
covered in the NXP tree.

- drivers/usb/host/xhci.h:
Fix merge fuzz for upstream commit [2600a131e1].

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
5.4-rM2-2.2.x-imx-squashed
Andrey Zhizhikin 2020-11-05 21:04:13 +00:00
commit 4068d70849
236 changed files with 2912 additions and 1207 deletions

View File

@ -5462,6 +5462,14 @@
as generic guest with no PV drivers. Currently support
XEN HVM, KVM, HYPER_V and VMWARE guest.
xen.event_eoi_delay= [XEN]
How long to delay EOI handling in case of event
storms (jiffies). Default is 10.
xen.event_loop_timeout= [XEN]
After which time (jiffies) the event handling loop
should start to delay EOI handling. Default is 2.
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]

View File

@ -36,8 +36,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
:c:type:`v4l2_hsv_encoding` specifies which encoding is used.
.. note:: The default R'G'B' quantization is full range for all
colorspaces except for BT.2020 which uses limited range R'G'B'
quantization.
colorspaces. HSV formats are always full range.
.. tabularcolumns:: |p{6.7cm}|p{10.8cm}|
@ -169,8 +168,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
- Details
* - ``V4L2_QUANTIZATION_DEFAULT``
- Use the default quantization encoding as defined by the
colorspace. This is always full range for R'G'B' (except for the
BT.2020 colorspace) and HSV. It is usually limited range for Y'CbCr.
colorspace. This is always full range for R'G'B' and HSV.
It is usually limited range for Y'CbCr.
* - ``V4L2_QUANTIZATION_FULL_RANGE``
- Use the full range quantization encoding. I.e. the range [0…1] is
mapped to [0…255] (with possible clipping to [1…254] to avoid the
@ -180,4 +179,4 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
* - ``V4L2_QUANTIZATION_LIM_RANGE``
- Use the limited range quantization encoding. I.e. the range [0…1]
is mapped to [16…235]. Cb and Cr are mapped from [-0.5…0.5] to
[16…240].
[16…240]. Limited Range cannot be used with HSV.

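To make the quantization mappings above concrete, here is a minimal userspace C sketch of the 8-bit arithmetic this documentation describes; the scale factors (255, 219, 224) and offsets (16, 128) follow from the ranges quoted above, while the rounding choice is an assumption:

#include <stdio.h>

/* Full range: [0..1] -> [0..255]. */
static unsigned char quant_full(double v)
{
    return (unsigned char)(255.0 * v + 0.5);
}

/* Limited range: Y' in [0..1] -> [16..235]. */
static unsigned char quant_lim_y(double y)
{
    return (unsigned char)(16.0 + 219.0 * y + 0.5);
}

/* Limited range: Cb/Cr in [-0.5..0.5] -> [16..240], centered on 128. */
static unsigned char quant_lim_c(double c)
{
    return (unsigned char)(128.0 + 224.0 * c + 0.5);
}

int main(void)
{
    printf("Y'=1.0  full=%u limited=%u\n", quant_full(1.0), quant_lim_y(1.0));
    printf("Cb=-0.5 limited=%u\n", quant_lim_c(-0.5)); /* 16  */
    printf("Cb=+0.5 limited=%u\n", quant_lim_c(0.5));  /* 240 */
    return 0;
}
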
View File

@ -377,9 +377,8 @@ Colorspace BT.2020 (V4L2_COLORSPACE_BT2020)
The :ref:`itu2020` standard defines the colorspace used by Ultra-high
definition television (UHDTV). The default transfer function is
``V4L2_XFER_FUNC_709``. The default Y'CbCr encoding is
``V4L2_YCBCR_ENC_BT2020``. The default R'G'B' quantization is limited
range (!), and so is the default Y'CbCr quantization. The chromaticities
of the primary colors and the white reference are:
``V4L2_YCBCR_ENC_BT2020``. The default Y'CbCr quantization is limited range.
The chromaticities of the primary colors and the white reference are:

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 74
SUBLEVEL = 75
EXTRAVERSION =
NAME = Kleptomaniac Octopus

View File

@ -405,6 +405,13 @@ config MMU_GATHER_NO_RANGE
config HAVE_MMU_GATHER_NO_GATHER
bool
config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
bool
help
Temporary select until all architectures can be converted to have
irqs disabled over activate_mm. Architectures that do IPI based TLB
shootdowns should enable this.
config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool

View File

@ -85,7 +85,7 @@
* avoid duplicating the MB dtsi file given that IRQ from
* this intc to cpu intc are different for axs101 and axs103
*/
mb_intc: dw-apb-ictl@e0012000 {
mb_intc: interrupt-controller@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0x0 0xe0012000 0x0 0x200 >;

View File

@ -129,7 +129,7 @@
* avoid duplicating the MB dtsi file given that IRQ from
* this intc to cpu intc are different for axs101 and axs103
*/
mb_intc: dw-apb-ictl@e0012000 {
mb_intc: interrupt-controller@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0x0 0xe0012000 0x0 0x200 >;

View File

@ -135,7 +135,7 @@
* avoid duplicating the MB dtsi file given that IRQ from
* this intc to cpu intc are different for axs101 and axs103
*/
mb_intc: dw-apb-ictl@e0012000 {
mb_intc: interrupt-controller@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0x0 0xe0012000 0x0 0x200 >;

View File

@ -46,7 +46,7 @@
};
mb_intc: dw-apb-ictl@e0012000 {
mb_intc: interrupt-controller@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0xe0012000 0x200 >;

View File

@ -54,7 +54,7 @@
};
mb_intc: dw-apb-ictl@e0012000 {
mb_intc: interrupt-controller@e0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
reg = < 0xe0012000 0x200 >;

View File

@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
int i, has_interrupts, irq;
int i, has_interrupts, irq = -1;
int counter_size; /* in bits */
union cc_name {
@ -637,19 +637,28 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
.attr_groups = arc_pmu->attr_groups,
};
if (has_interrupts && (irq = platform_get_irq(pdev, 0) >= 0)) {
if (has_interrupts) {
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
int ret;
arc_pmu->irq = irq;
arc_pmu->irq = irq;
/* intc map function ensures irq_set_percpu_devid() called */
request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
this_cpu_ptr(&arc_pmu_cpu));
/* intc map function ensures irq_set_percpu_devid() called */
ret = request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
this_cpu_ptr(&arc_pmu_cpu));
if (!ret)
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
else
irq = -1;
}
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
} else {
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}
if (irq == -1)
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
/*
* perf parser doesn't really like '-' symbol in events name, so let's
* use '_' in arc pct name as it goes to kernel PMU event prefix.

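The hunk above fixes a classic operator-precedence bug: in the removed line, irq = platform_get_irq(pdev, 0) >= 0 assigns the result of the comparison (0 or 1) to irq, because >= binds tighter than =. A standalone C sketch of the pitfall, with fake_get_irq() as a hypothetical stand-in:

#include <stdio.h>

/* fake_get_irq() is a hypothetical stand-in for platform_get_irq(). */
static int fake_get_irq(void)
{
    return 42;
}

int main(void)
{
    int irq;

    /* The removed line's bug: '>=' binds tighter than '=', so irq is
     * assigned the comparison result, not the IRQ number. */
    if ((irq = fake_get_irq() >= 0))
        printf("buggy:   irq = %d\n", irq);    /* prints 1  */

    /* The fixed form: assign first, then test. */
    irq = fake_get_irq();
    if (irq >= 0)
        printf("correct: irq = %d\n", irq);    /* prints 42 */
    return 0;
}
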
View File

@ -507,8 +507,10 @@ config ARCH_S3C24XX
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
select NEED_MACH_IO_H
select S3C2410_WATCHDOG
select SAMSUNG_ATAGS
select USE_OF
select WATCHDOG
help
Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443
and S3C2450 SoCs based systems, such as the Simtec Electronics BAST

View File

@ -192,6 +192,7 @@
fixed-link {
speed = <1000>;
full-duplex;
pause;
};
};
};

View File

@ -328,7 +328,7 @@
status = "disabled";
};
target-module@56000000 {
sgx_module: target-module@56000000 {
compatible = "ti,sysc-omap4", "ti,sysc";
reg = <0x5600fe00 0x4>,
<0x5600fe10 0x4>;

View File

@ -74,3 +74,13 @@
};
/include/ "omap443x-clocks.dtsi"
/*
* Use dpll_per for sgx at 153.6MHz like droid4 stock v3.0.8 Android kernel
*/
&sgx_module {
assigned-clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 24>,
<&dpll_per_m7x2_ck>;
assigned-clock-rates = <0>, <153600000>;
assigned-clock-parents = <&dpll_per_m7x2_ck>;
};

View File

@ -52,34 +52,26 @@
};
};
xxti: oscillator-0 {
compatible = "fixed-clock";
clock-frequency = <0>;
clock-output-names = "xxti";
#clock-cells = <0>;
};
xusbxti: oscillator-1 {
compatible = "fixed-clock";
clock-frequency = <0>;
clock-output-names = "xusbxti";
#clock-cells = <0>;
};
soc {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
external-clocks {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <0>;
xxti: oscillator@0 {
compatible = "fixed-clock";
reg = <0>;
clock-frequency = <0>;
clock-output-names = "xxti";
#clock-cells = <0>;
};
xusbxti: oscillator@1 {
compatible = "fixed-clock";
reg = <1>;
clock-frequency = <0>;
clock-output-names = "xusbxti";
#clock-cells = <0>;
};
};
onenand: onenand@b0600000 {
compatible = "samsung,s5pv210-onenand";
reg = <0xb0600000 0x2000>,
@ -100,19 +92,16 @@
};
clocks: clock-controller@e0100000 {
compatible = "samsung,s5pv210-clock", "simple-bus";
compatible = "samsung,s5pv210-clock";
reg = <0xe0100000 0x10000>;
clock-names = "xxti", "xusbxti";
clocks = <&xxti>, <&xusbxti>;
#clock-cells = <1>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
};
pmu_syscon: syscon@e0108000 {
compatible = "samsung-s5pv210-pmu", "syscon";
reg = <0xe0108000 0x8000>;
};
pmu_syscon: syscon@e0108000 {
compatible = "samsung-s5pv210-pmu", "syscon";
reg = <0xe0108000 0x8000>;
};
pinctrl0: pinctrl@e0200000 {
@ -128,35 +117,28 @@
};
};
amba {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
ranges;
pdma0: dma@e0900000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0xe0900000 0x1000>;
interrupt-parent = <&vic0>;
interrupts = <19>;
clocks = <&clocks CLK_PDMA0>;
clock-names = "apb_pclk";
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
};
pdma0: dma@e0900000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0xe0900000 0x1000>;
interrupt-parent = <&vic0>;
interrupts = <19>;
clocks = <&clocks CLK_PDMA0>;
clock-names = "apb_pclk";
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
};
pdma1: dma@e0a00000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0xe0a00000 0x1000>;
interrupt-parent = <&vic0>;
interrupts = <20>;
clocks = <&clocks CLK_PDMA1>;
clock-names = "apb_pclk";
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
};
pdma1: dma@e0a00000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0xe0a00000 0x1000>;
interrupt-parent = <&vic0>;
interrupts = <20>;
clocks = <&clocks CLK_PDMA1>;
clock-names = "apb_pclk";
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
};
spi0: spi@e1300000 {
@ -229,43 +211,36 @@
status = "disabled";
};
audio-subsystem {
compatible = "samsung,s5pv210-audss", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
clk_audss: clock-controller@eee10000 {
compatible = "samsung,s5pv210-audss-clock";
reg = <0xeee10000 0x1000>;
clock-names = "hclk", "xxti",
"fout_epll",
"sclk_audio0";
clocks = <&clocks DOUT_HCLKP>, <&xxti>,
<&clocks FOUT_EPLL>,
<&clocks SCLK_AUDIO0>;
#clock-cells = <1>;
};
clk_audss: clock-controller@eee10000 {
compatible = "samsung,s5pv210-audss-clock";
reg = <0xeee10000 0x1000>;
clock-names = "hclk", "xxti",
"fout_epll",
"sclk_audio0";
clocks = <&clocks DOUT_HCLKP>, <&xxti>,
<&clocks FOUT_EPLL>,
<&clocks SCLK_AUDIO0>;
#clock-cells = <1>;
};
i2s0: i2s@eee30000 {
compatible = "samsung,s5pv210-i2s";
reg = <0xeee30000 0x1000>;
interrupt-parent = <&vic2>;
interrupts = <16>;
dma-names = "rx", "tx", "tx-sec";
dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
clock-names = "iis",
"i2s_opclk0",
"i2s_opclk1";
clocks = <&clk_audss CLK_I2S>,
<&clk_audss CLK_I2S>,
<&clk_audss CLK_DOUT_AUD_BUS>;
samsung,idma-addr = <0xc0010000>;
pinctrl-names = "default";
pinctrl-0 = <&i2s0_bus>;
#sound-dai-cells = <0>;
status = "disabled";
};
i2s0: i2s@eee30000 {
compatible = "samsung,s5pv210-i2s";
reg = <0xeee30000 0x1000>;
interrupt-parent = <&vic2>;
interrupts = <16>;
dma-names = "rx", "tx", "tx-sec";
dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
clock-names = "iis",
"i2s_opclk0",
"i2s_opclk1";
clocks = <&clk_audss CLK_I2S>,
<&clk_audss CLK_I2S>,
<&clk_audss CLK_DOUT_AUD_BUS>;
samsung,idma-addr = <0xc0010000>;
pinctrl-names = "default";
pinctrl-0 = <&i2s0_bus>;
#sound-dai-cells = <0>;
status = "disabled";
};
i2s1: i2s@e2100000 {

View File

@ -680,6 +680,40 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
/*
* Arm32 hardware does not always report a watchpoint hit address that matches
* one of the watchpoints set. It can also report an address "near" the
watchpoint if a single instruction accesses both watched and unwatched
* addresses. There is no straight-forward way, short of disassembling the
* offending instruction, to map that address back to the watchpoint. This
* function computes the distance of the memory access from the watchpoint as a
heuristic for the likelihood that a given access triggered the watchpoint.
*
* See this same function in the arm64 platform code, which has the same
* problem.
*
* The function returns the distance of the address from the bytes watched by
* the watchpoint. In case of an exact match, it returns 0.
*/
static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
struct arch_hw_breakpoint_ctrl *ctrl)
{
u32 wp_low, wp_high;
u32 lens, lene;
lens = __ffs(ctrl->len);
lene = __fls(ctrl->len);
wp_low = val + lens;
wp_high = val + lene;
if (addr < wp_low)
return wp_low - addr;
else if (addr > wp_high)
return addr - wp_high;
else
return 0;
}
static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
struct arch_hw_breakpoint *info)
{
@ -689,23 +723,25 @@ static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
int i, access;
u32 val, ctrl_reg, alignment_mask;
int i, access, closest_match = 0;
u32 min_dist = -1, dist;
u32 val, ctrl_reg;
struct perf_event *wp, **slots;
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
slots = this_cpu_ptr(wp_on_reg);
/*
* Find all watchpoints that match the reported address. If no exact
* match is found, attribute the hit to the closest watchpoint.
*/
rcu_read_lock();
for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
wp = slots[i];
if (wp == NULL)
goto unlock;
continue;
info = counter_arch_bp(wp);
/*
* The DFAR is an unknown value on debug architectures prior
* to 7.1. Since we only allow a single watchpoint on these
@ -714,33 +750,31 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
*/
if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
BUG_ON(i > 0);
info = counter_arch_bp(wp);
info->trigger = wp->attr.bp_addr;
} else {
if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
else
alignment_mask = 0x3;
/* Check if the watchpoint value matches. */
val = read_wb_reg(ARM_BASE_WVR + i);
if (val != (addr & ~alignment_mask))
goto unlock;
/* Possible match, check the byte address select. */
ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
decode_ctrl_reg(ctrl_reg, &ctrl);
if (!((1 << (addr & alignment_mask)) & ctrl.len))
goto unlock;
/* Check that the access type matches. */
if (debug_exception_updates_fsr()) {
access = (fsr & ARM_FSR_ACCESS_MASK) ?
HW_BREAKPOINT_W : HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
goto unlock;
continue;
}
val = read_wb_reg(ARM_BASE_WVR + i);
ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
decode_ctrl_reg(ctrl_reg, &ctrl);
dist = get_distance_from_watchpoint(addr, val, &ctrl);
if (dist < min_dist) {
min_dist = dist;
closest_match = i;
}
/* Is this an exact match? */
if (dist != 0)
continue;
/* We have a winner. */
info = counter_arch_bp(wp);
info->trigger = addr;
}
@ -762,13 +796,23 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
* we can single-step over the watchpoint trigger.
*/
if (!is_default_overflow_handler(wp))
goto unlock;
continue;
step:
enable_single_step(wp, instruction_pointer(regs));
unlock:
rcu_read_unlock();
}
if (min_dist > 0 && min_dist != -1) {
/* No exact match found. */
wp = slots[closest_match];
info = counter_arch_bp(wp);
info->trigger = addr;
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
perf_bp_event(wp, regs);
if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
}
rcu_read_unlock();
}
static void watchpoint_single_step_handler(unsigned long pc)

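A minimal userspace model of the distance heuristic added above, assuming the byte-address-select convention where an 8-byte watchpoint has len == 0xff; __builtin_ctz()/__builtin_clz() stand in for the kernel's __ffs()/__fls():

#include <stdint.h>
#include <stdio.h>

/* Userspace restatement of get_distance_from_watchpoint(). 'len' is
 * the byte-address-select mask (0xff for an 8-byte watchpoint), so
 * its first and last set bits give the low/high watched byte offsets. */
static uint32_t wp_distance(uint32_t addr, uint32_t val, uint32_t len)
{
    uint32_t wp_low  = val + __builtin_ctz(len);       /* first watched byte */
    uint32_t wp_high = val + 31 - __builtin_clz(len);  /* last watched byte  */

    if (addr < wp_low)
        return wp_low - addr;
    if (addr > wp_high)
        return addr - wp_high;
    return 0;
}

int main(void)
{
    /* 8-byte watchpoint at 0x1000 watches [0x1000, 0x1007]. */
    printf("%u\n", wp_distance(0x1004, 0x1000, 0xff)); /* 0: exact hit   */
    printf("%u\n", wp_distance(0x0ffc, 0x1000, 0xff)); /* 4: below range */
    printf("%u\n", wp_distance(0x100a, 0x1000, 0xff)); /* 3: above range */
    return 0;
}
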
View File

@ -241,6 +241,7 @@ config SAMSUNG_PM_DEBUG
depends on PM && DEBUG_KERNEL
depends on PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210
depends on DEBUG_EXYNOS_UART || DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART
depends on DEBUG_LL && MMU
help
Say Y here if you want verbose debugging from the PM Suspend and
Resume code. See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>

View File

@ -54,6 +54,7 @@ config ARCH_BCM_IPROC
config ARCH_BERLIN
bool "Marvell Berlin SoC Family"
select DW_APB_ICTL
select DW_APB_TIMER_OF
select GPIOLIB
select PINCTRL
help

View File

@ -470,6 +470,7 @@
mmc-hs200-1_8v;
mmc-hs400-1_8v;
non-removable;
full-pwr-cycle-in-suspend;
status = "okay";
};

View File

@ -209,6 +209,7 @@ enum vcpu_sysreg {
#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
#define cp14_DBGVCR (DBGVCR32_EL2 * 2)
#define NR_COPRO_REGS (NR_SYS_REGS * 2)

View File

@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node);
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
if (node == NUMA_NO_NODE)
return cpu_all_mask;
return node_to_cpumask_map[node];
}
#endif

View File

@ -35,21 +35,23 @@ void store_cpu_topology(unsigned int cpuid)
if (mpidr & MPIDR_UP_BITMASK)
return;
/* Create cpu topology mapping based on MPIDR. */
if (mpidr & MPIDR_MT_BITMASK) {
/* Multiprocessor system : Multi-threads per core */
cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
} else {
/* Multiprocessor system : Single-thread per core */
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
}
/*
* This would be the place to create cpu topology based on MPIDR.
*
* However, it cannot be trusted to depict the actual topology; some
* pieces of the architecture enforce an artificial cap on Aff0 values
* (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
* artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
* having absolutely no relationship to the actual underlying system
* topology, and cannot be reasonably used as core / package ID.
*
* If the MT bit is set, Aff0 *could* be used to define a thread ID, but
* we still wouldn't be able to obtain a sane core ID. This means we
* need to entirely ignore MPIDR for any topology deduction.
*/
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = cpuid;
cpuid_topo->package_id = cpu_to_node(cpuid);
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,

View File

@ -1746,9 +1746,9 @@ static const struct sys_reg_desc cp14_regs[] = {
{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
DBG_BCR_BVR_WCR_WVR(1),
/* DBGDCCINT */
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
/* DBGDSCRext */
{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
DBG_BCR_BVR_WCR_WVR(2),
/* DBGDTR[RT]Xint */
{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
@ -1763,7 +1763,7 @@ static const struct sys_reg_desc cp14_regs[] = {
{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
DBG_BCR_BVR_WCR_WVR(6),
/* DBGVCR */
{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
DBG_BCR_BVR_WCR_WVR(7),
DBG_BCR_BVR_WCR_WVR(8),
DBG_BCR_BVR_WCR_WVR(9),

View File

@ -46,7 +46,11 @@ EXPORT_SYMBOL(node_to_cpumask_map);
*/
const struct cpumask *cpumask_of_node(int node)
{
if (WARN_ON(node >= nr_node_ids))
if (node == NUMA_NO_NODE)
return cpu_all_mask;
if (WARN_ON(node < 0 || node >= nr_node_ids))
return cpu_none_mask;
if (WARN_ON(node_to_cpumask_map[node] == NULL))

View File

@ -41,7 +41,7 @@ obj-y += esi_stub.o # must be in kernel proper
endif
obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o
obj-$(CONFIG_BINFMT_ELF) += elfcore.o
obj-$(CONFIG_ELF_CORE) += elfcore.o
# fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31

View File

@ -396,83 +396,9 @@ static void kretprobe_trampoline(void)
{
}
/*
* At this point the target function has been tricked into
* returning into our trampoline. Lookup the associated instance
* and then:
* - call the handler function
* - cleanup by marking the instance as unused
* - long jump back to the original return address
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =
((struct fnptr *)kretprobe_trampoline)->ip;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
* task either because an multiple functions in the call path
* have a return probe installed on them, and/or more than one return
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
regs->cr_iip = orig_ret_address;
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL);
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
@ -485,6 +411,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->b0;
ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;

View File

@ -6,7 +6,7 @@
* for more details.
*
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki
* Copyright (C) 2000, 2001, 2002, 2003, 2005, 2020 Maciej W. Rozycki
*/
#include <linux/console.h>
#include <linux/export.h>
@ -15,6 +15,7 @@
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqnr.h>
#include <linux/memblock.h>
#include <linux/param.h>
#include <linux/percpu-defs.h>
#include <linux/sched.h>
@ -22,6 +23,7 @@
#include <linux/types.h>
#include <linux/pm.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@ -29,7 +31,9 @@
#include <asm/irq.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/reboot.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <asm/traps.h>
#include <asm/wbflush.h>
@ -166,6 +170,9 @@ void __init plat_mem_setup(void)
ioport_resource.start = ~0UL;
ioport_resource.end = 0UL;
/* Stay away from the firmware working memory area for now. */
memblock_reserve(PHYS_OFFSET, __pa_symbol(&_text) - PHYS_OFFSET);
}
/*

View File

@ -147,6 +147,7 @@ config PPC
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_EXTABLE_SORT
@ -1021,6 +1022,19 @@ config FSL_RIO
Include support for RapidIO controller on Freescale embedded
processors (MPC8548, MPC8641, etc).
config PPC_RTAS_FILTER
bool "Enable filtering of RTAS syscalls"
default y
depends on PPC_RTAS
help
The RTAS syscall API has security issues that could be used to
compromise system integrity. This option enforces restrictions on the
RTAS calls and arguments passed by userspace programs to mitigate
these issues.
Say Y unless you know what you are doing and the filter is causing
problems for you.
endmenu
config NONSTATIC_KERNEL

View File

@ -20,7 +20,7 @@ struct drmem_lmb {
struct drmem_lmb_info {
struct drmem_lmb *lmbs;
int n_lmbs;
u32 lmb_size;
u64 lmb_size;
};
extern struct drmem_lmb_info *drmem_info;
@ -79,7 +79,7 @@ struct of_drconf_cell_v2 {
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
static inline u32 drmem_lmb_size(void)
static inline u64 drmem_lmb_size(void)
{
return drmem_info->lmb_size;
}

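The u32 to u64 widening above matters as soon as a logical memory block reaches 4 GiB; a short C illustration of the truncation the narrower field would cause:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* An LMB size of 4 GiB does not fit in 32 bits: the old u32
     * field would silently truncate it to zero. */
    uint64_t lmb_size = 4ULL << 30;            /* 0x100000000 */
    uint32_t truncated = (uint32_t)lmb_size;   /* 0x0         */

    printf("u64: %#llx  u32: %#x\n",
           (unsigned long long)lmb_size, truncated);
    return 0;
}
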
View File

@ -216,7 +216,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
switch_mm(prev, next, current);
switch_mm_irqs_off(prev, next, current);
}
/* We don't currently use enter_lazy_tlb() for anything */

View File

@ -843,7 +843,7 @@ BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
load_segment_registers:
_GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */
mtctr r0 /* for context 0 */
li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */

View File

@ -940,6 +940,147 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
return NULL;
}
#ifdef CONFIG_PPC_RTAS_FILTER
/*
* The sys_rtas syscall, as originally designed, allows root to pass
* arbitrary physical addresses to RTAS calls. A number of RTAS calls
* can be abused to write to arbitrary memory and do other things that
* are potentially harmful to system integrity, and thus should only
* be used inside the kernel and not exposed to userspace.
*
* All known legitimate users of the sys_rtas syscall will only ever
* pass addresses that fall within the RMO buffer, and use a known
* subset of RTAS calls.
*
* Accordingly, we filter RTAS requests to check that the call is
* permitted, and that provided pointers fall within the RMO buffer.
* The rtas_filters list contains an entry for each permitted call,
* with the indexes of the parameters which are expected to contain
* addresses and sizes of buffers allocated inside the RMO buffer.
*/
struct rtas_filter {
const char *name;
int token;
/* Indexes into the args buffer, -1 if not used */
int buf_idx1;
int size_idx1;
int buf_idx2;
int size_idx2;
int fixed_size;
};
static struct rtas_filter rtas_filters[] __ro_after_init = {
{ "ibm,activate-firmware", -1, -1, -1, -1, -1 },
{ "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */
{ "display-character", -1, -1, -1, -1, -1 },
{ "ibm,display-message", -1, 0, -1, -1, -1 },
{ "ibm,errinjct", -1, 2, -1, -1, -1, 1024 },
{ "ibm,close-errinjct", -1, -1, -1, -1, -1 },
{ "ibm,open-errinjct", -1, -1, -1, -1, -1 },
{ "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 },
{ "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 },
{ "ibm,get-indices", -1, 2, 3, -1, -1 },
{ "get-power-level", -1, -1, -1, -1, -1 },
{ "get-sensor-state", -1, -1, -1, -1, -1 },
{ "ibm,get-system-parameter", -1, 1, 2, -1, -1 },
{ "get-time-of-day", -1, -1, -1, -1, -1 },
{ "ibm,get-vpd", -1, 0, -1, 1, 2 },
{ "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
{ "ibm,platform-dump", -1, 4, 5, -1, -1 },
{ "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
{ "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
{ "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
{ "ibm,set-eeh-option", -1, -1, -1, -1, -1 },
{ "set-indicator", -1, -1, -1, -1, -1 },
{ "set-power-level", -1, -1, -1, -1, -1 },
{ "set-time-for-power-on", -1, -1, -1, -1, -1 },
{ "ibm,set-system-parameter", -1, 1, -1, -1, -1 },
{ "set-time-of-day", -1, -1, -1, -1, -1 },
{ "ibm,suspend-me", -1, -1, -1, -1, -1 },
{ "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 },
{ "ibm,update-properties", -1, 0, -1, -1, -1, 4096 },
{ "ibm,physical-attestation", -1, 0, 1, -1, -1 },
};
static bool in_rmo_buf(u32 base, u32 end)
{
return base >= rtas_rmo_buf &&
base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) &&
base <= end &&
end >= rtas_rmo_buf &&
end < (rtas_rmo_buf + RTAS_RMOBUF_MAX);
}
static bool block_rtas_call(int token, int nargs,
struct rtas_args *args)
{
int i;
for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
struct rtas_filter *f = &rtas_filters[i];
u32 base, size, end;
if (token != f->token)
continue;
if (f->buf_idx1 != -1) {
base = be32_to_cpu(args->args[f->buf_idx1]);
if (f->size_idx1 != -1)
size = be32_to_cpu(args->args[f->size_idx1]);
else if (f->fixed_size)
size = f->fixed_size;
else
size = 1;
end = base + size - 1;
if (!in_rmo_buf(base, end))
goto err;
}
if (f->buf_idx2 != -1) {
base = be32_to_cpu(args->args[f->buf_idx2]);
if (f->size_idx2 != -1)
size = be32_to_cpu(args->args[f->size_idx2]);
else if (f->fixed_size)
size = f->fixed_size;
else
size = 1;
end = base + size - 1;
/*
* Special case for ibm,configure-connector where the
* address can be 0
*/
if (!strcmp(f->name, "ibm,configure-connector") &&
base == 0)
return false;
if (!in_rmo_buf(base, end))
goto err;
}
return false;
}
err:
pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n",
token, nargs, current->comm);
return true;
}
#else
static bool block_rtas_call(int token, int nargs,
struct rtas_args *args)
{
return false;
}
#endif /* CONFIG_PPC_RTAS_FILTER */
/* We assume to be passed big endian arguments */
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
@ -977,6 +1118,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
args.rets = &args.args[nargs];
memset(args.rets, 0, nret * sizeof(rtas_arg_t));
if (block_rtas_call(token, nargs, &args))
return -EINVAL;
/* Need to handle ibm,suspend_me call specially */
if (token == ibm_suspend_me_token) {
@ -1038,6 +1182,9 @@ void __init rtas_initialize(void)
unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
u32 base, size, entry;
int no_base, no_size, no_entry;
#ifdef CONFIG_PPC_RTAS_FILTER
int i;
#endif
/* Get RTAS dev node and fill up our "rtas" structure with infos
* about it.
@ -1077,6 +1224,12 @@ void __init rtas_initialize(void)
#ifdef CONFIG_RTAS_ERROR_LOGGING
rtas_last_error_token = rtas_token("rtas-last-error");
#endif
#ifdef CONFIG_PPC_RTAS_FILTER
for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
rtas_filters[i].token = rtas_token(rtas_filters[i].name);
}
#endif
}
int __init early_init_dt_scan_rtas(unsigned long node,

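A standalone sketch of the containment test performed by in_rmo_buf() above; rmo_buf and RMO_SIZE are hypothetical values chosen only for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical RMO buffer placement, for illustration only. */
static uint32_t rmo_buf = 0x10000;
#define RMO_SIZE 0x8000

/* Same containment test as in_rmo_buf(): both ends of the user-supplied
 * range must fall inside the RMO buffer, and the range must not wrap
 * (base <= end). */
static bool in_rmo(uint32_t base, uint32_t end)
{
    return base >= rmo_buf && base < rmo_buf + RMO_SIZE &&
           base <= end &&
           end >= rmo_buf && end < rmo_buf + RMO_SIZE;
}

int main(void)
{
    printf("%d\n", in_rmo(0x10100, 0x101ff)); /* 1: fully inside    */
    printf("%d\n", in_rmo(0x10100, 0x18000)); /* 0: end past buffer */
    printf("%d\n", in_rmo(0x0ff00, 0x10010)); /* 0: base outside    */
    printf("%d\n", in_rmo(0x10100, 0x000ff)); /* 0: wrapped range   */
    return 0;
}
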
View File

@ -31,29 +31,27 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
/*
* SMT snooze delay stuff, 64-bit only for now
*/
#ifdef CONFIG_PPC64
/* Time in microseconds we delay before sleeping in the idle loop */
static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
/*
* Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
* smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
* 2014:
*
* "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
* up the kernel code."
*
* powerpc-utils stopped using it as of 1.3.8. At some point in the future this
* code should be removed.
*/
static ssize_t store_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
ssize_t ret;
long snooze;
ret = sscanf(buf, "%ld", &snooze);
if (ret != 1)
return -EINVAL;
per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
current->comm, current->pid);
return count;
}
@ -61,9 +59,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
current->comm, current->pid);
return sprintf(buf, "100\n");
}
static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
@ -71,16 +69,10 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
static int __init setup_smt_snooze_delay(char *str)
{
unsigned int cpu;
long snooze;
if (!cpu_has_feature(CPU_FTR_SMT))
return 1;
snooze = simple_strtol(str, NULL, 10);
for_each_possible_cpu(cpu)
per_cpu(smt_snooze_delay, cpu) = snooze;
pr_warn("smt-snooze-delay command line option has no effect\n");
return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);

View File

@ -877,7 +877,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
{
unsigned int ra, rb, t, i, sel, instr, rc;
const void __user *addr;
u8 vbuf[16], *vdst;
u8 vbuf[16] __aligned(16), *vdst;
unsigned long ea, msr, msr_mask;
bool swap;

View File

@ -5191,6 +5191,12 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
case KVM_PPC_ALLOCATE_HTAB: {
u32 htab_order;
/* If we're a nested hypervisor, we currently only support radix */
if (kvmhv_on_pseries()) {
r = -EOPNOTSUPP;
break;
}
r = -EFAULT;
if (get_user(htab_order, (u32 __user *)argp))
break;

View File

@ -293,14 +293,7 @@ grackle_wake_up:
* we do any r1 memory access as we are not sure they
* are in a sane state above the first 256Mb region
*/
li r0,16 /* load up segment register values */
mtctr r0 /* for context 0 */
lis r3,0x2000 /* Ku = 1, VSID = 0 */
li r4,0
3: mtsrin r3,r4
addi r3,r3,0x111 /* increment VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
bl load_segment_registers
sync
isync

View File

@ -179,14 +179,14 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
return count;
}
static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
static void create_elog_obj(uint64_t id, size_t size, uint64_t type)
{
struct elog_obj *elog;
int rc;
elog = kzalloc(sizeof(*elog), GFP_KERNEL);
if (!elog)
return NULL;
return;
elog->kobj.kset = elog_kset;
@ -219,18 +219,37 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
rc = kobject_add(&elog->kobj, NULL, "0x%llx", id);
if (rc) {
kobject_put(&elog->kobj);
return NULL;
return;
}
/*
* As soon as the sysfs file for this elog is created/activated there is
* a chance the opal_errd daemon (or any userspace) might read and
* acknowledge the elog before kobject_uevent() is called. If that
* happens then there is a potential race between
* elog_ack_store->kobject_put() and kobject_uevent() which leads to a
* use-after-free of a kernfs object resulting in a kernel crash.
*
* To avoid that, we need to take a reference on behalf of the bin file,
* so that our reference remains valid while we call kobject_uevent().
* We then drop our reference before exiting the function, leaving the
* bin file to drop the last reference (if it hasn't already).
*/
/* Take a reference for the bin file */
kobject_get(&elog->kobj);
rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr);
if (rc) {
if (rc == 0) {
kobject_uevent(&elog->kobj, KOBJ_ADD);
} else {
/* Drop the reference taken for the bin file */
kobject_put(&elog->kobj);
return NULL;
}
kobject_uevent(&elog->kobj, KOBJ_ADD);
/* Drop our reference */
kobject_put(&elog->kobj);
return elog;
return;
}
static irqreturn_t elog_event(int irq, void *data)

View File

@ -43,7 +43,7 @@
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#define DBG(fmt...) do { } while (0)
#endif
static void pnv_smp_setup_cpu(int cpu)

View File

@ -279,7 +279,7 @@ static int dlpar_offline_lmb(struct drmem_lmb *lmb)
return dlpar_change_lmb_state(lmb, false);
}
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
{
unsigned long block_sz, start_pfn;
int sections_per_block;
@ -310,10 +310,11 @@ out:
static int pseries_remove_mem_node(struct device_node *np)
{
const __be32 *regs;
const __be32 *prop;
unsigned long base;
unsigned int lmb_size;
unsigned long lmb_size;
int ret = -EINVAL;
int addr_cells, size_cells;
/*
* Check to see if we are actually removing memory
@ -324,12 +325,19 @@ static int pseries_remove_mem_node(struct device_node *np)
/*
* Find the base address and size of the memblock
*/
regs = of_get_property(np, "reg", NULL);
if (!regs)
prop = of_get_property(np, "reg", NULL);
if (!prop)
return ret;
base = be64_to_cpu(*(unsigned long *)regs);
lmb_size = be32_to_cpu(regs[3]);
addr_cells = of_n_addr_cells(np);
size_cells = of_n_size_cells(np);
/*
* "reg" property represents (addr,size) tuple.
*/
base = of_read_number(prop, addr_cells);
prop += addr_cells;
lmb_size = of_read_number(prop, size_cells);
pseries_remove_memblock(base, lmb_size);
return 0;
@ -620,7 +628,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
#else
static inline int pseries_remove_memblock(unsigned long base,
unsigned int memblock_size)
unsigned long memblock_size)
{
return -EOPNOTSUPP;
}
@ -953,10 +961,11 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
static int pseries_add_mem_node(struct device_node *np)
{
const __be32 *regs;
const __be32 *prop;
unsigned long base;
unsigned int lmb_size;
unsigned long lmb_size;
int ret = -EINVAL;
int addr_cells, size_cells;
/*
* Check to see if we are actually adding memory
@ -967,12 +976,18 @@ static int pseries_add_mem_node(struct device_node *np)
/*
* Find the base and size of the memblock
*/
regs = of_get_property(np, "reg", NULL);
if (!regs)
prop = of_get_property(np, "reg", NULL);
if (!prop)
return ret;
base = be64_to_cpu(*(unsigned long *)regs);
lmb_size = be32_to_cpu(regs[3]);
addr_cells = of_n_addr_cells(np);
size_cells = of_n_size_cells(np);
/*
* "reg" property represents (addr,size) tuple.
*/
base = of_read_number(prop, addr_cells);
prop += addr_cells;
lmb_size = of_read_number(prop, size_cells);
/*
* Update memory region to represent the memory add

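The cell-based parsing above replaces a hard-coded assumption that "reg" has a fixed <u64 addr, u32 size> layout. A minimal userspace model of of_read_number() and the (addr, size) decoding, using a made-up reg property for illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal model of of_read_number(): fold 'cells' big-endian 32-bit
 * device-tree cells into one 64-bit value. */
static uint64_t read_number(const uint32_t *prop, int cells)
{
    uint64_t r = 0;

    while (cells--)
        r = (r << 32) | ntohl(*prop++);
    return r;
}

int main(void)
{
    /* reg = <0x1 0x00000000  0x0 0x10000000>: a 4 GiB base address and
     * a 256 MiB LMB, with #address-cells = 2 and #size-cells = 2. */
    uint32_t reg[4] = { htonl(0x1), htonl(0x0), htonl(0x0), htonl(0x10000000) };
    uint64_t base = read_number(reg, 2);
    uint64_t size = read_number(reg + 2, 2);

    printf("base=%#llx size=%#llx\n",
           (unsigned long long)base, (unsigned long long)size);
    return 0;
}
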
View File

@ -10,4 +10,7 @@
/* vDSO location */
#define AT_SYSINFO_EHDR 33
/* entries in ARCH_DLINFO */
#define AT_VECTOR_SIZE_ARCH 1
#endif /* _UAPI_ASM_RISCV_AUXVEC_H */

View File

@ -360,22 +360,23 @@ ENTRY(startup_kdump)
# the save area and does disabled wait with a faulty address.
#
ENTRY(startup_pgm_check_handler)
stmg %r0,%r15,__LC_SAVE_AREA_SYNC
la %r1,4095
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r1)
mvc __LC_GPREGS_SAVE_AREA-4095(128,%r1),__LC_SAVE_AREA_SYNC
mvc __LC_PSW_SAVE_AREA-4095(16,%r1),__LC_PGM_OLD_PSW
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
la %r8,4095
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8)
stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8)
mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC
mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW
mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW
ni __LC_RETURN_PSW,0xfc # remove IO and EX bits
ni __LC_RETURN_PSW+1,0xfb # remove MCHK bit
oi __LC_RETURN_PSW+1,0x2 # set wait state bit
larl %r2,.Lold_psw_disabled_wait
stg %r2,__LC_PGM_NEW_PSW+8
l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r2)
larl %r9,.Lold_psw_disabled_wait
stg %r9,__LC_PGM_NEW_PSW+8
l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9)
brasl %r14,print_pgm_check_info
.Lold_psw_disabled_wait:
la %r1,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
la %r8,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
lpswe __LC_RETURN_PSW # disabled wait
.Ldump_info_stack:
.long 0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD

View File

@ -354,8 +354,9 @@ static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags;
#define CLOCK_SYNC_HAS_STP 0
#define CLOCK_SYNC_STP 1
#define CLOCK_SYNC_HAS_STP 0
#define CLOCK_SYNC_STP 1
#define CLOCK_SYNC_STPINFO_VALID 2
/*
* The get_clock function for the physical clock. It will get the current
@ -592,6 +593,22 @@ void stp_queue_work(void)
queue_work(time_sync_wq, &stp_work);
}
static int __store_stpinfo(void)
{
int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
if (rc)
clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
else
set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
return rc;
}
static int stpinfo_valid(void)
{
return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
}
static int stp_sync_clock(void *data)
{
struct clock_sync_data *sync = data;
@ -613,8 +630,7 @@ static int stp_sync_clock(void *data)
if (rc == 0) {
sync->clock_delta = clock_delta;
clock_sync_global(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
rc = __store_stpinfo();
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
}
@ -659,7 +675,7 @@ static void stp_work_fn(struct work_struct *work)
if (rc)
goto out_unlock;
rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
rc = __store_stpinfo();
if (rc || stp_info.c == 0)
goto out_unlock;
@ -696,10 +712,14 @@ static ssize_t stp_ctn_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%016llx\n",
*(unsigned long long *) stp_info.ctnid);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%016llx\n",
*(unsigned long long *) stp_info.ctnid);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
@ -708,9 +728,13 @@ static ssize_t stp_ctn_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.ctn);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%i\n", stp_info.ctn);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
@ -719,9 +743,13 @@ static ssize_t stp_dst_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x2000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid() && (stp_info.vbits & 0x2000))
ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
@ -730,9 +758,13 @@ static ssize_t stp_leap_seconds_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x8000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid() && (stp_info.vbits & 0x8000))
ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
@ -741,9 +773,13 @@ static ssize_t stp_stratum_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
@ -752,9 +788,13 @@ static ssize_t stp_time_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x0800))
return -ENODATA;
return sprintf(buf, "%i\n", (int) stp_info.tto);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid() && (stp_info.vbits & 0x0800))
ret = sprintf(buf, "%i\n", (int) stp_info.tto);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
@ -763,9 +803,13 @@ static ssize_t stp_time_zone_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x4000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid() && (stp_info.vbits & 0x4000))
ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(time_zone_offset, 0400,
@ -775,9 +819,13 @@ static ssize_t stp_timing_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.tmd);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%i\n", stp_info.tmd);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
@ -786,9 +834,13 @@ static ssize_t stp_timing_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.tst);
ssize_t ret = -ENODATA;
mutex_lock(&stp_work_mutex);
if (stpinfo_valid())
ret = sprintf(buf, "%i\n", stp_info.tst);
mutex_unlock(&stp_work_mutex);
return ret;
}
static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);

View File

@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void)
* are flush_tlb_*() routines, and these run after flush_cache_*()
* which performs the flushw.
*
* The SMP TLB coherency scheme we use works as follows:
*
* 1) mm->cpu_vm_mask is a bit mask of which cpus an address
* space has (potentially) executed on, this is the heuristic
* we use to avoid doing cross calls.
*
* Also, for flushing from kswapd and also for clones, we
* use cpu_vm_mask as the list of cpus to make run the TLB.
*
* 2) TLB context numbers are shared globally across all processors
* in the system, this allows us to play several games to avoid
* cross calls.
*
* One invariant is that when a cpu switches to a process, and
* that processes tsk->active_mm->cpu_vm_mask does not have the
* current cpu's bit set, that tlb context is flushed locally.
*
* If the address space is non-shared (ie. mm->count == 1) we avoid
* cross calls when we want to flush the currently running process's
* tlb state. This is done by clearing all cpu bits except the current
* processor's in current->mm->cpu_vm_mask and performing the
* flush locally only. This will force any subsequent cpus which run
* this task to flush the context from the local tlb if the process
* migrates to another cpu (again).
*
* 3) For shared address spaces (threads) and swapping we bite the
* bullet for most cases and perform the cross call (but only to
* the cpus listed in cpu_vm_mask).
*
* The performance gain from "optimizing" away the cross call for threads is
* questionable (in theory the big win for threads is the massive sharing of
* address space state across processors).
* mm->cpu_vm_mask is a bit mask of which cpus an address
* space has (potentially) executed on, this is the heuristic
* we use to limit cross calls.
*/
/* This currently is only used by the hugetlb arch pre-fault
@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void)
void smp_flush_tlb_mm(struct mm_struct *mm)
{
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
if (atomic_read(&mm->mm_users) == 1) {
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
goto local_flush_and_out;
}
get_cpu();
smp_cross_call_masked(&xcall_flush_tlb_mm,
ctx, 0, 0,
mm_cpumask(mm));
local_flush_and_out:
__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
put_cpu();
@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
{
u32 ctx = CTX_HWBITS(mm->context);
struct tlb_pending_info info;
int cpu = get_cpu();
get_cpu();
info.ctx = ctx;
info.nr = nr;
info.vaddrs = vaddrs;
if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
&info, 1);
smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
&info, 1);
__flush_tlb_pending(ctx, nr, vaddrs);
@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
unsigned long context = CTX_HWBITS(mm->context);
int cpu = get_cpu();
if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
smp_cross_call_masked(&xcall_flush_tlb_page,
context, vaddr, 0,
mm_cpumask(mm));
get_cpu();
smp_cross_call_masked(&xcall_flush_tlb_page,
context, vaddr, 0,
mm_cpumask(mm));
__flush_tlb_page(context, vaddr);
put_cpu();

View File

@ -35,14 +35,14 @@ int write_sigio_irq(int fd)
}
/* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
static DEFINE_SPINLOCK(sigio_spinlock);
static DEFINE_MUTEX(sigio_mutex);
void sigio_lock(void)
{
spin_lock(&sigio_spinlock);
mutex_lock(&sigio_mutex);
}
void sigio_unlock(void)
{
spin_unlock(&sigio_spinlock);
mutex_unlock(&sigio_mutex);
}

View File

@ -335,11 +335,15 @@ static u64 get_ibs_op_count(u64 config)
{
u64 count = 0;
/*
* If the internal 27-bit counter rolled over, the count is MaxCnt
* and the lower 7 bits of CurCnt are randomized.
* Otherwise CurCnt has the full 27-bit current counter value.
*/
if (config & IBS_OP_VAL)
count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */
if (ibs_caps & IBS_CAPS_RDWROPCNT)
count += (config & IBS_OP_CUR_CNT) >> 32;
count = (config & IBS_OP_MAX_CNT) << 4;
else if (ibs_caps & IBS_CAPS_RDWROPCNT)
count = (config & IBS_OP_CUR_CNT) >> 32;
return count;
}
@ -632,18 +636,24 @@ fail:
perf_ibs->offset_max,
offset + 1);
} while (offset < offset_max);
/*
* Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
* depending on their availability.
* Can't add to offset_max as they are staggered
*/
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
/*
* Read IbsBrTarget and IbsOpData4 separately
* depending on their availability.
* Can't add to offset_max as they are staggered
*/
if (ibs_caps & IBS_CAPS_BRNTRGT) {
rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
size++;
if (perf_ibs == &perf_ibs_op) {
if (ibs_caps & IBS_CAPS_BRNTRGT) {
rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
size++;
}
if (ibs_caps & IBS_CAPS_OPDATA4) {
rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
size++;
}
}
if (ibs_caps & IBS_CAPS_OPDATA4) {
rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
size++;
}
}

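A sketch of the corrected counting rule in get_ibs_op_count() above; the mask definitions mirror the x86 perf headers but are quoted here as assumptions, and MaxCnt is programmed in units of 16 ops, hence the shift by 4:

#include <stdint.h>
#include <stdio.h>

#define IBS_OP_VAL      (1ULL << 18)        /* counter rolled over        */
#define IBS_OP_MAX_CNT  0x0000FFFFULL       /* programmed period / 16     */
#define IBS_OP_CUR_CNT  (0xFFF80ULL << 32)  /* CurCnt, low 7 bits dropped */

static uint64_t ibs_op_count(uint64_t config, int have_rdwropcnt)
{
    if (config & IBS_OP_VAL)
        return (config & IBS_OP_MAX_CNT) << 4;  /* rolled over: use period */
    if (have_rdwropcnt)
        return (config & IBS_OP_CUR_CNT) >> 32; /* live counter value      */
    return 0;
}

int main(void)
{
    uint64_t cfg = IBS_OP_VAL | 0x1000; /* rolled over, MaxCnt = 0x1000 */

    printf("%#llx\n", (unsigned long long)ibs_op_count(cfg, 1)); /* 0x10000 */
    return 0;
}
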
View File

@ -243,7 +243,7 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
static struct event_constraint intel_icl_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */
FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */

View File

@ -432,6 +432,7 @@
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL 0xc001103a
#define MSR_AMD64_IBSBRTARGET 0xc001103b
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
#define MSR_AMD64_SEV 0xc0010131

View File

@ -311,19 +311,12 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address);
unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
struct task_struct *task = state->task;
if (unwind_done(state))
return NULL;
if (state->regs)
return &state->regs->ip;
if (task != current && state->sp == task->thread.sp) {
struct inactive_task_frame *frame = (void *)task->thread.sp;
return &frame->ret_addr;
}
if (state->sp)
return (unsigned long *)state->sp - 1;
@ -653,7 +646,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
} else {
struct inactive_task_frame *frame = (void *)task->thread.sp;
state->sp = task->thread.sp;
state->sp = task->thread.sp + sizeof(*frame);
state->bp = READ_ONCE_NOCHECK(frame->bp);
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
state->signal = (void *)state->ip == ret_from_fork;

View File

@ -749,6 +749,9 @@ int __init acpi_aml_init(void)
{
int ret;
if (acpi_disabled)
return -ENODEV;
/* Initialize AML IO interface */
mutex_init(&acpi_aml_io.lock);
init_waitqueue_head(&acpi_aml_io.wait);

View File

@ -223,9 +223,9 @@ static int __init extlog_init(void)
u64 cap;
int rc;
rdmsrl(MSR_IA32_MCG_CAP, cap);
if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr())
if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
!(cap & MCG_ELOG_P) ||
!extlog_get_l1addr())
return -ENODEV;
if (edac_get_report_status() == EDAC_REPORTING_FORCE) {

View File

@ -136,6 +136,7 @@ struct acpi_button {
int last_state;
ktime_t last_time;
bool suspended;
bool lid_state_initialized;
};
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
@ -391,6 +392,8 @@ static int acpi_lid_update_state(struct acpi_device *device,
static void acpi_lid_initialize_state(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
switch (lid_init_state) {
case ACPI_BUTTON_LID_INIT_OPEN:
(void)acpi_lid_notify_state(device, 1);
@ -402,13 +405,14 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
default:
break;
}
button->lid_state_initialized = true;
}
static void acpi_button_notify(struct acpi_device *device, u32 event)
{
struct acpi_button *button = acpi_driver_data(device);
struct input_dev *input;
int users;
switch (event) {
case ACPI_FIXED_HARDWARE_EVENT:
@ -417,10 +421,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
case ACPI_BUTTON_NOTIFY_STATUS:
input = button->input;
if (button->type == ACPI_BUTTON_TYPE_LID) {
mutex_lock(&button->input->mutex);
users = button->input->users;
mutex_unlock(&button->input->mutex);
if (users)
if (button->lid_state_initialized)
acpi_lid_update_state(device, true);
} else {
int keycode;
@ -465,7 +466,7 @@ static int acpi_button_resume(struct device *dev)
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) {
if (button->type == ACPI_BUTTON_TYPE_LID) {
button->last_state = !!acpi_lid_evaluate_state(device);
button->last_time = ktime_get();
acpi_lid_initialize_state(device);

View File

@ -1968,20 +1968,16 @@ bool acpi_ec_dispatch_gpe(void)
if (acpi_any_gpe_status_set(first_ec->gpe))
return true;
if (ec_no_wakeup)
return false;
/*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
if (ret == ACPI_INTERRUPT_HANDLED) {
if (ret == ACPI_INTERRUPT_HANDLED)
pm_pr_dbg("EC GPE dispatched\n");
/* Flush the event and query workqueues. */
acpi_ec_flush_work();
}
/* Flush the event and query workqueues. */
acpi_ec_flush_work();
return false;
}

View File

@ -403,7 +403,8 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
p->flags, p->processor_PD, p->memory_PD);
if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
hmat_revision > 1) {
target = find_mem_target(p->memory_PD);
if (!target) {
pr_debug("HMAT: Memory Domain missing from SRAT\n");


@ -31,7 +31,7 @@ int acpi_numa __initdata;
int pxm_to_node(int pxm)
{
if (pxm < 0)
if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
return NUMA_NO_NODE;
return pxm_to_node_map[pxm];
}


@ -282,6 +282,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"),
},
},
/* https://bugs.launchpad.net/bugs/1894667 */
{
.callback = video_detect_force_video,
.ident = "HP 635 Notebook",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"),
},
},
/* Non win8 machines which need native backlight nevertheless */
{


@ -2100,7 +2100,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
pp->dhfis_bits &= ~done_mask;
pp->dmafis_bits &= ~done_mask;
pp->sdbfis_bits |= done_mask;
ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
if (!ap->qc_active) {
DPRINTK("over\n");


@ -3442,6 +3442,7 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
struct device *parent = dev->parent;
struct fwnode_handle *fn = dev->fwnode;
if (fwnode) {
@ -3456,7 +3457,8 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
fn->secondary = NULL;
if (!(parent && fn == parent->fwnode))
fn->secondary = ERR_PTR(-ENODEV);
} else {
dev->fwnode = NULL;
}

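The driver-core hunk above stops handing out a bare NULL and instead parks ERR_PTR(-ENODEV) in fn->secondary, so later readers can tell "no secondary fwnode" apart from a stale pointer. A minimal user-space sketch of the ERR_PTR/IS_ERR encoding that makes this work — the constants mirror include/linux/err.h but are redefined here so the example stands alone:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
	/* Error codes live in the top MAX_ERRNO bytes of the address space. */
	void *secondary = ERR_PTR(-19); /* -ENODEV */

	if (IS_ERR(secondary))
		printf("no secondary fwnode: error %ld\n", PTR_ERR(secondary));
	return 0;
}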

@ -291,8 +291,7 @@ static int rpm_get_suppliers(struct device *dev)
device_links_read_lock_held()) {
int retval;
if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
if (!(link->flags & DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@ -312,8 +311,6 @@ static void rpm_put_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held()) {
if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
continue;
while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put(link->supplier);


@ -787,9 +787,9 @@ static void recv_work(struct work_struct *work)
blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
}
nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
nbd_config_put(nbd);
kfree(args);
}


@ -202,7 +202,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
static int do_block_io_op(struct xen_blkif_ring *ring);
static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req);
@ -615,6 +615,8 @@ int xen_blkif_schedule(void *arg)
struct xen_vbd *vbd = &blkif->vbd;
unsigned long timeout;
int ret;
bool do_eoi;
unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
set_freezable();
while (!kthread_should_stop()) {
@ -639,16 +641,23 @@ int xen_blkif_schedule(void *arg)
if (timeout == 0)
goto purge_gnt_list;
do_eoi = ring->waiting_reqs;
ring->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */
ret = do_block_io_op(ring);
ret = do_block_io_op(ring, &eoi_flags);
if (ret > 0)
ring->waiting_reqs = 1;
if (ret == -EACCES)
wait_event_interruptible(ring->shutdown_wq,
kthread_should_stop());
if (do_eoi && !ring->waiting_reqs) {
xen_irq_lateeoi(ring->irq, eoi_flags);
eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
}
purge_gnt_list:
if (blkif->vbd.feature_gnt_persistent &&
time_after(jiffies, ring->next_lru)) {
@ -1121,7 +1130,7 @@ static void end_block_io_op(struct bio *bio)
* and transmute it to the block API to hand it over to the proper block disk.
*/
static int
__do_block_io_op(struct xen_blkif_ring *ring)
__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
struct blkif_request req;
@ -1144,6 +1153,9 @@ __do_block_io_op(struct xen_blkif_ring *ring)
if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
break;
/* We've seen a request, so clear spurious eoi flag. */
*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
if (kthread_should_stop()) {
more_to_do = 1;
break;
@ -1202,13 +1214,13 @@ done:
}
static int
do_block_io_op(struct xen_blkif_ring *ring)
do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
int more_to_do;
do {
more_to_do = __do_block_io_op(ring);
more_to_do = __do_block_io_op(ring, eoi_flags);
if (more_to_do)
break;

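The xen-blkback changes adopt the event-channel "late EOI" discipline: remember whether the thread was woken by an event (do_eoi), clear the wake flag before scanning the ring, drop the spurious marker as soon as a real request is seen, and signal EOI only once the ring is drained. A rough user-space sketch of that control flow — xen_irq_lateeoi() and the ring are stand-ins here, not the real API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define EOI_FLAG_SPURIOUS 0x1 /* stand-in for XEN_EOI_FLAG_SPURIOUS */

static atomic_bool waiting_reqs; /* set by the event-channel handler */

/* Stand-in for the ring: returns 1 while requests remain (none here). */
static int consume_one_request(void) { return 0; }

static void send_lateeoi(unsigned int flags)
{
	printf("EOI%s\n", (flags & EOI_FLAG_SPURIOUS) ? " (spurious)" : "");
}

static void service_ring_once(void)
{
	unsigned int eoi_flags = EOI_FLAG_SPURIOUS;
	bool do_eoi = atomic_load(&waiting_reqs);

	/* Clear the wake flag *before* checking for work, as the driver does. */
	atomic_store(&waiting_reqs, false);

	while (consume_one_request())
		eoi_flags &= ~EOI_FLAG_SPURIOUS; /* a real request was seen */

	/* Ack only when the ring is drained; flag spurious wakeups. */
	if (do_eoi && !atomic_load(&waiting_reqs))
		send_lateeoi(eoi_flags);
}

int main(void)
{
	atomic_store(&waiting_reqs, true); /* pretend an event arrived */
	service_ring_once();
	return 0;
}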

@ -229,9 +229,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
BUG();
}
err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
xen_blkif_be_int, 0,
"blkif-backend", ring);
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
if (err < 0) {
xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
ring->blk_rings.common.sring = NULL;


@ -129,7 +129,12 @@ error_destroy_mc_io:
*/
void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
{
struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
struct fsl_mc_device *dpmcp_dev;
if (!mc_io)
return;
dpmcp_dev = mc_io->dpmcp_dev;
if (dpmcp_dev)
fsl_mc_io_unset_dpmcp(mc_io);


@ -146,10 +146,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
if (!omap2_clk_is_hw_omap(clk_hw)) {
pr_warn("can't setup clkdm for basic clk %s\n",
__clk_get_name(clk));
clk_put(clk);
continue;
}
to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
omap2_init_clk_clkdm(clk_hw);
clk_put(clk);
}
}


@ -688,7 +688,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
}
if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
!acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(data->freqdomain_cpus,


@ -141,7 +141,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = {
static const struct reg_field *sti_cpufreq_match(void)
{
if (of_machine_is_compatible("st,stih407") ||
of_machine_is_compatible("st,stih410"))
of_machine_is_compatible("st,stih410") ||
of_machine_is_compatible("st,stih418"))
return sti_stih407_dvfs_regfields;
return NULL;
@ -258,7 +259,8 @@ static int sti_cpufreq_init(void)
int ret;
if ((!of_machine_is_compatible("st,stih407")) &&
(!of_machine_is_compatible("st,stih410")))
(!of_machine_is_compatible("st,stih410")) &&
(!of_machine_is_compatible("st,stih418")))
return -ENODEV;
ddata.cpu = get_cpu_device(0);


@ -639,11 +639,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
unsigned long flags;
unsigned long residue = 0;
spin_lock_irqsave(&jzchan->vchan.lock, flags);
status = dma_cookie_status(chan, cookie, txstate);
if ((status == DMA_COMPLETE) || (txstate == NULL))
return status;
spin_lock_irqsave(&jzchan->vchan.lock, flags);
goto out_unlock_irqrestore;
vdesc = vchan_find_desc(&jzchan->vchan, cookie);
if (vdesc) {
@ -660,6 +660,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
status = DMA_ERROR;
out_unlock_irqrestore:
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
return status;
}

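The jz4780 fix takes the channel lock before dma_cookie_status() and turns the early return into a goto, so the residue math runs under the same critical section and every exit path drops the lock. A sketch of that single-unlock-site shape with pthreads (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static int chan_state; /* stands in for the channel bookkeeping */

static int query_status(int fast_path)
{
	int status;

	pthread_mutex_lock(&chan_lock);

	status = chan_state;
	if (fast_path)
		goto out_unlock; /* was a bare "return status;" before the fix */

	status += 1; /* pretend to compute a residue under the same lock */

out_unlock:
	pthread_mutex_unlock(&chan_lock);
	return status;
}

int main(void)
{
	printf("%d\n", query_status(1));
	return 0;
}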

@ -127,7 +127,7 @@ static void ptn5150_irq_work(struct work_struct *work)
case PTN5150_DFP_ATTACHED:
extcon_set_state_sync(info->edev,
EXTCON_USB_HOST, false);
gpiod_set_value(info->vbus_gpiod, 0);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
extcon_set_state_sync(info->edev, EXTCON_USB,
true);
break;
@ -138,9 +138,9 @@ static void ptn5150_irq_work(struct work_struct *work)
PTN5150_REG_CC_VBUS_DETECTION_MASK) >>
PTN5150_REG_CC_VBUS_DETECTION_SHIFT);
if (vbus)
gpiod_set_value(info->vbus_gpiod, 0);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
else
gpiod_set_value(info->vbus_gpiod, 1);
gpiod_set_value_cansleep(info->vbus_gpiod, 1);
extcon_set_state_sync(info->edev,
EXTCON_USB_HOST, true);
@ -156,7 +156,7 @@ static void ptn5150_irq_work(struct work_struct *work)
EXTCON_USB_HOST, false);
extcon_set_state_sync(info->edev,
EXTCON_USB, false);
gpiod_set_value(info->vbus_gpiod, 0);
gpiod_set_value_cansleep(info->vbus_gpiod, 0);
}
}


@ -173,6 +173,8 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
protocols_imp[tot_num_ret + loop] = *(list + loop);
tot_num_ret += loop_num_ret;
scmi_reset_rx_to_maxsz(handle, t);
} while (loop_num_ret);
scmi_xfer_put(handle, t);


@ -177,6 +177,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
}
tot_rate_cnt += num_returned;
scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware


@ -103,6 +103,8 @@ int scmi_do_xfer_with_response(const struct scmi_handle *h,
struct scmi_xfer *xfer);
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
struct scmi_xfer *xfer);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);
void scmi_set_handle(struct scmi_device *scmi_dev);


@ -481,6 +481,14 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
return ret;
}
void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
xfer->rx.len = info->desc->max_msg_size;
}
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
/**

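scmi_reset_rx_to_maxsz() exists because the transport trims xfer->rx.len to the size of each reply; in the chunked enumeration loops patched above (base protocol list, clocks, perf levels, sensors) a short first reply would then cap every later chunk. A toy sketch of the failure and the fix — do_chunk() is a hypothetical stand-in for the real transfer:

#include <stddef.h>
#include <stdio.h>

#define MAX_MSG_SIZE 128

struct xfer {
	size_t rx_len; /* trimmed by the transport to each reply's size */
};

/* Hypothetical transport call: returns 1 while more chunks remain. */
static int do_chunk(struct xfer *t, int round)
{
	size_t reply = (round == 0) ? 16 : 96; /* first reply is short */

	/* The transport never writes past the window it was handed. */
	if (reply > t->rx_len)
		reply = t->rx_len;
	t->rx_len = reply;
	return round == 0;
}

int main(void)
{
	struct xfer t = { .rx_len = MAX_MSG_SIZE };
	int round = 0, more;

	do {
		more = do_chunk(&t, round++);
		printf("chunk %d received into %zu-byte window\n",
		       round, t.rx_len);
		/* The fix: restore full capacity before the next transfer;
		 * without this, chunk 2 would be capped at 16 bytes. */
		t.rx_len = MAX_MSG_SIZE;
	} while (more);
	return 0;
}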

@ -281,6 +281,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
}
tot_opp_cnt += num_returned;
scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware


@ -35,9 +35,7 @@ struct scmi_msg_reset_domain_reset {
#define EXPLICIT_RESET_ASSERT BIT(1)
#define ASYNCHRONOUS_RESET BIT(2)
__le32 reset_state;
#define ARCH_RESET_TYPE BIT(31)
#define COLD_RESET_STATE BIT(0)
#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
#define ARCH_COLD_RESET 0
};
struct reset_dom_info {


@ -154,6 +154,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
}
desc_index += num_returned;
scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware


@ -3890,7 +3890,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_lock_adev(tmp_adev, false);
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
(tmp_adev == adev) ? job : NULL,
&need_full_reset);
/*TODO Should we stop ?*/
if (r) {


@ -561,6 +561,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint64_t va_flags;
uint64_t vm_size;
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
@ -581,6 +582,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->va_address &= AMDGPU_GMC_HOLE_MASK;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
if (args->va_address + args->map_size > vm_size) {
dev_dbg(&dev->pdev->dev,
"va_address 0x%llx is in top reserved area 0x%llx\n",
args->va_address + args->map_size, vm_size);
return -EINVAL;
}
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
args->flags);


@ -105,8 +105,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB_0 1
#define AMDGPU_MMHUB_1 2
/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1

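The new ioctl check pairs with the enlarged AMDGPU_VA_RESERVED_SIZE: the usable VA span is max_pfn pages minus the 2 MB window reserved at the top, and a mapping must end at or below that limit. The arithmetic as a stand-alone sketch with made-up sizes:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE 4096ULL
#define VA_RESERVED_SIZE (2ULL << 20) /* 2 MB at the top, as in the hunk */

static int va_ok(uint64_t va, uint64_t size, uint64_t max_pfn)
{
	uint64_t vm_size = max_pfn * GPU_PAGE_SIZE - VA_RESERVED_SIZE;

	/* Reject mappings that end inside the reserved top window. */
	return va + size <= vm_size;
}

int main(void)
{
	uint64_t max_pfn = 1ULL << 28; /* toy 1 TB address space */
	uint64_t top = max_pfn * GPU_PAGE_SIZE;

	printf("%d\n", va_ok(0x100000, 0x10000, max_pfn));              /* 1 */
	printf("%d\n", va_ok(top - (1ULL << 20), 1ULL << 20, max_pfn)); /* 0 */
	return 0;
}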

@ -58,8 +58,9 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
(SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
#if 0
/* TODO:
* This shouldn't be an issue with Navi10. Verify.


@ -3956,6 +3956,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
struct amdgpu_device *adev = connector->dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
/*
* Call only if mst_mgr was initialized before since it's not done
* for all connector types.
*/
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)


@ -2268,7 +2268,7 @@ enum dc_status dc_link_validate_mode_timing(
/* A hack to avoid failing any modes for EDID override feature on
* topology change such as lower quality cable for DP or different dongle
*/
if (link->remote_sinks[0])
if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
return DC_OK;
/* Passive Dongle */


@ -898,10 +898,10 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
* 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* 10us*10200=102ms. This covers 100.0ms of minimum 10 Hz mode +
* a little more because we may not trust delay accuracy.
*/
max_retries = DP_BLANK_MAX_RETRY * 250;
max_retries = DP_BLANK_MAX_RETRY * 501;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);


@ -63,13 +63,13 @@ enum gpio_result dal_gpio_open_ex(
enum gpio_mode mode)
{
if (gpio->pin) {
ASSERT_CRITICAL(false);
BREAK_TO_DEBUGGER();
return GPIO_RESULT_ALREADY_OPENED;
}
// No action if allocation failed during gpio construct
if (!gpio->hw_container.ddc) {
ASSERT_CRITICAL(false);
BREAK_TO_DEBUGGER();
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
gpio->mode = mode;


@ -57,7 +57,7 @@
* general debug capabilities
*
*/
#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB))
#define ASSERT_CRITICAL(expr) do { \
if (WARN_ON(!(expr))) { \
kgdb_breakpoint(); \


@ -2864,7 +2864,7 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
if (hwmgr->is_kicker)
switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
else
switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
break;
case CHIP_VEGAM:
switch_limit_us = 30;


@ -295,8 +295,12 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
const struct i2c_device_id *id)
{
struct device *dev = &stdp4028_i2c->dev;
int ret;
ge_b850v3_lvds_init(dev);
ret = ge_b850v3_lvds_init(dev);
if (ret)
return ret;
ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
@ -354,8 +358,12 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
const struct i2c_device_id *id)
{
struct device *dev = &stdp2690_i2c->dev;
int ret;
ge_b850v3_lvds_init(dev);
ret = ge_b850v3_lvds_init(dev);
if (ret)
return ret;
ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c;
i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr);


@ -365,7 +365,6 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
if (lpm)
val |= CMD_MODE_ALL_LP;
dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
dsi_write(dsi, DSI_CMD_MODE_CFG, val);
}
@ -541,16 +540,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
unsigned long mode_flags)
{
u32 val;
dsi_write(dsi, DSI_PWR_UP, RESET);
if (mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
dw_mipi_dsi_video_mode_config(dsi);
dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
} else {
dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
}
val = PHY_TXREQUESTCLKHS;
if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
val |= AUTO_CLKLANE_CTRL;
dsi_write(dsi, DSI_LPCLK_CTRL, val);
dsi_write(dsi, DSI_PWR_UP, POWERUP);
}


@ -33,6 +33,8 @@
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
#include <asm/hypervisor.h>
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@ -2197,7 +2199,9 @@ static inline bool intel_vtd_active(void)
if (intel_iommu_gfx_mapped)
return true;
#endif
return false;
/* Running as a guest, we assume the host is enforcing VT-d */
return !hypervisor_is_type(X86_HYPER_NATIVE);
}
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)


@ -761,7 +761,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
/* Don't evict this BO if it's outside of the
* requested placement range
*/
if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
(place->lpfn && place->lpfn <= bo->mem.start))
return false;

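The ttm_bo_eviction_valuable() one-liner is a unit fix: place->fpfn and bo->mem.start count page frames while bo->mem.size counts bytes, so "start + size" overstated the BO's end by a factor of PAGE_SIZE and made far-away BOs look evictable. A toy range check in consistent page units (types hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct toy_mem {
	uint64_t start;     /* first page frame of the BO */
	uint64_t num_pages; /* extent in pages */
	uint64_t size;      /* extent in bytes == num_pages << PAGE_SHIFT */
};

/* Does the BO intersect the requested placement window [fpfn, lpfn)? */
static int intersects(const struct toy_mem *m, uint64_t fpfn, uint64_t lpfn)
{
	if (fpfn >= m->start + m->num_pages) /* buggy version used m->size */
		return 0;
	if (lpfn && lpfn <= m->start)
		return 0;
	return 1;
}

int main(void)
{
	struct toy_mem m = {
		.start = 100, .num_pages = 10, .size = 10ULL << PAGE_SHIFT,
	};

	/* Pages 200+ are far past the BO; mixing in bytes said otherwise. */
	printf("%d\n", intersects(&m, 200, 300)); /* 0 with the fixed check */
	return 0;
}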

@ -2773,7 +2773,9 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
if (report->type != HID_INPUT_REPORT)
return -1;
if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
if (WACOM_PAD_FIELD(field))
return 0;
else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_report(hdev, report);
else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_report(hdev, report);


@ -147,7 +147,8 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *coresight_get_enabled_sink(bool reset);
struct coresight_device *
coresight_get_enabled_sink(struct coresight_device *source);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);


@ -481,50 +481,46 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
static int coresight_enabled_sink(struct device *dev, const void *data)
static struct coresight_device *
coresight_find_enabled_sink(struct coresight_device *csdev)
{
const bool *reset = data;
struct coresight_device *csdev = to_coresight_device(dev);
int i;
struct coresight_device *sink;
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
csdev->activated) {
/*
* Now that we have a handle on the sink for this session,
* disable the sysFS "enable_sink" flag so that possible
* concurrent perf session that wish to use another sink don't
* trip on it. Doing so has no ramification for the current
* session.
*/
if (*reset)
csdev->activated = false;
csdev->activated)
return csdev;
return 1;
/*
* Recursively explore each port found on this element.
*/
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_device *child_dev;
child_dev = csdev->pdata->conns[i].child_dev;
if (child_dev)
sink = coresight_find_enabled_sink(child_dev);
if (sink)
return sink;
}
return 0;
return NULL;
}
/**
* coresight_get_enabled_sink - returns the first enabled sink found on the bus
* @deactivate: Whether the 'enable_sink' flag should be reset
* coresight_get_enabled_sink - returns the first enabled sink using
* connection based search starting from the source reference
*
* When operated from perf the deactivate parameter should be set to 'true'.
* That way the "enabled_sink" flag of the sink that was selected can be reset,
* allowing for other concurrent perf sessions to choose a different sink.
*
* When operated from sysFS users have full control and as such the deactivate
* parameter should be set to 'false', hence mandating users to explicitly
* clear the flag.
* @source: Coresight source device reference
*/
struct coresight_device *coresight_get_enabled_sink(bool deactivate)
struct coresight_device *
coresight_get_enabled_sink(struct coresight_device *source)
{
struct device *dev = NULL;
if (!source)
return NULL;
dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
coresight_enabled_sink);
return dev ? to_coresight_device(dev) : NULL;
return coresight_find_enabled_sink(source);
}
static int coresight_sink_by_id(struct device *dev, const void *data)
@ -764,11 +760,7 @@ int coresight_enable(struct coresight_device *csdev)
goto out;
}
/*
* Search for a valid sink for this session but don't reset the
* "enable_sink" flag in sysFS. Users get to do that explicitly.
*/
sink = coresight_get_enabled_sink(false);
sink = coresight_get_enabled_sink(csdev);
if (!sink) {
ret = -EINVAL;
goto out;

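coresight_find_enabled_sink() swaps the old bus-wide device scan for a depth-first walk of the source's output connections, returning the first activated sink actually reachable from that source. The recursion reduced to a self-contained toy graph (struct and field names hypothetical):

#include <stddef.h>
#include <stdio.h>

struct node {
	const char *name;
	int is_sink;
	int activated;
	struct node *child[2]; /* outbound connections, may be NULL */
};

static struct node *find_enabled_sink(struct node *n)
{
	size_t i;

	if (!n)
		return NULL;
	if (n->is_sink && n->activated)
		return n;

	/* Recursively explore each output port, as the driver now does. */
	for (i = 0; i < 2; i++) {
		struct node *sink = find_enabled_sink(n->child[i]);

		if (sink)
			return sink;
	}
	return NULL;
}

int main(void)
{
	struct node etr    = { "etr",    1, 1, { NULL, NULL } };
	struct node funnel = { "funnel", 0, 0, { &etr, NULL } };
	struct node etm    = { "etm",    0, 0, { &funnel, NULL } };

	struct node *sink = find_enabled_sink(&etm);

	printf("%s\n", sink ? sink->name : "none"); /* prints "etr" */
	return 0;
}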

@ -1528,15 +1528,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
return ret;
}
/* Request IRQ */
ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
goto clk_disable;
}
/* Init queue */
init_waitqueue_head(&i2c_imx->queue);
@ -1555,6 +1546,14 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (ret < 0)
goto rpm_disable;
/* Request IRQ */
ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED,
pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
goto rpm_disable;
}
/* Set up clock divider */
i2c_imx->bitrate = IMX_I2C_BIT_RATE;
ret = of_property_read_u32(pdev->dev.of_node,
@ -1614,13 +1613,12 @@ i2c_adapter_remove:
i2c_del_adapter(&i2c_imx->adapter);
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
free_irq(irq, i2c_imx);
rpm_disable:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
clk_disable:
clk_disable_unprepare(i2c_imx->clk);
return ret;
}
@ -1628,7 +1626,7 @@ clk_disable:
static int i2c_imx_remove(struct platform_device *pdev)
{
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int ret;
int irq, ret;
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
@ -1648,6 +1646,9 @@ static int i2c_imx_remove(struct platform_device *pdev)
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, i2c_imx);
clk_disable_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);


@ -357,7 +357,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
num_channels = ARRAY_SIZE(rcar_gyroadc_iio_channels_3);
break;
default:
return -EINVAL;
goto err_e_inval;
}
/*
@ -374,7 +374,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
dev_err(dev,
"Failed to get child reg property of ADC \"%pOFn\".\n",
child);
return ret;
goto err_of_node_put;
}
/* Channel number is too high. */
@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
dev_err(dev,
"Only %i channels supported with %pOFn, but reg = <%i>.\n",
num_channels, child, reg);
return -EINVAL;
goto err_e_inval;
}
}
@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
dev_err(dev,
"Channel %i uses different ADC mode than the rest.\n",
reg);
return -EINVAL;
goto err_e_inval;
}
/* Channel is valid, grab the regulator. */
@ -401,7 +401,8 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
if (IS_ERR(vref)) {
dev_dbg(dev, "Channel %i 'vref' supply not connected.\n",
reg);
return PTR_ERR(vref);
ret = PTR_ERR(vref);
goto err_of_node_put;
}
priv->vref[reg] = vref;
@ -425,8 +426,10 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
* attached to the GyroADC at a time, so if we found it,
* we can stop parsing here.
*/
if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A)
if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) {
of_node_put(child);
break;
}
}
if (first) {
@ -435,6 +438,12 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
}
return 0;
err_e_inval:
ret = -EINVAL;
err_of_node_put:
of_node_put(child);
return ret;
}
static void rcar_gyroadc_deinit_supplies(struct iio_dev *indio_dev)


@ -28,6 +28,12 @@ struct adc0832 {
struct regulator *reg;
struct mutex lock;
u8 mux_bits;
/*
* Max size needed: 16x 1 byte ADC data + 8 bytes timestamp.
* May be shorter if not all channels are enabled, subject
* to the timestamp remaining 8 byte aligned.
*/
u8 data[24] __aligned(8);
u8 tx_buf[2] ____cacheline_aligned;
u8 rx_buf[2];
@ -199,7 +205,6 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc0832 *adc = iio_priv(indio_dev);
u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
int scan_index;
int i = 0;
@ -217,10 +222,10 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
goto out;
}
data[i] = ret;
adc->data[i] = ret;
i++;
}
iio_push_to_buffers_with_timestamp(indio_dev, data,
iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);

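This adc0832 change, and the adc12138, itg3200 and si1145 ones after it, all follow one pattern: move the scan bounce buffer off the stack into the driver state and tag it __aligned(8), because iio_push_to_buffers_with_timestamp() stores an s64 timestamp at the first 8-byte-aligned offset past the channel data, and a stack array guarantees no such alignment. A stand-alone sketch of the layout rule, checked at compile time:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the driver pattern: channel bytes first, timestamp last,
 * with the whole scan 8-byte aligned (GCC attribute, like __aligned(8)).
 */
struct scan {
	uint8_t data[16] __attribute__((aligned(8))); /* 16x 1 byte ADC */
	int64_t ts; /* lands naturally on an 8-byte boundary */
};

_Static_assert(offsetof(struct scan, ts) % 8 == 0,
	       "timestamp must be 8-byte aligned");
_Static_assert(sizeof(struct scan) == 24,
	       "16 data bytes + 8-byte timestamp, no hidden padding");

int main(void)
{
	struct scan s = { .ts = 0 };

	printf("timestamp offset: %zu\n", offsetof(struct scan, ts));
	(void)s;
	return 0;
}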

@ -47,6 +47,12 @@ struct adc12138 {
struct completion complete;
/* The number of cclk periods for the S/H's acquisition time */
unsigned int acquisition_time;
/*
* Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp.
* Less may be needed if not all channels are enabled, as long as
* the 8 byte alignment of the timestamp is maintained.
*/
__be16 data[20] __aligned(8);
u8 tx_buf[2] ____cacheline_aligned;
u8 rx_buf[2];
@ -329,7 +335,6 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc12138 *adc = iio_priv(indio_dev);
__be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */
__be16 trash;
int ret;
int scan_index;
@ -345,7 +350,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
reinit_completion(&adc->complete);
ret = adc12138_start_and_read_conv(adc, scan_chan,
i ? &data[i - 1] : &trash);
i ? &adc->data[i - 1] : &trash);
if (ret) {
dev_warn(&adc->spi->dev,
"failed to start conversion\n");
@ -362,7 +367,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
}
if (i) {
ret = adc12138_read_conv_data(adc, &data[i - 1]);
ret = adc12138_read_conv_data(adc, &adc->data[i - 1]);
if (ret) {
dev_warn(&adc->spi->dev,
"failed to get conversion data\n");
@ -370,7 +375,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
}
}
iio_push_to_buffers_with_timestamp(indio_dev, data,
iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);


@ -46,13 +46,20 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct itg3200 *st = iio_priv(indio_dev);
__be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)];
/*
* Ensure correct alignment and padding including for the
* timestamp that may be inserted.
*/
struct {
__be16 buf[ITG3200_SCAN_ELEMENTS];
s64 ts __aligned(8);
} scan;
int ret = itg3200_read_all_channels(st->i2c, buf);
int ret = itg3200_read_all_channels(st->i2c, scan.buf);
if (ret < 0)
goto error_ret;
iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);


@ -169,6 +169,7 @@ struct si1145_part_info {
* @part_info: Part information
* @trig: Pointer to iio trigger
* @meas_rate: Value of MEAS_RATE register. Only set in HW in auto mode
* @buffer: Used to pack data read from sensor.
*/
struct si1145_data {
struct i2c_client *client;
@ -180,6 +181,14 @@ struct si1145_data {
bool autonomous;
struct iio_trigger *trig;
int meas_rate;
/*
* Ensure timestamp will be naturally aligned if present.
* Maximum buffer size (may be only partly used if not all
* channels are enabled):
* 6*2 bytes channels data + 4 bytes alignment +
* 8 bytes timestamp
*/
u8 buffer[24] __aligned(8);
};
/**
@ -441,12 +450,6 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
struct iio_poll_func *pf = private;
struct iio_dev *indio_dev = pf->indio_dev;
struct si1145_data *data = iio_priv(indio_dev);
/*
* Maximum buffer size:
* 6*2 bytes channels data + 4 bytes alignment +
* 8 bytes timestamp
*/
u8 buffer[24];
int i, j = 0;
int ret;
u8 irq_status = 0;
@ -479,7 +482,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
ret = i2c_smbus_read_i2c_block_data_or_emulated(
data->client, indio_dev->channels[i].address,
sizeof(u16) * run, &buffer[j]);
sizeof(u16) * run, &data->buffer[j]);
if (ret < 0)
goto done;
j += run * sizeof(u16);
@ -494,7 +497,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
goto done;
}
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
iio_get_time_ns(indio_dev));
done:


@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
listener->qed_handle);
cm_id->rem_ref(cm_id);
kfree(listener);
return rc;
}


@ -74,7 +74,7 @@ EXPORT_SYMBOL(hil_mlc_unregister);
static LIST_HEAD(hil_mlcs);
static DEFINE_RWLOCK(hil_mlcs_lock);
static struct timer_list hil_mlcs_kicker;
static int hil_mlcs_probe;
static int hil_mlcs_probe, hil_mlc_stop;
static void hil_mlcs_process(unsigned long unused);
static DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0);
@ -702,9 +702,13 @@ static int hilse_donode(hil_mlc *mlc)
if (!mlc->ostarted) {
mlc->ostarted = 1;
mlc->opacket = pack;
mlc->out(mlc);
rc = mlc->out(mlc);
nextidx = HILSEN_DOZE;
write_unlock_irqrestore(&mlc->lock, flags);
if (rc) {
hil_mlc_stop = 1;
return 1;
}
break;
}
mlc->ostarted = 0;
@ -715,8 +719,13 @@ static int hilse_donode(hil_mlc *mlc)
case HILSE_CTS:
write_lock_irqsave(&mlc->lock, flags);
nextidx = mlc->cts(mlc) ? node->bad : node->good;
rc = mlc->cts(mlc);
nextidx = rc ? node->bad : node->good;
write_unlock_irqrestore(&mlc->lock, flags);
if (rc) {
hil_mlc_stop = 1;
return 1;
}
break;
default:
@ -780,6 +789,12 @@ static void hil_mlcs_process(unsigned long unused)
static void hil_mlcs_timer(struct timer_list *unused)
{
if (hil_mlc_stop) {
/* could not send packet - stop immediately. */
pr_warn(PREFIX "HIL seems stuck - Disabling HIL MLC.\n");
return;
}
hil_mlcs_probe = 1;
tasklet_schedule(&hil_mlcs_tasklet);
/* Re-insert the periodic task. */

Some files were not shown because too many files have changed in this diff.