
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

The netfilter conflicts were rather simple overlapping
changes.

However, the cls_tcindex.c stuff was a bit more complex.

On the 'net' side, Cong is fixing several races and memory
leaks, whilst on the 'net-next' side Vlad is adding the
rtnl-ness support.

What I've decided to do, in order to resolve this, is revert Cong's
conversion over to using a workqueue, bringing us back to pure RCU.
I did it this way because I believe that either Cong's races don't
apply with how Vlad did things, or Cong will have to implement the
race fix slightly differently.

Signed-off-by: David S. Miller <davem@davemloft.net>
hifive-unleashed-5.1
David S. Miller 2019-02-15 12:38:38 -08:00
commit 3313da8188
272 changed files with 1968 additions and 1234 deletions

View File

@ -1701,12 +1701,11 @@
By default, super page will be supported if Intel IOMMU
has the capability. With this option, super page will
not be supported.
sm_off [Default Off]
By default, scalable mode will be supported if the
sm_on [Default Off]
By default, scalable mode will be disabled even if the
hardware advertises that it has support for the scalable
mode translation. With this option set, scalable mode
will not be used even on hardware which claims to support
it.
will be used on hardware which claims to support it.
tboot_noforce [Default Off]
Do not force the Intel IOMMU enabled under tboot.
By default, tboot will force Intel IOMMU on, which

View File

@ -22,8 +22,9 @@ and changeable from userspace under certain rules.
2. Querying from userspace
Both admin and operational state can be queried via the netlink
operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK
to be notified of updates. This is important for setting from userspace.
operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK
to be notified of updates while the interface is admin up. This is
important for setting from userspace.
These values contain interface state:
@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to
complete. Corresponding functions are netif_dormant_on() to set the
flag, netif_dormant_off() to clear it and netif_dormant() to query.
On device allocation, networking core sets the flags equivalent to
netif_carrier_ok() and !netif_dormant().
On device allocation, both flags __LINK_STATE_NOCARRIER and
__LINK_STATE_DORMANT are cleared, so the effective state is equivalent
to netif_carrier_ok() and !netif_dormant().
Whenever the driver CHANGES one of these flags, a workqueue event is
@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the
driver. Afterwards, the userspace application can set IFLA_OPERSTATE
to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set
netif_carrier_off() or netif_dormant_on(). Changes made by userspace
are multicasted on the netlink group RTMGRP_LINK.
are multicasted on the netlink group RTNLGRP_LINK.
So basically a 802.1X supplicant interacts with the kernel like this:
-subscribe to RTMGRP_LINK
-subscribe to RTNLGRP_LINK
-set IFLA_LINKMODE to 1 via RTM_SETLINK
-query RTM_GETLINK once to get initial state
-if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until
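(Illustration only, not part of this patch.) A minimal user-space sketch of the
subscription step described above, assuming only the standard rtnetlink headers;
the function name and the omitted error handling are illustrative:

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/* Open an rtnetlink socket and join RTNLGRP_LINK so that RTM_NEWLINK
 * notifications arrive whenever interface flags or operstate change. */
static int open_link_monitor(void)
{
	struct sockaddr_nl sa;
	int grp = RTNLGRP_LINK;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		   &grp, sizeof(grp));
	return fd;
}

The RTM_GETLINK query for initial state and the RTM_SETLINK request that sets
IFLA_LINKMODE would then be sent over this same socket, following the sequence
outlined above.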

View File

@ -80,7 +80,9 @@ nonzero when shrink_dcache_pages() has been called and the
dcache isn't pruned yet.
nr_negative shows the number of unused dentries that are also
negative dentries which do not mapped to actual files.
negative dentries which do not map to any files. Instead,
they help speeding up rejection of non-existing files provided
by the users.
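For illustration only (not part of this patch): a small C sketch that reads the
counter described above, assuming nr_negative is exported as the fifth field of
/proc/sys/fs/dentry-state alongside nr_dentry, nr_unused, age_limit and
want_pages:

#include <stdio.h>

/* Print the number of negative dentries reported by the dcache. */
int main(void)
{
	long nr_dentry, nr_unused, age_limit, want_pages, nr_negative, dummy;
	FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%ld %ld %ld %ld %ld %ld", &nr_dentry, &nr_unused,
		   &age_limit, &want_pages, &nr_negative, &dummy) == 6)
		printf("negative dentries: %ld\n", nr_negative);
	fclose(f);
	return 0;
}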
==============================================================

View File

@ -5192,7 +5192,7 @@ DRM DRIVERS FOR XEN
M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
T: git git://anongit.freedesktop.org/drm/drm-misc
L: dri-devel@lists.freedesktop.org
L: xen-devel@lists.xen.org
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: drivers/gpu/drm/xen/
F: Documentation/gpu/xen-front.rst
@ -6165,7 +6165,7 @@ FREESCALE SOC SOUND DRIVERS
M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
R: Fabio Estevam <fabio.estevam@nxp.com>
R: Fabio Estevam <festevam@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
@ -10913,7 +10913,7 @@ F: include/linux/nvmem-consumer.h
F: include/linux/nvmem-provider.h
NXP SGTL5000 DRIVER
M: Fabio Estevam <fabio.estevam@nxp.com>
M: Fabio Estevam <festevam@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/sound/sgtl5000.txt
@ -11327,10 +11327,12 @@ F: include/dt-bindings/
OPENCORES I2C BUS DRIVER
M: Peter Korsgaard <peter@korsgaard.com>
M: Andrew Lunn <andrew@lunn.ch>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-ocores
F: drivers/i2c/busses/i2c-ocores.c
F: include/linux/platform_data/i2c-ocores.h
OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Shy Crocodile
# *DOCUMENTATION*

View File

@ -56,15 +56,15 @@
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_LYNX) || \
defined(CONFIG_ALPHA_SHARK) || \
defined(CONFIG_ALPHA_EIGER)
defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
#elif defined(CONFIG_ALPHA_TITAN)
#define NR_IRQS 80
#elif defined(CONFIG_ALPHA_RAWHIDE) || \
defined(CONFIG_ALPHA_TAKARA)
defined(CONFIG_ALPHA_TAKARA) || \
defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 128
#elif defined(CONFIG_ALPHA_WILDFIRE)

View File

@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
(r) <= 18 ? (r)+8 : (r)-10])
(r) <= 18 ? (r)+10 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,

View File

@ -215,7 +215,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
bus-width = <0x4>;
cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
cd-inverted;
max-frequency = <26000000>;
vmmc-supply = <&vmmcsd_fixed>;

View File

@ -476,7 +476,7 @@
clocksource: timer@20000 {
compatible = "ti,da830-timer";
reg = <0x20000 0x1000>;
interrupts = <12>, <13>;
interrupts = <21>, <22>;
interrupt-names = "tint12", "tint34";
clocks = <&pll0_auxclk>;
};

View File

@ -103,7 +103,7 @@
power {
label = "Power Button";
gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
gpio-key,wakeup;
wakeup-source;
linux,code = <KEY_POWER>;
};
};

View File

@ -309,7 +309,7 @@
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
keep-power-in-suspend;
enable-sdio-wakeup;
wakeup-source;
vmmc-supply = <&reg_sd3_vmmc>;
status = "okay";
};

View File

@ -467,7 +467,7 @@
};
gpt: gpt@2098000 {
compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
reg = <0x02098000 0x4000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_GPT_BUS>,

View File

@ -274,7 +274,7 @@
compatible = "amlogic,meson6-dwmac", "snps,dwmac";
reg = <0xc9410000 0x10000
0xc1108108 0x4>;
interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
status = "disabled";
};

View File

@ -205,8 +205,7 @@
cap-sd-highspeed;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
};

View File

@ -221,7 +221,6 @@
/* Realtek RTL8211F (0x001cc916) */
eth_phy: ethernet-phy@0 {
reg = <0>;
eee-broken-1000t;
interrupt-parent = <&gpio_intc>;
/* GPIOH_3 */
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@ -273,8 +272,7 @@
cap-sd-highspeed;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&tflash_vdd>;
vqmmc-supply = <&tf_io>;

View File

@ -206,8 +206,7 @@
cap-sd-highspeed;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
};

View File

@ -105,7 +105,7 @@
interrupts-extended = <
&cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
&cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
&cpcap 48 1
&cpcap 48 0
>;
interrupt-names =
"id_ground", "id_float", "se0conn", "vbusvld",

View File

@ -714,11 +714,7 @@
vdda-supply = <&vdac>;
#address-cells = <1>;
#size-cells = <0>;
port {
reg = <0>;
venc_out: endpoint {
remote-endpoint = <&opa_in>;
ti,channels = <1>;

View File

@ -814,7 +814,7 @@
/* For debugging, it is often good idea to remove this GPIO.
It means you can remove back cover (to reboot by removing
battery) and still use the MMC card. */
cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
};
/* most boards use vaux3, only some old versions use vmmc2 instead */

View File

@ -370,6 +370,19 @@
compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
/*
* These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
* bootloader set values when booted with v4.19 using both N950
* and N9 devices (OneNAND Manufacturer: Samsung):
*
* gpmc cs0 before gpmc_cs_program_settings:
* cs0 GPMC_CS_CONFIG1: 0xfd001202
* cs0 GPMC_CS_CONFIG2: 0x00181800
* cs0 GPMC_CS_CONFIG3: 0x00030300
* cs0 GPMC_CS_CONFIG4: 0x18001804
* cs0 GPMC_CS_CONFIG5: 0x03171d1d
* cs0 GPMC_CS_CONFIG6: 0x97080000
*/
gpmc,sync-read;
gpmc,sync-write;
gpmc,burst-length = <16>;
@ -379,26 +392,27 @@
gpmc,device-width = <2>;
gpmc,mux-add-data = <2>;
gpmc,cs-on-ns = <0>;
gpmc,cs-rd-off-ns = <87>;
gpmc,cs-wr-off-ns = <87>;
gpmc,cs-rd-off-ns = <122>;
gpmc,cs-wr-off-ns = <122>;
gpmc,adv-on-ns = <0>;
gpmc,adv-rd-off-ns = <10>;
gpmc,adv-wr-off-ns = <10>;
gpmc,oe-on-ns = <15>;
gpmc,oe-off-ns = <87>;
gpmc,adv-rd-off-ns = <15>;
gpmc,adv-wr-off-ns = <15>;
gpmc,oe-on-ns = <20>;
gpmc,oe-off-ns = <122>;
gpmc,we-on-ns = <0>;
gpmc,we-off-ns = <87>;
gpmc,rd-cycle-ns = <112>;
gpmc,wr-cycle-ns = <112>;
gpmc,access-ns = <81>;
gpmc,we-off-ns = <122>;
gpmc,rd-cycle-ns = <148>;
gpmc,wr-cycle-ns = <148>;
gpmc,access-ns = <117>;
gpmc,page-burst-access-ns = <15>;
gpmc,bus-turnaround-ns = <0>;
gpmc,cycle2cycle-delay-ns = <0>;
gpmc,wait-monitoring-ns = <0>;
gpmc,clk-activation-ns = <5>;
gpmc,wr-data-mux-bus-ns = <30>;
gpmc,wr-access-ns = <81>;
gpmc,sync-clk-ps = <15000>;
gpmc,clk-activation-ns = <10>;
gpmc,wr-data-mux-bus-ns = <40>;
gpmc,wr-access-ns = <117>;
gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */
/*
* MTD partition table corresponding to Nokia's MeeGo 1.2

View File

@ -1046,8 +1046,6 @@
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,syss-mask = <1>;
ti,no-reset-on-init;
ti,no-idle-on-init;
/* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */
clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>;
clock-names = "fck";

View File

@ -1681,15 +1681,12 @@
du: display@feb00000 {
compatible = "renesas,du-r8a7743";
reg = <0 0xfeb00000 0 0x40000>,
<0 0xfeb90000 0 0x1c>;
reg-names = "du", "lvds.0";
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 724>,
<&cpg CPG_MOD 723>,
<&cpg CPG_MOD 726>;
clock-names = "du.0", "du.1", "lvds.0";
<&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
status = "disabled";
ports {
@ -1704,6 +1701,33 @@
port@1 {
reg = <1>;
du_out_lvds0: endpoint {
remote-endpoint = <&lvds0_in>;
};
};
};
};
lvds0: lvds@feb90000 {
compatible = "renesas,r8a7743-lvds";
reg = <0 0xfeb90000 0 0x1c>;
clocks = <&cpg CPG_MOD 726>;
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
resets = <&cpg 726>;
status = "disabled";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
lvds0_in: endpoint {
remote-endpoint = <&du_out_lvds0>;
};
};
port@1 {
reg = <1>;
lvds0_out: endpoint {
};
};
};

View File

@ -216,6 +216,7 @@
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
clock-output-names = "osc24M";
};
osc32k: clk-32k {

View File

@ -53,7 +53,7 @@
aliases {
serial0 = &uart0;
/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
ethernet0 = &emac;
ethernet1 = &sdiowifi;
};

View File

@ -110,11 +110,11 @@
bus-num = <3>;
status = "okay";
spi-slave;
#address-cells = <0>;
slave@0 {
slave {
compatible = "lwn,bk4";
spi-max-frequency = <30000000>;
reg = <0>;
};
};

View File

@ -75,8 +75,7 @@ void __init n2100_map_io(void)
/*
* N2100 PCI.
*/
static int __init
n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;

View File

@ -3,6 +3,7 @@
#include <linux/suspend.h>
#include <asm/suspend.h>
#include "smc.h"
#include "pm.h"
static int tango_pm_powerdown(unsigned long arg)
{
@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
.valid = suspend_valid_only_mem,
};
static int __init tango_pm_init(void)
void __init tango_pm_init(void)
{
suspend_set_ops(&tango_pm_ops);
return 0;
}
late_initcall(tango_pm_init);

View File

@ -0,0 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_SUSPEND
void __init tango_pm_init(void);
#else
#define tango_pm_init NULL
#endif

View File

@ -2,6 +2,7 @@
#include <asm/mach/arch.h>
#include <asm/hardware/cache-l2x0.h>
#include "smc.h"
#include "pm.h"
static void tango_l2c_write(unsigned long val, unsigned int reg)
{
@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
.dt_compat = tango_dt_compat,
.l2c_aux_mask = ~0,
.l2c_write_sec = tango_l2c_write,
.init_late = tango_pm_init,
MACHINE_END

View File

@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
if (ssp == NULL)
return -ENODEV;
iounmap(ssp->mmio_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
kfree(ssp);
return 0;
}

View File

@ -7,7 +7,6 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

View File

@ -188,6 +188,7 @@
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
};
};

View File

@ -390,7 +390,7 @@
};
video-codec@1c0e000 {
compatible = "allwinner,sun50i-h5-video-engine";
compatible = "allwinner,sun50i-a64-video-engine";
reg = <0x01c0e000 0x1000>;
clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>,
<&ccu CLK_DRAM_VE>;

View File

@ -187,8 +187,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;

View File

@ -305,8 +305,7 @@
max-frequency = <200000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddio_ao3v3>;
vqmmc-supply = <&vddio_tf>;

View File

@ -238,8 +238,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_card>;

View File

@ -258,8 +258,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&tflash_vdd>;
vqmmc-supply = <&tf_io>;

View File

@ -196,8 +196,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_card>;

View File

@ -154,8 +154,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
};

View File

@ -211,8 +211,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vcc_3v3>;

View File

@ -131,8 +131,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_card>;

View File

@ -238,8 +238,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
vqmmc-supply = <&vcc_card>;

View File

@ -183,8 +183,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_card>;

View File

@ -137,8 +137,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;

View File

@ -356,8 +356,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;

View File

@ -147,8 +147,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;

View File

@ -170,8 +170,7 @@
max-frequency = <100000000>;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;

View File

@ -404,7 +404,7 @@
};
intc: interrupt-controller@9bc0000 {
compatible = "arm,gic-v3";
compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
#interrupt-cells = <3>;
interrupt-controller;
#redistributor-regions = <1>;

View File

@ -1011,6 +1011,9 @@
<&cpg CPG_CORE R8A774A1_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x13>, <&dmac1 0x12>,
<&dmac2 0x13>, <&dmac2 0x12>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";

View File

@ -1262,6 +1262,9 @@
<&cpg CPG_CORE R8A7796_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x13>, <&dmac1 0x12>,
<&dmac2 0x13>, <&dmac2 0x12>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";

View File

@ -1068,6 +1068,9 @@
<&cpg CPG_CORE R8A77965_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x13>, <&dmac1 0x12>,
<&dmac2 0x13>, <&dmac2 0x12>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";

View File

@ -120,10 +120,12 @@ static int create_dtb(struct kimage *image,
{
void *buf;
size_t buf_size;
size_t cmdline_len;
int ret;
cmdline_len = cmdline ? strlen(cmdline) : 0;
buf_size = fdt_totalsize(initial_boot_params)
+ strlen(cmdline) + DTB_EXTRA_SPACE;
+ cmdline_len + DTB_EXTRA_SPACE;
for (;;) {
buf = vmalloc(buf_size);

View File

@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
}
static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
unsigned long end)
{
pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
unsigned long addr;
unsigned i;
unsigned long addr = start;
pte_t *ptep = pte_offset_kernel(pmdp, start);
for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
addr = start + i * PAGE_SIZE;
do {
note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
}
} while (ptep++, addr += PAGE_SIZE, addr != end);
}
static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
unsigned long end)
{
pmd_t *pmdp = pmd_offset(pudp, 0UL);
unsigned long addr;
unsigned i;
unsigned long next, addr = start;
pmd_t *pmdp = pmd_offset(pudp, start);
for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
do {
pmd_t pmd = READ_ONCE(*pmdp);
next = pmd_addr_end(addr, end);
addr = start + i * PMD_SIZE;
if (pmd_none(pmd) || pmd_sect(pmd)) {
note_page(st, addr, 3, pmd_val(pmd));
} else {
BUG_ON(pmd_bad(pmd));
walk_pte(st, pmdp, addr);
walk_pte(st, pmdp, addr, next);
}
}
} while (pmdp++, addr = next, addr != end);
}
static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
unsigned long end)
{
pud_t *pudp = pud_offset(pgdp, 0UL);
unsigned long addr;
unsigned i;
unsigned long next, addr = start;
pud_t *pudp = pud_offset(pgdp, start);
for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
do {
pud_t pud = READ_ONCE(*pudp);
next = pud_addr_end(addr, end);
addr = start + i * PUD_SIZE;
if (pud_none(pud) || pud_sect(pud)) {
note_page(st, addr, 2, pud_val(pud));
} else {
BUG_ON(pud_bad(pud));
walk_pmd(st, pudp, addr);
walk_pmd(st, pudp, addr, next);
}
}
} while (pudp++, addr = next, addr != end);
}
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
unsigned long start)
{
pgd_t *pgdp = pgd_offset(mm, 0UL);
unsigned i;
unsigned long addr;
unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
unsigned long next, addr = start;
pgd_t *pgdp = pgd_offset(mm, start);
for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
do {
pgd_t pgd = READ_ONCE(*pgdp);
next = pgd_addr_end(addr, end);
addr = start + i * PGDIR_SIZE;
if (pgd_none(pgd)) {
note_page(st, addr, 1, pgd_val(pgd));
} else {
BUG_ON(pgd_bad(pgd));
walk_pud(st, pgdp, addr);
walk_pud(st, pgdp, addr, next);
}
}
} while (pgdp++, addr = next, addr != end);
}
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)

View File

@ -155,18 +155,22 @@ out:
static int __init nfhd_init(void)
{
u32 blocks, bsize;
int ret;
int i;
nfhd_id = nf_get_id("XHDI");
if (!nfhd_id)
return -ENODEV;
major_num = register_blkdev(major_num, "nfhd");
if (major_num <= 0) {
ret = register_blkdev(major_num, "nfhd");
if (ret < 0) {
pr_warn("nfhd: unable to get major number\n");
return major_num;
return ret;
}
if (!major_num)
major_num = ret;
for (i = NFHD_DEV_OFFSET; i < 24; i++) {
if (nfhd_get_capacity(i, 0, &blocks, &bsize))
continue;

View File

@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT
please say 'N' here. If you want a high-performance kernel to run on
new Loongson 3 machines only, please say 'Y' here.
config CPU_LOONGSON3_WORKAROUNDS
bool "Old Loongson 3 LLSC Workarounds"
default y if SMP
depends on CPU_LOONGSON3
help
Loongson 3 processors have the llsc issues which require workarounds.
Without workarounds the system may hang unexpectedly.
Newer Loongson 3 will fix these issues and no workarounds are needed.
The workarounds have no significant side effect on them but may
decrease the performance of the system so this option should be
disabled unless the kernel is intended to be run on old systems.
If unsure, please say Y.
config CPU_LOONGSON2E
bool "Loongson 2E"
depends on SYS_HAS_CPU_LOONGSON2E

View File

@ -76,7 +76,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pins_uart2>;
pinctrl-0 = <&pins_uart3>;
};
&uart4 {
@ -196,9 +196,9 @@
bias-disable;
};
pins_uart2: uart2 {
function = "uart2";
groups = "uart2-data", "uart2-hwflow";
pins_uart3: uart3 {
function = "uart3";
groups = "uart3-data", "uart3-hwflow";
bias-disable;
};

View File

@ -161,7 +161,7 @@
#dma-cells = <2>;
interrupt-parent = <&intc>;
interrupts = <29>;
interrupts = <20>;
clocks = <&cgu JZ4740_CLK_DMA>;

View File

@ -90,11 +90,11 @@
interrupts = <0>;
};
axi_i2c: i2c@10A00000 {
axi_i2c: i2c@10a00000 {
compatible = "xlnx,xps-iic-2.00.a";
interrupt-parent = <&axi_intc>;
interrupts = <4>;
reg = < 0x10A00000 0x10000 >;
reg = < 0x10a00000 0x10000 >;
clocks = <&ext>;
xlnx,clk-freq = <0x5f5e100>;
xlnx,family = "Artix7";
@ -106,9 +106,9 @@
#address-cells = <1>;
#size-cells = <0>;
ad7420@4B {
ad7420@4b {
compatible = "adi,adt7420";
reg = <0x4B>;
reg = <0x4b>;
};
} ;
};

View File

@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
if (kernel_uses_llsc) { \
int temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
if (kernel_uses_llsc) { \
int temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
if (kernel_uses_llsc) { \
int temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
if (kernel_uses_llsc) { \
long temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
if (kernel_uses_llsc) { \
long temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
if (kernel_uses_llsc) { \
long temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \

View File

@ -222,6 +222,42 @@
#define __smp_mb__before_atomic() __smp_mb__before_llsc()
#define __smp_mb__after_atomic() smp_llsc_mb()
/*
* Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
* store or pref) in between an ll & sc can cause the sc instruction to
* erroneously succeed, breaking atomicity. Whilst it's unusual to write code
* containing such sequences, this bug bites harder than we might otherwise
* expect due to reordering & speculation:
*
* 1) A memory access appearing prior to the ll in program order may actually
* be executed after the ll - this is the reordering case.
*
* In order to avoid this we need to place a memory barrier (ie. a sync
* instruction) prior to every ll instruction, in between it & any earlier
* memory access instructions. Many of these cases are already covered by
* smp_mb__before_llsc() but for the remaining cases, typically ones in
* which multiple CPUs may operate on a memory location but ordering is not
* usually guaranteed, we use loongson_llsc_mb() below.
*
* This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
*
* 2) If a conditional branch exists between an ll & sc with a target outside
* of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
* or similar, then misprediction of the branch may allow speculative
* execution of memory accesses from outside of the ll-sc loop.
*
* In order to avoid this we need a memory barrier (ie. a sync instruction)
* at each affected branch target, for which we also use loongson_llsc_mb()
* defined below.
*
* This case affects all current Loongson 3 CPUs.
*/
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#else
#define loongson_llsc_mb() do { } while (0)
#endif
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
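(Illustration, not part of this merge.) A condensed sketch of how the barrier is
meant to be placed, mirroring the pattern the asm/atomic.h and asm/bitops.h
hunks in this merge add; it is simplified, the real code uses the MIPS_ISA_LEVEL
and GCC_OFF_SMALL_ASM helpers instead of the literal ISA string and "m"
constraint used here:

static inline void example_llsc_inc(int *p)
{
	int temp;

	loongson_llsc_mb();	/* case 1: order earlier accesses before the ll */
	__asm__ __volatile__(
	"	.set	push			\n"
	"	.set	mips64r2		\n"
	"1:	ll	%0, %1			\n"
	"	addu	%0, %2			\n"
	"	sc	%0, %1			\n"
	"	beqz	%0, 1b			\n"
	"	.set	pop			\n"
	: "=&r" (temp), "+m" (*p)
	: "Ir" (1));
	/* case 2 (a branch out of the ll/sc loop, e.g. a cmpxchg mismatch)
	 * instead needs loongson_llsc_mb() at the branch target, as the
	 * asm/futex.h hunk below does after the retry loop. */
}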

View File

@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
loongson_llsc_mb();
do {
__asm__ __volatile__(
" " __LL "%0, %1 # set_bit \n"
@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
} else if (kernel_uses_llsc) {
loongson_llsc_mb();
do {
__asm__ __volatile__(
" .set push \n"
@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
loongson_llsc_mb();
do {
__asm__ __volatile__(
" " __LL "%0, %1 # clear_bit \n"
@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
} else if (kernel_uses_llsc) {
loongson_llsc_mb();
do {
__asm__ __volatile__(
" .set push \n"
@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
loongson_llsc_mb();
do {
__asm__ __volatile__(
" .set push \n"

View File

@ -50,6 +50,7 @@
"i" (-EFAULT) \
: "memory"); \
} else if (cpu_has_llsc) { \
loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
loongson_llsc_mb();
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
"i" (-EFAULT)
: "memory");
loongson_llsc_mb();
} else
return -ENOSYS;

View File

@ -228,6 +228,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
} else if (kernel_uses_llsc) {
loongson_llsc_mb();
__asm__ __volatile__ (
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
@ -242,6 +243,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
" .set pop \n"
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
loongson_llsc_mb();
}
#else /* !CONFIG_SMP */
if (pte_none(*buddy))

View File

@ -457,5 +457,5 @@ void mips_cm_error_report(void)
}
/* reprime cause register */
write_gcr_error_cause(0);
write_gcr_error_cause(cm_error);
}

View File

@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip, *ip_end;
union mips_instruction insn, *ip;
const unsigned int max_insns = 128;
unsigned int last_insn_size = 0;
unsigned int i;
@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
ip_end = (void *)ip + info->func_size;
for (i = 0; i < max_insns && ip < ip_end; i++) {
for (i = 0; i < max_insns; i++) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.word = ip->halfword[0] << 16;
last_insn_size = 2;

View File

@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
endif
cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap
#
# Some versions of binutils, not currently mainline as of 2019/02/04, support
# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a
# description).
#
# We disable this in order to prevent the assembler meddling with the
# instruction that labels refer to, ie. if we label an ll instruction:
#
# 1: ll v0, 0(a0)
#
# ...then with the assembler fix applied the label may actually point at a sync
# instruction inserted by the assembler, and if we were using the label in an
# exception table the table would no longer contain the address of the ll
# instruction.
#
# Avoid this by explicitly disabling that assembler behaviour. If upstream
# binutils does not merge support for the flag then we can revisit & remove
# this later - for now it ensures vendor toolchains don't cause problems.
#
cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
#
# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
# as MIPS64 R2; older versions as just R1. This leaves the possibility open

View File

@ -59,7 +59,12 @@ static void loongson_poweroff(void)
{
#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
mach_prepare_shutdown();
unreachable();
/*
* It needs a wait loop here, but mips/kernel/reset.c already calls
* a generic delay loop, machine_hang(), so simply return.
*/
return;
#else
void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;

View File

@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* to mimic that here by taking a load/istream page
* fault.
*/
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(p, 0);
UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
uasm_i_jr(p, ptr);
@ -1646,6 +1648,8 @@ static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(p, 0);
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_lld(p, pte, 0, ptr);
@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void)
#endif
uasm_l_nopage_tlbl(&l, p);
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_0 & 1) {
@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void)
#endif
uasm_l_nopage_tlbs(&l, p);
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void)
#endif
uasm_l_nopage_tlbm(&l, p);
if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {

View File

@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
if (octeon_has_feature(OCTEON_FEATURE_PCIE))
return 0;
if (!octeon_is_pci_host()) {
pr_notice("Not in host mode, PCI Controller not initialized\n");
return 0;
}
/* Point pcibios_map_irq() to the PCI version of it */
octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
else
octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
if (!octeon_is_pci_host()) {
pr_notice("Not in host mode, PCI Controller not initialized\n");
return 0;
}
/* PCI I/O and PCI MEM values */
set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
ioport_resource.start = 0;

View File

@ -8,6 +8,7 @@ ccflags-vdso := \
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
$(filter -march=%,$(KBUILD_CFLAGS)) \
$(filter -m%-float,$(KBUILD_CFLAGS)) \
-D__VDSO__
ifdef CONFIG_CC_IS_CLANG
@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)

View File

@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
struct spinlock *old_pmd_ptl,
struct vm_area_struct *vma)
{
if (radix_enabled())
return false;
/*
* Archs like ppc64 use pgtable to store per pmd
* specific information. So when we switch the pmd,
* we should also withdraw and deposit the pgtable
*/
return true;
}
extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
struct spinlock *old_pmd_ptl,
struct vm_area_struct *vma);
/*
* Hash translation mode use the deposited table to store hash pte
* slot information.
*/
#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
static inline bool arch_needs_pgtable_deposit(void)
{

View File

@ -400,3 +400,25 @@ void arch_report_meminfo(struct seq_file *m)
atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */
/*
* For hash translation mode, we use the deposited table to store hash slot
* information and they are stored at PTRS_PER_PMD offset from related pmd
* location. Hence a pmd move requires deposit and withdraw.
*
* For radix translation with split pmd ptl, we store the deposited table in the
* pmd page. Hence if we have different pmd page we need to withdraw during pmd
* move.
*
* With hash we use deposited table always irrespective of anon or not.
* With radix we use deposited table only for anonymous mapping.
*/
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
struct spinlock *old_pmd_ptl,
struct vm_area_struct *vma)
{
if (radix_enabled())
return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
return true;
}

View File

@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
uint64_t rc, token;
uint64_t saved = 0;
/*
* When the hypervisor cannot map all the requested memory in a single
@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
p->blocks, BIND_ANY_ADDR, token);
token = ret[0];
if (!saved)
saved = ret[1];
cond_resched();
} while (rc == H_BUSY);
@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
return -ENXIO;
}
p->bound_addr = ret[1];
p->bound_addr = saved;
dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);

View File

@ -35,6 +35,12 @@
#define _PAGE_SPECIAL _PAGE_SOFT
#define _PAGE_TABLE _PAGE_PRESENT
/*
* _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
* distinguish them from swapped out pages
*/
#define _PAGE_PROT_NONE _PAGE_READ
#define _PAGE_PFN_SHIFT 10
/* Set of bits to preserve across pte_modify() */

View File

@ -44,7 +44,7 @@
/* Page protection bits */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
#define PAGE_NONE __pgprot(0)
#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
static inline int pmd_present(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_PRESENT);
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
static inline int pmd_none(pmd_t pmd)
@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
static inline int pte_present(pte_t pte)
{
return (pte_val(pte) & _PAGE_PRESENT);
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
static inline int pte_none(pte_t pte)
@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
*
* Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero)
* bit 1: reserved for future use (zero)
* bit 1: _PAGE_PROT_NONE (zero)
* bits 2 to 6: swap type
* bits 7 to XLEN-1: swap offset
*/

View File

@ -18,8 +18,6 @@
#include <asm/cache.h>
#include <asm/thread_info.h>
#define MAX_BYTES_PER_LONG 0x10
OUTPUT_ARCH(riscv)
ENTRY(_start)
@ -76,6 +74,8 @@ SECTIONS
*(.sbss*)
}
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
EXCEPTION_TABLE(0x10)
NOTES
@ -83,10 +83,6 @@ SECTIONS
*(.rel.dyn*)
}
BSS_SECTION(MAX_BYTES_PER_LONG,
MAX_BYTES_PER_LONG,
MAX_BYTES_PER_LONG)
_end = .;
STABS_DEBUG

View File

@ -30,10 +30,10 @@
.section .text
ENTRY(swsusp_arch_suspend)
lg %r1,__LC_NODAT_STACK
aghi %r1,-STACK_FRAME_OVERHEAD
stmg %r6,%r15,__SF_GPRS(%r1)
aghi %r1,-STACK_FRAME_OVERHEAD
stg %r15,__SF_BACKCHAIN(%r1)
lgr %r1,%r15
lgr %r15,%r1
/* Store FPU registers */
brasl %r14,save_fpu_regs

View File

@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq)
if (ai == -1UL)
break;
inc_irq_stat(IRQIO_MSI);
airq_iv_lock(aibv, ai);
generic_handle_irq(airq_iv_get_data(aibv, ai));
airq_iv_unlock(aibv, ai);
}
}
}
@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
zdev->aisb = aisb;
/* Create adapter interrupt vector */
zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA);
zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
if (!zdev->aibv)
return -ENOMEM;

View File

@ -602,10 +602,12 @@ ENTRY(trampoline_32bit_src)
3:
/* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
pushl %ecx
pushl %edx
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
wrmsr
popl %edx
popl %ecx
/* Enable PAE and LA57 (if required) paging modes */

View File

@ -3558,6 +3558,14 @@ static void free_excl_cntrs(int cpu)
}
static void intel_pmu_cpu_dying(int cpu)
{
fini_debug_store_on_cpu(cpu);
if (x86_pmu.counter_freezing)
disable_counter_freeze();
}
static void intel_pmu_cpu_dead(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_shared_regs *pc;
@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu)
}
free_excl_cntrs(cpu);
fini_debug_store_on_cpu(cpu);
if (x86_pmu.counter_freezing)
disable_counter_freeze();
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@ -3663,6 +3666,7 @@ static __initconst const struct x86_pmu core_pmu = {
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
.cpu_dead = intel_pmu_cpu_dead,
};
static struct attribute *intel_pmu_attrs[];
@ -3703,6 +3707,8 @@ static __initconst const struct x86_pmu intel_pmu = {
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
.cpu_dead = intel_pmu_cpu_dead,
.guest_get_msrs = intel_guest_get_msrs,
.sched_task = intel_pmu_sched_task,
};

View File

@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
.id_table = snbep_uncore_pci_ids,
};
#define NODE_ID_MASK 0x7
/*
* build pci bus to socket mapping
*/
@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
if (err)
break;
nodeid = config;
nodeid = config & NODE_ID_MASK;
/* get the Node ID mapping */
err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
if (err)

View File

@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
native_set_pmd(pmdp, pmd);
set_pmd(pmdp, pmd);
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,

View File

@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
quirk_no_way_out(i, m, regs);
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
m->bank = i;
mce_read_aux(m, i);
*msg = tmp;
return 1;

View File

@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
#endif
/*
* See set_mce_nospec().
*
* Machine check recovery code needs to change cache mode of poisoned pages to
* UC to avoid speculative access logging another error. But passing the
* address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
* speculative access. So we cheat and flip the top bit of the address. This
* works fine for the code that updates the page tables. But at the end of the
* process we need to flush the TLB and cache and the non-canonical address
* causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
*
* But in the common case we already have a canonical address. This code
* will fix the top bit if needed and is a no-op otherwise.
*/
static inline unsigned long fix_addr(unsigned long addr)
{
#ifdef CONFIG_X86_64
return (long)(addr << 1) >> 1;
#else
return addr;
#endif
}
static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
if (cpa->flags & CPA_PAGES_ARRAY) {
@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
unsigned int i;
for (i = 0; i < cpa->numpages; i++)
__flush_tlb_one_kernel(__cpa_addr(cpa, i));
__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}
static void cpa_flush(struct cpa_data *data, int cache)
@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
* Only flush present addresses:
*/
if (pte && (pte_val(*pte) & _PAGE_PRESENT))
clflush_cache_range_opt((void *)addr, PAGE_SIZE);
clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
}
mb();
}
@ -1627,29 +1650,6 @@ out:
return ret;
}
/*
* Machine check recovery code needs to change cache mode of poisoned
* pages to UC to avoid speculative access logging another error. But
* passing the address of the 1:1 mapping to set_memory_uc() is a fine
* way to encourage a speculative access. So we cheat and flip the top
* bit of the address. This works fine for the code that updates the
* page tables. But at the end of the process we need to flush the cache
* and the non-canonical address causes a #GP fault when used by the
* CLFLUSH instruction.
*
* But in the common case we already have a canonical address. This code
* will fix the top bit if needed and is a no-op otherwise.
*/
static inline unsigned long make_addr_canonical_again(unsigned long addr)
{
#ifdef CONFIG_X86_64
return (long)(addr << 1) >> 1;
#else
return addr;
#endif
}
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
int force_split, int in_flag,

View File

@ -72,6 +72,7 @@
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
@ -591,6 +592,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
u64 now = ktime_to_ns(ktime_get());
bool issue_as_root = bio_issue_as_root_blkg(bio);
bool enabled = false;
int inflight = 0;
blkg = bio->bi_blkg;
if (!blkg || !bio_flagged(bio, BIO_TRACKED))
@ -601,6 +603,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
return;
enabled = blk_iolatency_enabled(iolat->blkiolat);
if (!enabled)
return;
while (blkg && blkg->parent) {
iolat = blkg_to_lat(blkg);
if (!iolat) {
@ -609,8 +614,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
}
rqw = &iolat->rq_wait;
atomic_dec(&rqw->inflight);
if (!enabled || iolat->min_lat_nsec == 0)
inflight = atomic_dec_return(&rqw->inflight);
WARN_ON_ONCE(inflight < 0);
if (iolat->min_lat_nsec == 0)
goto next;
iolatency_record_time(iolat, &bio->bi_issue, now,
issue_as_root);
@ -754,10 +760,13 @@ int blk_iolatency_init(struct request_queue *q)
return 0;
}
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
/*
* return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
* return 0.
*/
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
struct blk_iolatency *blkiolat = iolat->blkiolat;
u64 oldval = iolat->min_lat_nsec;
iolat->min_lat_nsec = val;
@ -766,9 +775,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
BLKIOLATENCY_MAX_WIN_SIZE);
if (!oldval && val)
atomic_inc(&blkiolat->enabled);
return 1;
if (oldval && !val)
atomic_dec(&blkiolat->enabled);
return -1;
return 0;
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@ -800,6 +810,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
u64 lat_val = 0;
u64 oldval;
int ret;
int enable = 0;
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
if (ret)
@ -834,7 +845,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
blkg = ctx.blkg;
oldval = iolat->min_lat_nsec;
iolatency_set_min_lat_nsec(blkg, lat_val);
enable = iolatency_set_min_lat_nsec(blkg, lat_val);
if (enable) {
WARN_ON_ONCE(!blk_get_queue(blkg->q));
blkg_get(blkg);
}
if (oldval != iolat->min_lat_nsec) {
iolatency_clear_scaling(blkg);
}
@ -842,6 +858,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
ret = 0;
out:
blkg_conf_finish(&ctx);
if (ret == 0 && enable) {
struct iolatency_grp *tmp = blkg_to_lat(blkg);
struct blk_iolatency *blkiolat = tmp->blkiolat;
blk_mq_freeze_queue(blkg->q);
if (enable == 1)
atomic_inc(&blkiolat->enabled);
else if (enable == -1)
atomic_dec(&blkiolat->enabled);
else
WARN_ON_ONCE(1);
blk_mq_unfreeze_queue(blkg->q);
blkg_put(blkg);
blk_put_queue(blkg->q);
}
return ret ?: nbytes;
}
@ -977,8 +1011,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
struct blk_iolatency *blkiolat = iolat->blkiolat;
int ret;
iolatency_set_min_lat_nsec(blkg, 0);
ret = iolatency_set_min_lat_nsec(blkg, 0);
if (ret == 1)
atomic_inc(&blkiolat->enabled);
if (ret == -1)
atomic_dec(&blkiolat->enabled);
iolatency_clear_scaling(blkg);
}

View File

@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
spin_unlock_irq(&q->requeue_lock);
list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
if (!(rq->rq_flags & RQF_SOFTBARRIER))
if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
continue;
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
blk_mq_sched_insert_request(rq, true, false, false);
/*
* If RQF_DONTPREP, rq has contained some driver specific
* data, so insert it to hctx dispatch list to avoid any
* merge.
*/
if (rq->rq_flags & RQF_DONTPREP)
blk_mq_request_bypass_insert(rq, false);
else
blk_mq_sched_insert_request(rq, true, false, false);
}
while (!list_empty(&rq_list)) {

View File

@ -36,7 +36,6 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

View File

@ -1029,6 +1029,9 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
/* Initialize debug output. Linux does not use ACPICA defaults */
acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
#ifdef CONFIG_X86
/*
* If the machine falls into the DMI check table,

View File

@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |

View File

@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
if (lock_fdc(drive))
return -EINTR;
return 0;
poll_drive(false, 0);
process_fd_request();
}

View File

@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_ivgen_init(new_drvdata);
if (rc) {
dev_err(dev, "cc_ivgen_init failed\n");
goto post_power_mgr_err;
goto post_buf_mgr_err;
}
/* Allocate crypto algs */
@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_hash_err;
}
/* All set, we can allow autosuspend */
cc_pm_go(new_drvdata);
/* If we got here and FIPS mode is enabled
* it means all FIPS test passed, so let TEE
* know we're good.
@ -417,8 +420,6 @@ post_cipher_err:
cc_cipher_free(new_drvdata);
post_ivgen_err:
cc_ivgen_fini(new_drvdata);
post_power_mgr_err:
cc_pm_fini(new_drvdata);
post_buf_mgr_err:
cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:

View File

@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev)
int cc_pm_init(struct cc_drvdata *drvdata)
{
int rc = 0;
struct device *dev = drvdata_to_dev(drvdata);
/* must be before the enabling to avoid resdundent suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
/* activate the PM module */
rc = pm_runtime_set_active(dev);
if (rc)
return rc;
/* enable the PM module*/
pm_runtime_enable(dev);
return pm_runtime_set_active(dev);
}
return rc;
/* enable the PM module*/
void cc_pm_go(struct cc_drvdata *drvdata)
{
pm_runtime_enable(drvdata_to_dev(drvdata));
}
void cc_pm_fini(struct cc_drvdata *drvdata)

View File

@ -16,6 +16,7 @@
extern const struct dev_pm_ops ccree_pm;
int cc_pm_init(struct cc_drvdata *drvdata);
void cc_pm_go(struct cc_drvdata *drvdata);
void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
return 0;
}
static void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
static inline int cc_pm_suspend(struct device *dev)

View File

@ -203,6 +203,7 @@ struct at_xdmac_chan {
u32 save_cim;
u32 save_cnda;
u32 save_cndc;
u32 irq_status;
unsigned long status;
struct tasklet_struct tasklet;
struct dma_slave_config sconfig;
@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
struct at_xdmac_desc *desc;
u32 error_mask;
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
__func__, atchan->status);
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
__func__, atchan->irq_status);
error_mask = AT_XDMAC_CIS_RBEIS
| AT_XDMAC_CIS_WBEIS
@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
if (at_xdmac_chan_is_cyclic(atchan)) {
at_xdmac_handle_cyclic(atchan);
} else if ((atchan->status & AT_XDMAC_CIS_LIS)
|| (atchan->status & error_mask)) {
} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
|| (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd;
if (atchan->status & AT_XDMAC_CIS_RBEIS)
if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
dev_err(chan2dev(&atchan->chan), "read bus error!!!");
if (atchan->status & AT_XDMAC_CIS_WBEIS)
if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
dev_err(chan2dev(&atchan->chan), "write bus error!!!");
if (atchan->status & AT_XDMAC_CIS_ROIS)
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock(&atchan->lock);
@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
atchan = &atxdmac->chan[i];
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
atchan->status = chan_status & chan_imr;
atchan->irq_status = chan_status & chan_imr;
dev_vdbg(atxdmac->dma.dev,
"%s: chan%d: imr=0x%x, status=0x%x\n",
__func__, i, chan_imr, chan_status);
@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
tasklet_schedule(&atchan->tasklet);

View File

@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
}
}
static int bcm2835_dma_abort(void __iomem *chan_base)
static int bcm2835_dma_abort(struct bcm2835_chan *c)
{
unsigned long cs;
void __iomem *chan_base = c->chan_base;
long int timeout = 10000;
cs = readl(chan_base + BCM2835_DMA_CS);
if (!(cs & BCM2835_DMA_ACTIVE))
/*
* A zero control block address means the channel is idle.
* (The ACTIVE flag in the CS register is not a reliable indicator.)
*/
if (!readl(chan_base + BCM2835_DMA_ADDR))
return 0;
/* Write 0 to the active bit - Pause the DMA */
writel(0, chan_base + BCM2835_DMA_CS);
/* Wait for any current AXI transfer to complete */
while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
while ((readl(chan_base + BCM2835_DMA_CS) &
BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
cpu_relax();
cs = readl(chan_base + BCM2835_DMA_CS);
}
/* We'll un-pause when we set of our next DMA */
/* Peripheral might be stuck and fail to signal AXI write responses */
if (!timeout)
return -ETIMEDOUT;
if (!(cs & BCM2835_DMA_ACTIVE))
return 0;
/* Terminate the control block chain */
writel(0, chan_base + BCM2835_DMA_NEXTCB);
/* Abort the whole DMA */
writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
chan_base + BCM2835_DMA_CS);
dev_err(c->vc.chan.device->dev,
"failed to complete outstanding writes\n");
writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
return 0;
}
@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
spin_lock_irqsave(&c->vc.lock, flags);
/* Acknowledge interrupt */
writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
/*
* Clear the INT flag to receive further interrupts. Keep the channel
* active in case the descriptor is cyclic or in case the client has
* already terminated the descriptor and issued a new one. (May happen
* if this IRQ handler is threaded.) If the channel is finished, it
* will remain idle despite the ACTIVE flag being set.
*/
writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
c->chan_base + BCM2835_DMA_CS);
d = c->desc;
@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
if (d->cyclic) {
/* call the cyclic callback */
vchan_cyclic_callback(&d->vd);
/* Keep the DMA engine running */
writel(BCM2835_DMA_ACTIVE,
c->chan_base + BCM2835_DMA_CS);
} else {
} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
vchan_cookie_complete(&c->desc->vd);
bcm2835_dma_start_desc(c);
}
@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
unsigned long flags;
int timeout = 10000;
LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags);
@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
list_del_init(&c->node);
spin_unlock(&d->lock);
/*
* Stop DMA activity: we assume the callback will not be called
* after bcm_dma_abort() returns (even if it does, it will see
* c->desc is NULL and exit.)
*/
/* stop DMA activity */
if (c->desc) {
vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c->chan_base);
/* Wait for stopping */
while (--timeout) {
if (!(readl(c->chan_base + BCM2835_DMA_CS) &
BCM2835_DMA_ACTIVE))
break;
cpu_relax();
}
if (!timeout)
dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
bcm2835_dma_abort(c);
}
vchan_get_all_descriptors(&c->vc, &head);
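
The rewritten bcm2835_dma_abort() above treats a zero control-block address as the idle indicator and bounds its wait for outstanding AXI writes with a simple countdown, reporting an error when the budget runs out. The following is a hedged, stand-alone sketch of that bounded-poll idiom in generic C; the helper names are invented and stand in for readl()/cpu_relax().

#include <stdbool.h>
#include <stdio.h>

/* Pretend register read; in the driver this would be readl(). */
static int busy_reads_left = 3;
static bool still_busy(void)
{
        return busy_reads_left-- > 0;
}

/* Poll until the condition clears or the budget runs out. */
static int wait_until_idle(long timeout)
{
        while (still_busy() && --timeout)
                ;                       /* the driver uses cpu_relax() here */

        return timeout ? 0 : -1;        /* -ETIMEDOUT in kernel terms */
}

int main(void)
{
        if (wait_until_idle(10000))
                fprintf(stderr, "failed to complete outstanding writes\n");
        else
                puts("idle");
        return 0;
}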

View File

@ -711,11 +711,9 @@ static int dmatest_func(void *data)
srcs[i] = um->addr[i] + src_off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
dmaengine_unmap_put(um);
result("src mapping error", total_tests,
src_off, dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
um->to_cnt++;
}
@ -730,11 +728,9 @@ static int dmatest_func(void *data)
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
dmaengine_unmap_put(um);
result("dst mapping error", total_tests,
src_off, dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
um->bidi_cnt++;
}
@ -762,12 +758,10 @@ static int dmatest_func(void *data)
}
if (!tx) {
dmaengine_unmap_put(um);
result("prep error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
goto error_unmap_continue;
}
done->done = false;
@ -776,12 +770,10 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
dmaengine_unmap_put(um);
result("submit error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
goto error_unmap_continue;
}
dma_async_issue_pending(chan);
@ -790,22 +782,20 @@ static int dmatest_func(void *data)
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
dmaengine_unmap_put(um);
if (!done->done) {
result("test timed out", total_tests, src_off, dst_off,
len, 0);
failed_tests++;
continue;
goto error_unmap_continue;
} else if (status != DMA_COMPLETE) {
result(status == DMA_ERROR ?
"completion error status" :
"completion busy status", total_tests, src_off,
dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
dmaengine_unmap_put(um);
if (params->noverify) {
verbose_result("test passed", total_tests, src_off,
dst_off, len, 0);
@ -846,6 +836,12 @@ static int dmatest_func(void *data)
verbose_result("test passed", total_tests, src_off,
dst_off, len, 0);
}
continue;
error_unmap_continue:
dmaengine_unmap_put(um);
failed_tests++;
}
ktime = ktime_sub(ktime_get(), ktime);
ktime = ktime_sub(ktime, comparetime);

View File

@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
{
struct imxdma_channel *imxdmac = (void *)data;
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
struct imxdma_desc *desc, *next_desc;
unsigned long flags;
spin_lock_irqsave(&imxdma->lock, flags);
@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
if (!list_empty(&imxdmac->ld_queue)) {
desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
node);
next_desc = list_first_entry(&imxdmac->ld_queue,
struct imxdma_desc, node);
list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
if (imxdma_xfer_desc(desc) < 0)
if (imxdma_xfer_desc(next_desc) < 0)
dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
__func__, imxdmac->channel);
}
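
In the imx-dma tasklet above, "desc" still refers to the descriptor that just completed, so the next queued descriptor now goes into a separate "next_desc" instead of clobbering it. A minimal hedged sketch of keeping the two roles in distinct variables (generic C, invented types):

#include <stdio.h>

struct desc { int id; };

int main(void)
{
        struct desc done = { .id = 1 };   /* the descriptor that completed */
        struct desc queued = { .id = 2 }; /* the next one waiting in a queue */

        struct desc *desc = &done;
        struct desc *next_desc = &queued; /* do not overwrite 'desc' here */

        /* start the next transfer ... */
        printf("starting desc %d\n", next_desc->id);

        /* ... and still report completion of the right one afterwards */
        printf("completed desc %d\n", desc->id);
        return 0;
}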

View File

@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
static void scmi_device_release(struct device *dev)
{
kfree(to_scmi_dev(dev));
}
struct scmi_device *
scmi_device_create(struct device_node *np, struct device *parent, int protocol)
{
@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
scmi_dev->dev.parent = parent;
scmi_dev->dev.of_node = np;
scmi_dev->dev.bus = &scmi_bus_type;
scmi_dev->dev.release = scmi_device_release;
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
retval = device_register(&scmi_dev->dev);
@ -156,9 +162,8 @@ free_mem:
void scmi_device_destroy(struct scmi_device *scmi_dev)
{
scmi_handle_put(scmi_dev->handle);
device_unregister(&scmi_dev->dev);
ida_simple_remove(&scmi_bus_id, scmi_dev->id);
kfree(scmi_dev);
device_unregister(&scmi_dev->dev);
}
void scmi_set_handle(struct scmi_device *scmi_dev)
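
The SCMI bus fix above stops scmi_device_destroy() from kfree()ing the device directly; the memory is instead released from the dev.release callback once the last reference is dropped. Below is a hedged userspace analogy of that refcount-plus-release-callback lifetime rule, using a hand-rolled refcount rather than the driver-core API.

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
        void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
        puts("last reference gone, freeing");
        free(o);
}

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                o->release(o);          /* only the final put frees */
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));
        if (!o)
                return 1;
        o->refcount = 2;                /* creator plus one other holder */
        o->release = obj_release;

        obj_put(o);                     /* "destroy": must NOT free directly */
        puts("destroy returned; object still alive for the other holder");
        obj_put(o);                     /* the other holder drops it later */
        return 0;
}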

View File

@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle)
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
if (adev->psp.ta_fw) {
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
}
return 0;
}
@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
if (!psp->adev->psp.ta_fw)
return -ENOENT;
if (!psp->xgmi_context.initialized) {
ret = psp_xgmi_init_shared_buf(psp);
if (ret)

View File

@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err)
goto out2;
if (err) {
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
dev_info(adev->dev,
"psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
} else {
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
goto out2;
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
}
return 0;
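
The psp_v11_0 hunk above makes a failed TA firmware request non-fatal: log it, clear the pointer, and only parse the header when the load succeeded. A minimal hedged sketch of that fail-soft load follows; the loader and firmware name are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *load_blob(const char *name)
{
        (void)name;
        return NULL;                    /* pretend the file is missing */
}

int main(void)
{
        char *ta_fw = load_blob("amdgpu/example_ta.bin");

        if (!ta_fw) {
                /* warn and keep going; the rest of init does not depend on it */
                fprintf(stderr, "failed to load optional firmware, continuing\n");
        } else {
                /* only touch the contents when the blob actually loaded */
                printf("loaded %zu bytes\n", strlen(ta_fw));
                free(ta_fw);
        }
        return 0;
}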

Some files were not shown because too many files have changed in this diff.