
Merge branch 'pm-opp' into pm-cpufreq

Rafael J. Wysocki 2015-09-03 02:46:01 +02:00
commit 0ed537b5fd
301 changed files with 5445 additions and 2289 deletions


@ -35,7 +35,7 @@ Example:
device_type = "dma";
reg = <0x0 0x1f270000 0x0 0x10000>,
<0x0 0x1f200000 0x0 0x10000>,
<0x0 0x1b008000 0x0 0x2000>,
<0x0 0x1b000000 0x0 0x400000>,
<0x0 0x1054a000 0x0 0x100>;
interrupts = <0x0 0x82 0x4>,
<0x0 0xb8 0x4>,


@ -88,7 +88,7 @@ This defines voltage-current-frequency combinations along with other related
properties.
Required properties:
- opp-hz: Frequency in Hz
- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer.
Optional properties:
- opp-microvolt: voltage in micro Volts.
@ -158,20 +158,20 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
opp-shared;
opp00 {
opp-hz = <1000000000>;
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <970000 975000 985000>;
opp-microamp = <70000>;
clock-latency-ns = <300000>;
opp-suspend;
};
opp01 {
opp-hz = <1100000000>;
opp-hz = /bits/ 64 <1100000000>;
opp-microvolt = <980000 1000000 1010000>;
opp-microamp = <80000>;
clock-latency-ns = <310000>;
};
opp02 {
opp-hz = <1200000000>;
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1025000>;
clock-latency-ns = <290000>;
turbo-mode;
@ -237,20 +237,20 @@ independently.
*/
opp00 {
opp-hz = <1000000000>;
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <970000 975000 985000>;
opp-microamp = <70000>;
clock-latency-ns = <300000>;
opp-suspend;
};
opp01 {
opp-hz = <1100000000>;
opp-hz = /bits/ 64 <1100000000>;
opp-microvolt = <980000 1000000 1010000>;
opp-microamp = <80000>;
clock-latency-ns = <310000>;
};
opp02 {
opp-hz = <1200000000>;
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1025000>;
opp-microamp = <90000>;
clock-latency-ns = <290000>;
@ -313,20 +313,20 @@ DVFS state together.
opp-shared;
opp00 {
opp-hz = <1000000000>;
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <970000 975000 985000>;
opp-microamp = <70000>;
clock-latency-ns = <300000>;
opp-suspend;
};
opp01 {
opp-hz = <1100000000>;
opp-hz = /bits/ 64 <1100000000>;
opp-microvolt = <980000 1000000 1010000>;
opp-microamp = <80000>;
clock-latency-ns = <310000>;
};
opp02 {
opp-hz = <1200000000>;
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1025000>;
opp-microamp = <90000>;
clock-latency-ns = <290000>;
@ -339,20 +339,20 @@ DVFS state together.
opp-shared;
opp10 {
opp-hz = <1300000000>;
opp-hz = /bits/ 64 <1300000000>;
opp-microvolt = <1045000 1050000 1055000>;
opp-microamp = <95000>;
clock-latency-ns = <400000>;
opp-suspend;
};
opp11 {
opp-hz = <1400000000>;
opp-hz = /bits/ 64 <1400000000>;
opp-microvolt = <1075000>;
opp-microamp = <100000>;
clock-latency-ns = <400000>;
};
opp12 {
opp-hz = <1500000000>;
opp-hz = /bits/ 64 <1500000000>;
opp-microvolt = <1010000 1100000 1110000>;
opp-microamp = <95000>;
clock-latency-ns = <400000>;
@ -379,7 +379,7 @@ Example 4: Handling multiple regulators
opp-shared;
opp00 {
opp-hz = <1000000000>;
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <970000>, /* Supply 0 */
<960000>, /* Supply 1 */
<960000>; /* Supply 2 */
@ -392,7 +392,7 @@ Example 4: Handling multiple regulators
/* OR */
opp00 {
opp-hz = <1000000000>;
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <970000 975000 985000>, /* Supply 0 */
<960000 965000 975000>, /* Supply 1 */
<960000 965000 975000>; /* Supply 2 */
@ -405,7 +405,7 @@ Example 4: Handling multiple regulators
/* OR */
opp00 {
opp-hz = <1000000000>;
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <970000 975000 985000>, /* Supply 0 */
<960000 965000 975000>, /* Supply 1 */
<960000 965000 975000>; /* Supply 2 */
@ -437,12 +437,12 @@ Example 5: Multiple OPP tables
opp-shared;
opp00 {
opp-hz = <600000000>;
opp-hz = /bits/ 64 <600000000>;
...
};
opp01 {
opp-hz = <800000000>;
opp-hz = /bits/ 64 <800000000>;
...
};
};
@ -453,12 +453,12 @@ Example 5: Multiple OPP tables
opp-shared;
opp10 {
opp-hz = <1000000000>;
opp-hz = /bits/ 64 <1000000000>;
...
};
opp11 {
opp-hz = <1100000000>;
opp-hz = /bits/ 64 <1100000000>;
...
};
};


@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
Required properties:
- compatible : "mediatek,mt8173-max98090"
- mediatek,audio-codec: the phandle of the MAX98090 audio codec
- mediatek,platform: the phandle of MT8173 ASoC platform
Example:
sound {
compatible = "mediatek,mt8173-max98090";
mediatek,audio-codec = <&max98090>;
mediatek,platform = <&afe>;
};


@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
Required properties:
- compatible : "mediatek,mt8173-rt5650-rt5676"
- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
- mediatek,platform: the phandle of MT8173 ASoC platform
Example:
sound {
compatible = "mediatek,mt8173-rt5650-rt5676";
mediatek,audio-codec = <&rt5650 &rt5676>;
mediatek,platform = <&afe>;
};


@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
Required properties:
- compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
- reg: Base address and size of the controller's memory area
- clocks: phandle to the AHB clock.
- clocks: phandle of the AHB clock.
- clock-names: has to be "ahb".
- #address-cells: <1>, as required by generic SPI binding.
- #size-cells: <0>, also as required by generic SPI binding.
@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
Example:
spi@1F000000 {
spi@1f000000 {
compatible = "qca,ar9132-spi", "qca,ar7100-spi";
reg = <0x1F000000 0x10>;
reg = <0x1f000000 0x10>;
clocks = <&pll 2>;
clock-names = "ahb";


@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
temp[2-9]_input CPU temperatures (1/1000 degree,
0.125 degree resolution)
fan[1-4]_mode R/W, 0/1 for manual or SmartFan mode
pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode
Setting SmartFan mode is supported only if it has been
previously configured by BIOS (or configuration EEPROM)
fan[1-4]_pwm R/O in SmartFan mode, R/W in manual control mode
pwm[1-4] R/O in SmartFan mode, R/W in manual control mode
The driver checks sensor control registers and does not export the sensors
that are not enabled. Anyway, a sensor that is enabled may actually be not


@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
buf += " &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
if proto_ident == "FC":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
elif proto_ident == "SAS":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
elif proto_ident == "iSCSI":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = " + fabric_mod_name + ",\n"
buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(" + fabric_mod_name + "_ops);\n"
buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n"
buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"


@ -5600,6 +5600,7 @@ F: kernel/irq/
IRQCHIP DRIVERS
M: Thomas Gleixner <tglx@linutronix.de>
M: Jason Cooper <jason@lakedaemon.net>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@ -5608,11 +5609,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/
F: drivers/irqchip/
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
M: Jiang Liu <jiang.liu@linux.intel.com>
M: Marc Zyngier <marc.zyngier@arm.com>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/IRQ-domain.txt
F: include/linux/irqdomain.h
F: kernel/irq/irqdomain.c
F: kernel/irq/msi.c
ISAPNP
M: Jaroslav Kysela <perex@perex.cz>


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*


@ -138,8 +138,8 @@
mipi_phy: video-phy@10020710 {
compatible = "samsung,s5pv210-mipi-video-phy";
reg = <0x10020710 8>;
#phy-cells = <1>;
syscon = <&pmu_system_controller>;
};
pd_cam: cam-power-domain@10023C00 {


@ -127,6 +127,10 @@
};
};
&cpu0 {
cpu0-supply = <&buck1_reg>;
};
&fimd {
pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
pinctrl-names = "default";


@ -188,6 +188,10 @@
};
};
&cpu0 {
cpu0-supply = <&varm_breg>;
};
&dsi_0 {
vddcore-supply = <&vusb_reg>;
vddio-supply = <&vmipi_reg>;


@ -548,6 +548,10 @@
};
};
&cpu0 {
cpu0-supply = <&vdd_arm_reg>;
};
&pinctrl_1 {
hdmi_hpd: hdmi-hpd {
samsung,pins = "gpx3-7";


@ -40,6 +40,18 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0x900>;
clocks = <&clock CLK_ARM_CLK>;
clock-names = "cpu";
clock-latency = <160000>;
operating-points = <
1200000 1250000
1000000 1150000
800000 1075000
500000 975000
400000 975000
200000 950000
>;
cooling-min-level = <4>;
cooling-max-level = <2>;
#cooling-cells = <2>; /* min followed by max */


@ -286,8 +286,8 @@
can1: can@53fe4000 {
compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
reg = <0x53fe4000 0x1000>;
clocks = <&clks 33>;
clock-names = "ipg";
clocks = <&clks 33>, <&clks 33>;
clock-names = "ipg", "per";
interrupts = <43>;
status = "disabled";
};
@ -295,8 +295,8 @@
can2: can@53fe8000 {
compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
reg = <0x53fe8000 0x1000>;
clocks = <&clks 34>;
clock-names = "ipg";
clocks = <&clks 34>, <&clks 34>;
clock-names = "ipg", "per";
interrupts = <44>;
status = "disabled";
};


@ -13,9 +13,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
reg = <0x02620350 4>, <0x02310110 4>;
reg-names = "control", "multiplier";
fixed-postdiv = <2>;
reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {


@ -22,9 +22,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
reg = <0x02620350 4>, <0x02310110 4>;
reg-names = "control", "multiplier";
fixed-postdiv = <2>;
reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {


@ -22,9 +22,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
reg = <0x02620350 4>, <0x02310110 4>;
reg-names = "control", "multiplier";
fixed-postdiv = <2>;
reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {


@ -17,6 +17,7 @@
};
aliases {
serial1 = &uart1;
stmpe-i2c0 = &stmpe0;
stmpe-i2c1 = &stmpe1;
};


@ -15,6 +15,10 @@
bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
};
aliases {
serial1 = &uart1;
};
src@101e0000 {
/* These crystal drivers are not used on this board */
disable-sxtalo;


@ -757,6 +757,7 @@
clock-names = "uartclk", "apb_pclk";
pinctrl-names = "default";
pinctrl-0 = <&uart0_default_mux>;
status = "disabled";
};
uart1: uart@101fb000 {


@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
* registers. This address is needed early so the OCP registers that
* are part of the device's address space can be ioremapped properly.
*
* If SYSC access is not needed, the registers will not be remapped
* and non-availability of MPU access is not treated as an error.
*
* Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
* -ENXIO on absent or invalid register target address space.
*/
@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
_save_mpu_port_index(oh);
/* if we don't need sysc access we don't need to ioremap */
if (!oh->class->sysc)
return 0;
/* we can't continue without MPU PORT if we need sysc access */
if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
return -ENXIO;
@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
oh->name);
/* Extract the IO space from device tree blob */
if (!np)
if (!np) {
pr_err("omap_hwmod: %s: no dt node\n", oh->name);
return -ENXIO;
}
va_start = of_iomap(np, index + oh->mpu_rt_idx);
} else {
@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
oh->name, np->name);
}
if (oh->class->sysc) {
r = _init_mpu_rt_base(oh, NULL, index, np);
if (r < 0) {
WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
oh->name);
return 0;
}
r = _init_mpu_rt_base(oh, NULL, index, np);
if (r < 0) {
WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
oh->name);
return 0;
}
r = _init_clocks(oh, NULL);


@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
.class = &dra7xx_gpmc_hwmod_class,
.clkdm_name = "l3main1_clkdm",
/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
.flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
.flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS,
.main_clk = "l3_iclk_div",
.prcm = {
.omap4 = {


@ -823,7 +823,7 @@
device_type = "dma";
reg = <0x0 0x1f270000 0x0 0x10000>,
<0x0 0x1f200000 0x0 0x10000>,
<0x0 0x1b008000 0x0 0x2000>,
<0x0 0x1b000000 0x0 0x400000>,
<0x0 0x1054a000 0x0 0x100>;
interrupts = <0x0 0x82 0x4>,
<0x0 0xb8 0x4>,


@ -122,12 +122,12 @@ static int __init uefi_init(void)
/* Show what we know for posterity */
c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
sizeof(vendor));
sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = c16[i];
vendor[i] = '\0';
early_memunmap(c16, sizeof(vendor));
early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
}
pr_info("EFI v%u.%.02u by %s\n",


@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
{
unsigned long flags;
if (!clk)
return 0;
spin_lock_irqsave(&clk_lock, flags);
__clk_enable(clk);
spin_unlock_irqrestore(&clk_lock, flags);
@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
if (IS_ERR_OR_NULL(clk))
return;
spin_lock_irqsave(&clk_lock, flags);
__clk_disable(clk);
spin_unlock_irqrestore(&clk_lock, flags);
@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
unsigned long flags;
unsigned long rate;
if (!clk)
return 0;
spin_lock_irqsave(&clk_lock, flags);
rate = clk->get_rate(clk);
spin_unlock_irqrestore(&clk_lock, flags);
@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long flags, actual_rate;
if (!clk)
return 0;
if (!clk->set_rate)
return -ENOSYS;
@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
unsigned long flags;
long ret;
if (!clk)
return 0;
if (!clk->set_rate)
return -ENOSYS;
@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
unsigned long flags;
int ret;
if (!clk)
return 0;
if (!clk->set_parent)
return -ENOSYS;
@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
struct clk *clk_get_parent(struct clk *clk)
{
return clk->parent;
return !clk ? NULL : clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);
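With these checks in place, the at32ap clk functions follow the common clk API convention that a NULL struct clk pointer is a valid dummy clock: enabling it succeeds, disabling it is a no-op, and its rate reads back as 0. A minimal consumer sketch relying on that convention (the names are illustrative, not from the patch):

#include <linux/clk.h>

/* opt_clk may legitimately stay NULL on boards without this clock. */
static struct clk *opt_clk;

static int foo_start(void)
{
	/* No NULL check needed: clk_enable(NULL) simply returns 0. */
	return clk_enable(opt_clk);
}

static void foo_stop(void)
{
	/* Likewise a no-op when opt_clk is NULL. */
	clk_disable(opt_clk);
}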


@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
}
/* Unmask the event */
if (eeh_enabled())
if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
enable_irq(eeh_event_irq);
return ret;


@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
unsigned levels, unsigned long limit,
unsigned long *current_offset)
unsigned long *current_offset, unsigned long *total_allocated)
{
struct page *tce_mem = NULL;
__be64 *addr, *tmp;
@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
}
addr = page_address(tce_mem);
memset(addr, 0, allocated);
*total_allocated += allocated;
--levels;
if (!levels) {
@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
for (i = 0; i < entries; ++i) {
tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
levels, limit, current_offset);
levels, limit, current_offset, total_allocated);
if (!tmp)
break;
@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
struct iommu_table *tbl)
{
void *addr;
unsigned long offset = 0, level_shift;
unsigned long offset = 0, level_shift, total_allocated = 0;
const unsigned window_shift = ilog2(window_size);
unsigned entries_shift = window_shift - page_shift;
unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
/* Allocate TCE table */
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
levels, tce_table_size, &offset);
levels, tce_table_size, &offset, &total_allocated);
/* addr==NULL means that the first level allocation failed */
if (!addr)
@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
page_shift);
tbl->it_level_size = 1ULL << (level_shift - 3);
tbl->it_indirect_levels = levels - 1;
tbl->it_allocated_size = offset;
tbl->it_allocated_size = total_allocated;
pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
window_size, tce_table_size, bus_offset);


@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
if (!test_facility(34))
return -EOPNOTSUPP;
if (!this_cpu_ci)
return -EINVAL;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);


@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
BPF_REG_1, offsetof(struct sk_buff, data));
}
/* BPF compatibility: clear A (%b7) and X (%b8) registers */
if (REG_SEEN(BPF_REG_7))
/* lghi %b7,0 */
EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
if (REG_SEEN(BPF_REG_8))
/* lghi %b8,0 */
EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
/* BPF compatibility: clear A (%b0) and X (%b7) registers */
if (REG_SEEN(BPF_REG_A))
/* lghi %ba,0 */
EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
if (REG_SEEN(BPF_REG_X))
/* lghi %bx,0 */
EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
}
/*


@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
unsigned int e820_type = 0;
unsigned long m = efi->efi_memmap;
#ifdef CONFIG_X86_64
m |= (u64)efi->efi_memmap_hi << 32;
#endif
d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
switch (d->type) {
case EFI_RESERVED_TYPE:


@ -280,21 +280,6 @@ static inline void clear_LDT(void)
set_ldt(NULL, 0);
}
/*
* load one particular LDT into the current CPU
*/
static inline void load_LDT_nolock(mm_context_t *pc)
{
set_ldt(pc->ldt, pc->size);
}
static inline void load_LDT(mm_context_t *pc)
{
preempt_disable();
load_LDT_nolock(pc);
preempt_enable();
}
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));


@ -9,8 +9,7 @@
* we put the segment information here.
*/
typedef struct {
void *ldt;
int size;
struct ldt_struct *ldt;
#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */


@ -33,6 +33,50 @@ static inline void load_mm_cr4(struct mm_struct *mm)
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
/*
* ldt_structs can be allocated, used, and freed, but they are never
* modified while live.
*/
struct ldt_struct {
/*
* Xen requires page-aligned LDTs with special permissions. This is
* needed to prevent us from installing evil descriptors such as
* call gates. On native, we could merge the ldt_struct and LDT
* allocations, but it's not worth trying to optimize.
*/
struct desc_struct *entries;
int size;
};
static inline void load_mm_ldt(struct mm_struct *mm)
{
struct ldt_struct *ldt;
/* lockless_dereference synchronizes with smp_store_release */
ldt = lockless_dereference(mm->context.ldt);
/*
* Any change to mm->context.ldt is followed by an IPI to all
* CPUs with the mm active. The LDT will not be freed until
* after the IPI is handled by all such CPUs. This means that,
* if the ldt_struct changes before we return, the values we see
* will be safe, and the new values will be loaded before we run
* any user code.
*
* NB: don't try to convert this to use RCU without extreme care.
* We would still need IRQs off, because we don't want to change
* the local LDT after an IPI loaded a newer value than the one
* that we can see.
*/
if (unlikely(ldt))
set_ldt(ldt->entries, ldt->size);
else
clear_LDT();
DEBUG_LOCKS_WARN_ON(preemptible());
}
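The comment relies on the kernel's lockless pointer-publication idiom: lockless_dereference() in the reader pairs with smp_store_release() in the writer (install_ldt() in this series). A minimal sketch of the pattern, using an illustrative struct that is not part of the patch:

#include <linux/compiler.h>
#include <asm/barrier.h>

struct cfg {
	int a;
	int b;
};

static struct cfg *live_cfg;

/* Writer: initialize the object fully, then publish the pointer. */
static void publish(struct cfg *c)
{
	c->a = 1;
	c->b = 2;
	smp_store_release(&live_cfg, c);
}

/*
 * Reader: lockless_dereference() orders loads through the pointer
 * after the pointer load itself, so a non-NULL result is always a
 * fully-initialized object.
 */
static int consume(void)
{
	struct cfg *c = lockless_dereference(live_cfg);

	return c ? c->a + c->b : 0;
}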
/*
* Used for LDT copy/destruction.
*/
@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
* never free an LDT while the mm still exists. That
* means that next->context.ldt != prev->context.ldt,
* because mms never share an LDT.
* never set context.ldt to NULL while the mm still
* exists. That means that next->context.ldt !=
* prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
load_mm_ldt(next);
}
#ifdef CONFIG_SMP
else {
@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
load_LDT_nolock(&next->context);
load_mm_ldt(next);
}
}
#endif


@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
*/
if (irq < nr_legacy_irqs() && data->count == 1) {
if (info->ioapic_trigger != data->trigger)
mp_register_handler(irq, data->trigger);
mp_register_handler(irq, info->ioapic_trigger);
data->entry.trigger = data->trigger = info->ioapic_trigger;
data->entry.polarity = data->polarity = info->ioapic_polarity;
}


@ -1410,7 +1410,7 @@ void cpu_init(void)
load_sp0(t, &current->thread);
set_tss_desc(cpu, t);
load_TR_desc();
load_LDT(&init_mm.context);
load_mm_ldt(&init_mm);
clear_all_debug_regs();
dbg_restore_debug_regs();
@ -1459,7 +1459,7 @@ void cpu_init(void)
load_sp0(t, thread);
set_tss_desc(cpu, t);
load_TR_desc();
load_LDT(&init_mm.context);
load_mm_ldt(&init_mm);
t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);


@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
int idx = segment >> 3;
if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
struct ldt_struct *ldt;
if (idx > LDT_ENTRIES)
return 0;
if (idx > current->active_mm->context.size)
/* IRQs are off, so this synchronizes with smp_store_release */
ldt = lockless_dereference(current->active_mm->context.ldt);
if (!ldt || idx > ldt->size)
return 0;
desc = current->active_mm->context.ldt;
desc = &ldt->entries[idx];
} else {
if (idx > GDT_ENTRIES)
return 0;
desc = raw_cpu_ptr(gdt_page.gdt);
desc = raw_cpu_ptr(gdt_page.gdt) + idx;
}
return get_desc_base(desc + idx);
return get_desc_base(desc);
}
#ifdef CONFIG_COMPAT


@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@ -20,82 +21,82 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#ifdef CONFIG_SMP
/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
if (current->active_mm == current_mm)
load_LDT(&current->active_mm->context);
mm_context_t *pc;
if (current->active_mm != current_mm)
return;
pc = &current->active_mm->context;
set_ldt(pc->ldt->entries, pc->ldt->size);
}
#endif
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(int size)
{
void *oldldt, *newldt;
int oldsize;
struct ldt_struct *new_ldt;
int alloc_size;
if (mincount <= pc->size)
return 0;
oldsize = pc->size;
mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
if (size > LDT_ENTRIES)
return NULL;
new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
if (!new_ldt)
return NULL;
BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
alloc_size = size * LDT_ENTRY_SIZE;
/*
* Xen is very picky: it requires a page-aligned LDT that has no
* trailing nonzero bytes in any page that contains LDT descriptors.
* Keep it simple: zero the whole allocation and never allocate less
* than PAGE_SIZE.
*/
if (alloc_size > PAGE_SIZE)
new_ldt->entries = vzalloc(alloc_size);
else
newldt = (void *)__get_free_page(GFP_KERNEL);
new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!newldt)
return -ENOMEM;
if (oldsize)
memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
oldldt = pc->ldt;
memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
(mincount - oldsize) * LDT_ENTRY_SIZE);
paravirt_alloc_ldt(newldt, mincount);
#ifdef CONFIG_X86_64
/* CHECKME: Do we really need this ? */
wmb();
#endif
pc->ldt = newldt;
wmb();
pc->size = mincount;
wmb();
if (reload) {
#ifdef CONFIG_SMP
preempt_disable();
load_LDT(pc);
if (!cpumask_equal(mm_cpumask(current->mm),
cpumask_of(smp_processor_id())))
smp_call_function(flush_ldt, current->mm, 1);
preempt_enable();
#else
load_LDT(pc);
#endif
if (!new_ldt->entries) {
kfree(new_ldt);
return NULL;
}
if (oldsize) {
paravirt_free_ldt(oldldt, oldsize);
if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
else
put_page(virt_to_page(oldldt));
}
return 0;
new_ldt->size = size;
return new_ldt;
}
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
int err = alloc_ldt(new, old->size, 0);
int i;
paravirt_alloc_ldt(ldt->entries, ldt->size);
}
if (err < 0)
return err;
/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
struct ldt_struct *ldt)
{
/* Synchronizes with lockless_dereference in load_mm_ldt. */
smp_store_release(&current_mm->context.ldt, ldt);
for (i = 0; i < old->size; i++)
write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
return 0;
/* Activate the LDT for all CPUs using current_mm. */
on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}
static void free_ldt_struct(struct ldt_struct *ldt)
{
if (likely(!ldt))
return;
paravirt_free_ldt(ldt->entries, ldt->size);
if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(ldt->entries);
else
kfree(ldt->entries);
kfree(ldt);
}
/*
@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
struct ldt_struct *new_ldt;
struct mm_struct *old_mm;
int retval = 0;
mutex_init(&mm->context.lock);
mm->context.size = 0;
old_mm = current->mm;
if (old_mm && old_mm->context.size > 0) {
mutex_lock(&old_mm->context.lock);
retval = copy_ldt(&mm->context, &old_mm->context);
mutex_unlock(&old_mm->context.lock);
if (!old_mm) {
mm->context.ldt = NULL;
return 0;
}
mutex_lock(&old_mm->context.lock);
if (!old_mm->context.ldt) {
mm->context.ldt = NULL;
goto out_unlock;
}
new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
if (!new_ldt) {
retval = -ENOMEM;
goto out_unlock;
}
memcpy(new_ldt->entries, old_mm->context.ldt->entries,
new_ldt->size * LDT_ENTRY_SIZE);
finalize_ldt_struct(new_ldt);
mm->context.ldt = new_ldt;
out_unlock:
mutex_unlock(&old_mm->context.lock);
return retval;
}
@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
if (mm->context.size) {
#ifdef CONFIG_X86_32
/* CHECKME: Can this ever happen ? */
if (mm == current->active_mm)
clear_LDT();
#endif
paravirt_free_ldt(mm->context.ldt, mm->context.size);
if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
else
put_page(virt_to_page(mm->context.ldt));
mm->context.size = 0;
}
free_ldt_struct(mm->context.ldt);
mm->context.ldt = NULL;
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
int err;
int retval;
unsigned long size;
struct mm_struct *mm = current->mm;
if (!mm->context.size)
return 0;
mutex_lock(&mm->context.lock);
if (!mm->context.ldt) {
retval = 0;
goto out_unlock;
}
if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
mutex_lock(&mm->context.lock);
size = mm->context.size * LDT_ENTRY_SIZE;
size = mm->context.ldt->size * LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;
err = 0;
if (copy_to_user(ptr, mm->context.ldt, size))
err = -EFAULT;
mutex_unlock(&mm->context.lock);
if (err < 0)
goto error_return;
if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
retval = -EFAULT;
goto out_unlock;
}
if (size != bytecount) {
/* zero-fill the rest */
if (clear_user(ptr + size, bytecount - size) != 0) {
err = -EFAULT;
goto error_return;
/* Zero-fill the rest and pretend we read bytecount bytes. */
if (clear_user(ptr + size, bytecount - size)) {
retval = -EFAULT;
goto out_unlock;
}
}
return bytecount;
error_return:
return err;
retval = bytecount;
out_unlock:
mutex_unlock(&mm->context.lock);
return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
struct desc_struct ldt;
int error;
struct user_desc ldt_info;
int oldsize, newsize;
struct ldt_struct *new_ldt, *old_ldt;
error = -EINVAL;
if (bytecount != sizeof(ldt_info))
@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
goto out;
}
mutex_lock(&mm->context.lock);
if (ldt_info.entry_number >= mm->context.size) {
error = alloc_ldt(&current->mm->context,
ldt_info.entry_number + 1, 1);
if (error < 0)
goto out_unlock;
}
/* Allow LDTs to be cleared by the user. */
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
if (oldmode || LDT_empty(&ldt_info)) {
memset(&ldt, 0, sizeof(ldt));
goto install;
if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
LDT_empty(&ldt_info)) {
/* The user wants to clear the entry. */
memset(&ldt, 0, sizeof(ldt));
} else {
if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
error = -EINVAL;
goto out;
}
fill_ldt(&ldt, &ldt_info);
if (oldmode)
ldt.avl = 0;
}
if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
error = -EINVAL;
mutex_lock(&mm->context.lock);
old_ldt = mm->context.ldt;
oldsize = old_ldt ? old_ldt->size : 0;
newsize = max((int)(ldt_info.entry_number + 1), oldsize);
error = -ENOMEM;
new_ldt = alloc_ldt_struct(newsize);
if (!new_ldt)
goto out_unlock;
}
fill_ldt(&ldt, &ldt_info);
if (oldmode)
ldt.avl = 0;
if (old_ldt)
memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
new_ldt->entries[ldt_info.entry_number] = ldt;
finalize_ldt_struct(new_ldt);
/* Install the new entry ... */
install:
write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
install_ldt(mm, new_ldt);
free_ldt_struct(old_ldt);
error = 0;
out_unlock:


@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
void release_thread(struct task_struct *dead_task)
{
if (dead_task->mm) {
if (dead_task->mm->context.size) {
if (dead_task->mm->context.ldt) {
pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
dead_task->comm,
dead_task->mm->context.ldt,
dead_task->mm->context.size);
dead_task->mm->context.ldt->size);
BUG();
}
}


@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
seg &= ~7UL;
mutex_lock(&child->mm->context.lock);
if (unlikely((seg >> 3) >= child->mm->context.size))
if (unlikely(!child->mm->context.ldt ||
(seg >> 3) >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
desc = child->mm->context.ldt + seg;
desc = &child->mm->context.ldt->entries[seg];
base = get_desc_base(desc);
/* 16-bit code segment? */


@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
offsetof(struct bpf_array, map.max_entries));
EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
#define OFFSET1 44 /* number of bytes to jump */
#define OFFSET1 47 /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;
@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
*/
EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 33
#define OFFSET2 36
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
/* prog = array->prog[index]; */
EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */
EMIT1(offsetof(struct bpf_array, prog));
EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
offsetof(struct bpf_array, prog));
EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
/* if (prog == NULL)
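The two jump targets grow by the same 3 bytes for a reason visible in the second hunk: the old sequence encoded the lea with EMIT4() plus a single displacement byte (5 bytes total), while the EMIT4_off32() replacement carries a 4-byte displacement (8 bytes total), needed once offsetof(struct bpf_array, prog) no longer fits in a signed 8-bit immediate. Every forward jump that skips over that instruction must therefore be widened by 3: 44 + 3 = 47 for OFFSET1 and 33 + 3 = 36 for OFFSET2.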


@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
static int __init arch_parse_efi_cmdline(char *str)
{
if (!str) {
pr_warn("need at least one option\n");
return -EINVAL;
}
if (parse_option_str(str, "old_map"))
set_bit(EFI_OLD_MEMMAP, &efi.flags);
if (parse_option_str(str, "debug"))


@ -22,6 +22,7 @@
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@ -153,7 +154,7 @@ static void fix_processor_context(void)
syscall_init(); /* This sets MSR_*STAR and related */
#endif
load_TR_desc(); /* This does ltr */
load_LDT(&current->active_mm->context); /* This does lldt */
load_mm_ldt(current->active_mm); /* This does lldt */
fpu__resume_cpu();
}


@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte_t pte;
unsigned long pfn;
struct page *page;
unsigned char dummy;
ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte = pfn_pte(pfn, prot);
/*
* Careful: update_va_mapping() will fail if the virtual address
* we're poking isn't populated in the page tables. We don't
* need to worry about the direct map (that's always in the page
* tables), but we need to be careful about vmap space. In
* particular, the top level page table can lazily propagate
* entries between processes, so if we've switched mms since we
* vmapped the target in the first place, we might not have the
* top-level page table entry populated.
*
* We disable preemption because we want the same mm active when
* we probe the target and when we issue the hypercall. We'll
* have the same nominal mm, but if we're a kernel thread, lazy
* mm dropping could change our pgd.
*
* Out of an abundance of caution, this uses __get_user() to fault
* in the target address just in case there's some obscure case
* in which the target address isn't readable.
*/
preempt_disable();
pagefault_disable(); /* Avoid warnings due to being atomic. */
__get_user(dummy, (unsigned char __user __force *)v);
pagefault_enable();
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
BUG();
} else
kmap_flush_unused();
preempt_enable();
}
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
int i;
/*
* We need to mark the all aliases of the LDT pages RO. We
* don't need to call vm_flush_aliases(), though, since that's
* only responsible for flushing aliases out the TLBs, not the
* page tables, and Xen will flush the TLB for us if needed.
*
* To avoid confusing future readers: none of this is necessary
* to load the LDT. The hypervisor only checks this when the
* LDT is faulted in due to subsequent descriptor access.
*/
for(i = 0; i < entries; i += entries_per_page)
set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}


@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
dev_warn(&device->dev, "Failed to change power state to %s\n",
acpi_power_state_string(state));
} else {
device->power.state = state;
device->power.state = target_state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device [%s] transitioned to %s\n",
device->pnp.bus_id,

(File diff suppressed because it is too large.)


@ -36,6 +36,12 @@ struct private_data {
unsigned int voltage_tolerance; /* in percentage */
};
static struct freq_attr *cpufreq_dt_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL, /* Extra space for boost-attr if required */
NULL,
};
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct dev_pm_opp *opp;
@ -184,7 +190,6 @@ try_again:
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_dt_platform_data *pd;
struct cpufreq_frequency_table *freq_table;
struct device_node *np;
struct private_data *priv;
@ -193,6 +198,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
struct clk *cpu_clk;
unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
bool need_update = false;
int ret;
ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
@ -208,8 +214,47 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_put_reg_clk;
}
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus);
if (ret) {
/*
* operating-points-v2 not supported, fallback to old method of
* finding shared-OPPs for backward compatibility.
*/
if (ret == -ENOENT)
need_update = true;
else
goto out_node_put;
}
/*
* Initialize OPP tables for all policy->cpus. They will be shared by
* all CPUs which have marked their CPUs shared with OPP bindings.
*
* For platforms not using operating-points-v2 bindings, we do this
* before updating policy->cpus. Otherwise, we will end up creating
* duplicate OPPs for policy->cpus.
*
* OPPs might be populated at runtime, don't check for error here
*/
of_cpumask_init_opp_table(policy->cpus);
if (need_update) {
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
if (!pd || !pd->independent_clocks)
cpumask_setall(policy->cpus);
/*
* OPP tables are initialized only for policy->cpu, do it for
* others as well.
*/
set_cpus_sharing_opps(cpu_dev, policy->cpus);
of_property_read_u32(np, "clock-latency", &transition_latency);
} else {
transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
}
/*
* But we need OPP table to function so if it is not there let's
@ -230,7 +275,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
if (of_property_read_u32(np, "clock-latency", &transition_latency))
if (!transition_latency)
transition_latency = CPUFREQ_ETERNAL;
if (!IS_ERR(cpu_reg)) {
@ -291,11 +336,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_cpufreq_table;
}
policy->cpuinfo.transition_latency = transition_latency;
/* Support turbo/boost mode */
if (policy_has_boost_freq(policy)) {
/* This gets disabled by core on driver unregister */
ret = cpufreq_enable_boost_support();
if (ret)
goto out_free_cpufreq_table;
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
pd = cpufreq_get_driver_data();
if (!pd || !pd->independent_clocks)
cpumask_setall(policy->cpus);
policy->cpuinfo.transition_latency = transition_latency;
of_node_put(np);
@ -306,7 +356,8 @@ out_free_cpufreq_table:
out_free_priv:
kfree(priv);
out_free_opp:
of_free_opp_table(cpu_dev);
of_cpumask_free_opp_table(policy->cpus);
out_node_put:
of_node_put(np);
out_put_reg_clk:
clk_put(cpu_clk);
@ -322,7 +373,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
of_free_opp_table(priv->cpu_dev);
of_cpumask_free_opp_table(policy->related_cpus);
clk_put(policy->clk);
if (!IS_ERR(priv->cpu_reg))
regulator_put(priv->cpu_reg);
@ -367,7 +418,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.exit = cpufreq_exit,
.ready = cpufreq_ready,
.name = "cpufreq-dt",
.attr = cpufreq_generic_attr,
.attr = cpufreq_dt_attr,
};
static int dt_cpufreq_probe(struct platform_device *pdev)


@ -2412,6 +2412,49 @@ int cpufreq_boost_supported(void)
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
static int create_boost_sysfs_file(void)
{
int ret;
if (!cpufreq_boost_supported())
return 0;
/*
* Check if driver provides function to enable boost -
* if not, use cpufreq_boost_set_sw as default
*/
if (!cpufreq_driver->set_boost)
cpufreq_driver->set_boost = cpufreq_boost_set_sw;
ret = cpufreq_sysfs_create_file(&boost.attr);
if (ret)
pr_err("%s: cannot register global BOOST sysfs file\n",
__func__);
return ret;
}
static void remove_boost_sysfs_file(void)
{
if (cpufreq_boost_supported())
cpufreq_sysfs_remove_file(&boost.attr);
}
int cpufreq_enable_boost_support(void)
{
if (!cpufreq_driver)
return -EINVAL;
if (cpufreq_boost_supported())
return 0;
cpufreq_driver->boost_supported = true;
/* This will get removed on driver unregister */
return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
int cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
@ -2465,21 +2508,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
if (cpufreq_boost_supported()) {
/*
* Check if driver provides function to enable boost -
* if not, use cpufreq_boost_set_sw as default
*/
if (!cpufreq_driver->set_boost)
cpufreq_driver->set_boost = cpufreq_boost_set_sw;
ret = cpufreq_sysfs_create_file(&boost.attr);
if (ret) {
pr_err("%s: cannot register global BOOST sysfs file\n",
__func__);
goto err_null_driver;
}
}
ret = create_boost_sysfs_file();
if (ret)
goto err_null_driver;
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
@ -2503,8 +2534,7 @@ out:
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
if (cpufreq_boost_supported())
cpufreq_sysfs_remove_file(&boost.attr);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
@ -2533,9 +2563,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
/* Protect against concurrent cpu hotplug */
get_online_cpus();
subsys_interface_unregister(&cpufreq_interface);
if (cpufreq_boost_supported())
cpufreq_sysfs_remove_file(&boost.attr);
remove_boost_sysfs_file();
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
write_lock_irqsave(&cpufreq_driver_lock, flags);


@ -75,6 +75,10 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
}
freq_table[i].driver_data = i;
freq_table[i].frequency = rate / 1000;
/* Is Boost/turbo opp ? */
if (dev_pm_opp_is_turbo(opp))
freq_table[i].flags = CPUFREQ_BOOST_FREQ;
}
freq_table[i].driver_data = i;


@ -18,6 +18,21 @@
* FREQUENCY TABLE HELPERS *
*********************************************************************/
bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
if (!table)
return false;
cpufreq_for_each_valid_entry(pos, table)
if (pos->flags & CPUFREQ_BOOST_FREQ)
return true;
return false;
}
EXPORT_SYMBOL_GPL(policy_has_boost_freq);
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{


@ -48,6 +48,8 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
#define ATC_MAX_DSCR_TRIALS 10
/*
* Initial number of descriptors to allocate for each channel. This could
* be increased during dma usage.
@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
*
* @current_len: the number of bytes left before reading CTRLA
* @ctrla: the value of CTRLA
* @desc: the descriptor containing the transfer width
*/
static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
struct at_desc *desc)
static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
}
u32 btsize = (ctrla & ATC_BTSIZE_MAX);
u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
/**
* atc_calc_bytes_left_from_reg - calculates the number of bytes left according
* to the current value of CTRLA.
*
* @current_len: the number of bytes left before reading CTRLA
* @atchan: the channel to read CTRLA for
* @desc: the descriptor containing the transfer width
*/
static inline int atc_calc_bytes_left_from_reg(int current_len,
struct at_dma_chan *atchan, struct at_desc *desc)
{
u32 ctrla = channel_readl(atchan, CTRLA);
return atc_calc_bytes_left(current_len, ctrla, desc);
/*
* According to the datasheet, when reading the Control A Register
* (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
* number of transfers completed on the Source Interface.
* So btsize is always a number of source width transfers.
*/
return current_len - (btsize << src_width);
}
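As a worked example of the corrected formula: if CTRLA reports a source width of 32-bit words, ATC_REG_TO_SRC_WIDTH() returns 2, and a btsize of 0x10 means that 0x10 << 2 = 64 bytes have already been read from the source, so 64 is subtracted from current_len to obtain the residue.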
/**
@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
u32 ctrla, dscr;
u32 ctrla, dscr, trials;
/*
* If the cookie doesn't match to the currently running transfer then
@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
* the channel's DSCR register and compare it against the value
* of the hardware linked list structure of each child
* descriptor.
*
* The CTRLA register provides us with the amount of data
* already read from the source for the current child
* descriptor. So we can compute a more accurate residue by also
* removing the number of bytes corresponding to this amount of
* data.
*
* However, the DSCR and CTRLA registers cannot be read both
* atomically. Hence a race condition may occur: the first read
* register may refer to one child descriptor whereas the second
* read may refer to a later child descriptor in the list
because of the DMA transfer progression in between the two
* reads.
*
* One solution could have been to pause the DMA transfer, read
* the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
* this approach presents some drawbacks:
* - If the DMA transfer is paused, RX overruns or TX underruns
are more likely to occur depending on the system latency.
* Taking the USART driver as an example, it uses a cyclic DMA
* transfer to read data from the Receive Holding Register
* (RHR) to avoid RX overruns since the RHR is not protected
* by any FIFO on most Atmel SoCs. So pausing the DMA transfer
* to compute the residue would break the USART driver design.
* - The atc_pause() function masks interrupts but we'd rather
avoid doing so for system latency reasons.
*
* Then we'd rather use another solution: the DSCR is read a
* first time, the CTRLA is read in turn, next the DSCR is read
* a second time. If the two consecutive read values of the DSCR
are the same then we assume both refer to the very same
child descriptor, as does the CTRLA value read in between.
For cyclic transfers, the assumption is that a full loop
is "not so fast".
If the two DSCR values are different, we read the CTRLA
again, then the DSCR, until two consecutive read values of
DSCR are equal or the maximum number of trials is reached.
It is very unlikely that this algorithm fails to find a
stable value for DSCR.
*/
ctrla = channel_readl(atchan, CTRLA);
rmb(); /* ensure CTRLA is read before DSCR */
dscr = channel_readl(atchan, DSCR);
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
u32 new_dscr;
rmb(); /* ensure DSCR is read after CTRLA */
new_dscr = channel_readl(atchan, DSCR);
/*
* If the DSCR register value has not changed inside the
* DMA controller since the previous read, we assume
that both the dscr and ctrla values refer to the
* very same descriptor.
*/
if (likely(new_dscr == dscr))
break;
/*
* DSCR has changed inside the DMA controller, so the
previously read value of CTRLA may refer to an already
* processed descriptor hence could be outdated.
* We need to update ctrla to match the current
* descriptor.
*/
dscr = new_dscr;
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
}
if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
if (desc_first->lli.dscr == dscr)
return atc_calc_bytes_left(ret, ctrla, desc_first);
return atc_calc_bytes_left(ret, ctrla);
ret -= desc_first->len;
list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
}
/*
* For the last descriptor in the chain we can calculate
* For the current descriptor in the chain we can calculate
* the remaining bytes using the channel's register.
* Note that the transfer width of the first and last
* descriptor may differ.
*/
if (!desc->lli.dscr)
ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
ret = atc_calc_bytes_left(ret, ctrla);
} else {
/* single transfer */
ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
ctrla = channel_readl(atchan, CTRLA);
ret = atc_calc_bytes_left(ret, ctrla);
}
return ret;
@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
desc->txd.cookie = -EBUSY;
desc->total_len = desc->len = len;
desc->tx_width = dwidth;
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
first->txd.cookie = -EBUSY;
first->total_len = len;
/* set transfer width for the calculation of the residue */
first->tx_width = src_width;
prev->tx_width = src_width;
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first->txd.cookie = -EBUSY;
first->total_len = total_len;
/* set transfer width for the calculation of the residue */
first->tx_width = reg_width;
prev->tx_width = reg_width;
/* first link descriptor of list is responsible of flags */
first->txd.flags = flags; /* client is in control of this ack */
@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
desc->txd.cookie = 0;
desc->len = len;
/*
* Although we only need the transfer width for the first and
* the last descriptor, its easier to set it to all descriptors.
*/
desc->tx_width = src_width;
atc_desc_chain(&first, &prev, desc);
/* update the lengths and addresses for the next loop cycle */
@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY;
first->total_len = buf_len;
first->tx_width = reg_width;
return &first->txd;


@ -112,6 +112,7 @@
#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
#define ATC_SRC_WIDTH_WORD (0x2 << 24)
#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)
#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
#define ATC_DST_WIDTH(x) ((x) << 28)
#define ATC_DST_WIDTH_BYTE (0x0 << 28)
@ -182,7 +183,6 @@ struct at_lli {
* @txd: support for the async_tx api
* @desc_node: node on the channed descriptors list
* @len: descriptor byte count
* @tx_width: transfer width
* @total_len: total transaction byte count
*/
struct at_desc {
@ -194,7 +194,6 @@ struct at_desc {
struct dma_async_tx_descriptor txd;
struct list_head desc_node;
size_t len;
u32 tx_width;
size_t total_len;
/* Interleaved data */


@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
* descriptor view 2 since some fields of the configuration register
* depend on transfer size and src/dest addresses.
*/
if (at_xdmac_chan_is_cyclic(atchan)) {
if (at_xdmac_chan_is_cyclic(atchan))
reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
} else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
} else {
/*
* No need to write AT_XDMAC_CC reg, it will be done when the
* descriptor is fecthed.
*/
else
reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
}
/*
* Even if the register will be updated from the configuration in the
* descriptor when using view 2 or higher, the PROT bit won't be set
* properly. This bit can be modified only by using the channel
* configuration register.
*/
at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
reg |= AT_XDMAC_CNDC_NDDUP
| AT_XDMAC_CNDC_NDSUP
@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->lld.mbr_sa = mem;
desc->lld.mbr_da = atchan->sconfig.dst_addr;
}
desc->lld.mbr_cfg = atchan->cfg;
dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
dwidth = at_xdmac_get_dwidth(atchan->cfg);
fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
? dwidth
: AT_XDMAC_CC_DWIDTH_BYTE;
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
| AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
| AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
| (len >> fixed_dwidth); /* microblock length */
desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
AT_XDMAC_CC_DWIDTH(fixed_dwidth);
dev_dbg(chan2dev(chan),
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);


@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
config &= ~0x7;
config |= op_mode;
if (IS_ENABLED(__BIG_ENDIAN))
config |= XOR_DESCRIPTOR_SWAP;
else
config &= ~XOR_DESCRIPTOR_SWAP;
#if defined(__BIG_ENDIAN)
config |= XOR_DESCRIPTOR_SWAP;
#else
config &= ~XOR_DESCRIPTOR_SWAP;
#endif
writel_relaxed(config, XOR_CONFIG(chan));
chan->current_type = type;
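Going back to the preprocessor here is deliberate: IS_ENABLED() implements the Kconfig convention and evaluates to 1 only for macros that are #defined to exactly 1, whereas __BIG_ENDIAN, when present, is #defined to 4321 by the byteorder headers, so IS_ENABLED(__BIG_ENDIAN) was 0 on every build and the descriptor-swap bit was never set. A small illustration (not part of the patch):

#include <linux/kconfig.h>

/* Kconfig-style symbols are either "#define FOO 1" or absent, so
 * IS_ENABLED(CONFIG_FOO) -> 1 and IS_ENABLED(CONFIG_ABSENT) -> 0.
 * __BIG_ENDIAN is "#define __BIG_ENDIAN 4321" when defined, hence
 * IS_ENABLED(__BIG_ENDIAN) -> 0 even on big-endian builds; only the
 * classic preprocessor test tells the two apart:
 */
#if defined(__BIG_ENDIAN)
# define NEEDS_DESCRIPTOR_SWAP 1	/* illustrative name */
#else
# define NEEDS_DESCRIPTOR_SWAP 0
#endif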


@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
desc->txd.callback = last->txd.callback;
desc->txd.callback_param = last->txd.callback_param;
}
last->last = false;
desc->last = false;
dma_cookie_assign(&desc->txd);
@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->rqcfg.brst_len = 1;
desc->rqcfg.brst_len = get_burst_len(desc, len);
desc->bytes_requested = len;
desc->txd.flags = flags;

View File

@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&vc->lock, flags);
cookie = dma_cookie_assign(tx);
list_move_tail(&vd->node, &vc->desc_submitted);
list_add_tail(&vd->node, &vc->desc_submitted);
spin_unlock_irqrestore(&vc->lock, flags);
dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
cb_data = vd->tx.callback_param;
list_del(&vd->node);
if (async_tx_test_ack(&vd->tx))
list_add(&vd->node, &vc->desc_allocated);
else
vc->desc_free(vd);
vc->desc_free(vd);
if (cb)
cb(cb_data);
@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
while (!list_empty(head)) {
struct virt_dma_desc *vd = list_first_entry(head,
struct virt_dma_desc, node);
if (async_tx_test_ack(&vd->tx)) {
list_move_tail(&vd->node, &vc->desc_allocated);
} else {
dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
list_del(&vd->node);
vc->desc_free(vd);
}
list_del(&vd->node);
dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
vc->desc_free(vd);
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
dma_cookie_init(&vc->chan);
spin_lock_init(&vc->lock);
INIT_LIST_HEAD(&vc->desc_allocated);
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);

View File

@ -29,7 +29,6 @@ struct virt_dma_chan {
spinlock_t lock;
/* protected by vc.lock */
struct list_head desc_allocated;
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
struct virt_dma_desc *vd, unsigned long tx_flags)
{
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
unsigned long flags;
dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
vd->tx.flags = tx_flags;
vd->tx.tx_submit = vchan_tx_submit;
spin_lock_irqsave(&vc->lock, flags);
list_add_tail(&vd->node, &vc->desc_allocated);
spin_unlock_irqrestore(&vc->lock, flags);
return &vd->tx;
}
@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
}
/**
* vchan_get_all_descriptors - obtain all allocated, submitted and issued
* descriptors
* vchan_get_all_descriptors - obtain all submitted and issued descriptors
* vc: virtual channel to get descriptors from
* head: list of descriptors found
*
@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head)
{
list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
struct virt_dma_desc *vd;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&vc->lock, flags);
vchan_get_all_descriptors(vc, &head);
list_for_each_entry(vd, &head, node)
async_tx_clear_ack(&vd->tx);
spin_unlock_irqrestore(&vc->lock, flags);
vchan_dma_desc_free_list(vc, &head);
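Taken together, these virt-dma hunks revert the short-lived desc_allocated list: a descriptor is once again owned by the preparing driver until tx_submit(), and vchan_tx_prep() no longer needs to take the channel lock. A condensed sketch of the remaining lifecycle and the list holding the descriptor at each stage:
/*
 * vchan_tx_prep()          vd initialized; owned by the caller, on no list
 * tx_submit()              -> vc->desc_submitted
 * issue_pending()          -> vc->desc_issued, transfer started
 * completion irq/tasklet   -> vc->desc_completed
 * vchan_complete()         callback invoked, then vc->desc_free(vd)
 */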

View File

@ -111,6 +111,7 @@
#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
#define XGENE_DMA_BLK_MEM_RDY 0xD074
#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
/* X-Gene SoC EFUSE csr register and bit definition */
#define XGENE_SOC_JTAG1_SHADOW 0x18
@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
return -ENOMEM;
}
pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
/* Get efuse csr region */
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
if (!res) {

View File

@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
static int __init parse_efi_cmdline(char *str)
{
if (!str) {
pr_warn("need at least one option\n");
return -EINVAL;
}
if (parse_option_str(str, "noruntime"))
disable_runtime = true;

View File

@ -1866,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
struct amdgpu_ip_block_status {
bool valid;
bool sw;
bool hw;
};
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@ -2008,7 +2014,7 @@ struct amdgpu_device {
const struct amdgpu_ip_block_version *ip_blocks;
int num_ip_blocks;
bool *ip_block_enabled;
struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);

View File

@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
if (adev->ip_block_enabled == NULL)
adev->ip_block_status = kcalloc(adev->num_ip_blocks,
sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
if (adev->ip_block_status == NULL)
return -ENOMEM;
if (adev->ip_blocks == NULL) {
@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
adev->ip_block_enabled[i] = false;
adev->ip_block_status[i].valid = false;
} else {
if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev);
if (r == -ENOENT)
adev->ip_block_enabled[i] = false;
adev->ip_block_status[i].valid = false;
else if (r)
return r;
else
adev->ip_block_enabled[i] = true;
adev->ip_block_status[i].valid = true;
} else {
adev->ip_block_enabled[i] = true;
adev->ip_block_status[i].valid = true;
}
}
}
@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r)
return r;
adev->ip_block_status[i].sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = amdgpu_wb_init(adev);
if (r)
return r;
adev->ip_block_status[i].hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].sw)
continue;
/* gmc hw init is done early */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r)
return r;
adev->ip_block_status[i].hw = true;
}
return 0;
@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].valid)
continue;
/* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].hw)
continue;
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return r;
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */
adev->ip_block_status[i].hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].sw)
continue;
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */
adev->ip_block_enabled[i] = false;
adev->ip_block_status[i].sw = false;
adev->ip_block_status[i].valid = false;
}
return 0;
@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
int i, r;
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
if (r)
@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
kfree(adev->ip_block_enabled);
adev->ip_block_enabled = NULL;
kfree(adev->ip_block_status);
adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
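The amdgpu hunks above all follow from one change: the single ip_block_enabled bool per block becomes a three-flag status, so each teardown and suspend path gates on exactly the stage that completed, and a failure halfway through init no longer finalizes stages that never ran. A minimal sketch of the pattern, with hypothetical sw_fini()/hw_fini() stand-ins for the per-block callbacks:
#include <stdbool.h>
struct ip_block_status { bool valid, sw, hw; };
extern void hw_fini(int i);   /* hypothetical per-block callbacks */
extern void sw_fini(int i);
/* teardown mirrors bring-up in reverse; each stage is undone only if
 * its own flag is set, so a block whose hw_init failed is skipped */
static void fini_all(struct ip_block_status *st, int n)
{
	int i;
	for (i = n - 1; i >= 0; i--)
		if (st[i].hw) {
			hw_fini(i);
			st[i].hw = false;
		}
	for (i = n - 1; i >= 0; i--)
		if (st[i].sw) {
			sw_fini(i);
			st[i].sw = false;
			st[i].valid = false;
		}
}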

View File

@ -449,7 +449,7 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va)
struct amdgpu_bo_va *bo_va, uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;
@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_unlock;
r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
if (operation == AMDGPU_VA_OP_MAP)
r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
amdgpu_gem_va_update_vm(adev, bo_va);
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
drm_gem_object_unreference_unlocked(gobj);
return r;

View File

@ -180,17 +180,17 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
if (vm) {
/* do context switch */
amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
if (ring->funcs->emit_gds_switch)
amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
}
if (vm && ring->funcs->emit_gds_switch)
amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
old_ctx = ring->current_ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];

View File

@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].type == type &&
adev->ip_block_enabled[i]) {
adev->ip_block_status[i].valid) {
ip.hw_ip_version_major = adev->ip_blocks[i].major;
ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
ip.capabilities_flags = 0;
@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].type == type &&
adev->ip_block_enabled[i] &&
adev->ip_block_status[i].valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info;
struct drm_amdgpu_info_device dev_info = {};
struct amdgpu_cu_info cu_info;
dev_info.device_id = dev->pdev->device;

View File

@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
* scheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
(ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
!need_ctx_switch)
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
control |= INDIRECT_BUFFER_VALID;
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
if (need_ctx_switch)
next_rptr += 2;
next_rptr += 4;
@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
control |= INDIRECT_BUFFER_VALID;
next_rptr += 4;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, next_rptr);
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw |
(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
/**
* gfx_v7_0_ring_test_ib - basic ring IB test
*
@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib,
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib,
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
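The same reshuffle lands for gfx_v8_0 in the next file: rather than branching on ring->type inside a shared emit_ib, each ring type now installs its own handler in its funcs table. Reduced to a sketch with hypothetical types:
struct ring;
struct ib;
struct ring_funcs {
	void (*emit_ib)(struct ring *ring, struct ib *ib);
};
static void emit_ib_gfx(struct ring *ring, struct ib *ib)     { /* gfx-only packets */ }
static void emit_ib_compute(struct ring *ring, struct ib *ib) { /* compute-only packets */ }
/* the type decision is made once, at table setup, not per submission */
static const struct ring_funcs gfx_funcs     = { .emit_ib = emit_ib_gfx };
static const struct ring_funcs compute_funcs = { .emit_ib = emit_ib_compute };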

View File

@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;
@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */
if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
(ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
!need_ctx_switch)
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
control |= INDIRECT_BUFFER_VALID;
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
if (need_ctx_switch)
next_rptr += 2;
next_rptr += 4;
@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
control |= INDIRECT_BUFFER_VALID;
next_rptr += 4;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, next_rptr);
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw |
(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib,
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib,
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,

View File

@ -230,10 +230,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
connector_state->best_encoder = new_encoder;
idx = drm_crtc_index(connector_state->crtc);
if (connector_state->crtc) {
idx = drm_crtc_index(connector_state->crtc);
crtc_state = state->crtc_states[idx];
crtc_state->mode_changed = true;
crtc_state = state->crtc_states[idx];
crtc_state->mode_changed = true;
}
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
connector->base.id,

View File

@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
u32 upper = I915_READ(upper_reg); \
u32 lower = I915_READ(lower_reg); \
u32 tmp = I915_READ(upper_reg); \
if (upper != tmp) { \
upper = tmp; \
lower = I915_READ(lower_reg); \
WARN_ON(I915_READ(upper_reg) != upper); \
} \
(u64)upper << 32 | lower; })
u32 upper, lower, tmp; \
tmp = I915_READ(upper_reg); \
do { \
upper = tmp; \
lower = I915_READ(lower_reg); \
tmp = I915_READ(upper_reg); \
} while (upper != tmp); \
(u64)upper << 32 | lower; })
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
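The rewritten macro upgrades the old single-retry-and-WARN version to a proper loop: keep re-reading the upper half until it is stable around the lower read, so a carry propagating between the two 32-bit reads can never yield a torn 64-bit value. For reference, the same scheme as a standalone sketch, with hypothetical rd_upper()/rd_lower() standing in for the register reads:
#include <stdint.h>
extern uint32_t rd_upper(void);   /* hypothetical 32-bit MMIO reads */
extern uint32_t rd_lower(void);
static uint64_t read64_2x32(void)
{
	uint32_t upper, lower, tmp;
	tmp = rd_upper();
	do {
		upper = tmp;          /* candidate upper half */
		lower = rd_lower();   /* lower half sampled in between */
		tmp = rd_upper();     /* re-read; loop if it changed */
	} while (upper != tmp);
	return (uint64_t)upper << 32 | lower;
}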

View File

@ -1923,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, pte_flags);
/* Note the inconsistency here is due to absence of the
* aliasing ppgtt on gen4 and earlier. Though we always
* request PIN_USER for execbuffer (translated to LOCAL_BIND),
* without the appgtt, we cannot honour that request and so
* must substitute it with a global binding. Since we do this
* behind the upper layers back, we need to explicitly set
* the bound flag ourselves.
*/
vma->bound |= GLOBAL_BIND;
}
if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {

View File

@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
args->phys_swizzle_mode = args->swizzle_mode;
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)

View File

@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
enum mdp4_frame_format frame_type;
if (!(crtc && fb)) {
DBG("%s: disabled!", mdp4_plane->name);
return 0;
}
frame_type = mdp4_get_frame_format(fb);
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;

View File

@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
for (i = 0; i < nplanes; i++) {
struct drm_plane *plane = state->planes[i];
struct drm_plane_state *plane_state = state->plane_states[i];
if (!plane)
continue;
mdp5_plane_complete_commit(plane, plane_state);
}
mdp5_disable(mdp5_kms);
}

View File

@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane);
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);

View File

@ -31,8 +31,6 @@ struct mdp5_plane {
uint32_t nformats;
uint32_t formats[32];
bool enabled;
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
return state->fb && state->crtc;
}
static int mdp5_plane_disable(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
DBG("%s: disable", mdp5_plane->name);
if (mdp5_kms) {
/* Release the memory we requested earlier from the SMP: */
mdp5_smp_release(mdp5_kms->smp, pipe);
}
return 0;
}
static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
if (!plane_enabled(state)) {
to_mdp5_plane_state(state)->pending = true;
mdp5_plane_disable(plane);
} else if (to_mdp5_plane_state(state)->mode_changed) {
int ret;
to_mdp5_plane_state(state)->pending = true;
@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return mdp5_plane->flush_mask;
}
/* called after vsync in thread context */
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
if (!plane_enabled(plane->state)) {
DBG("%s: free SMP", mdp5_plane->name);
mdp5_smp_release(mdp5_kms->smp, pipe);
}
}
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)

View File

@ -34,22 +34,44 @@
* and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
*
* For each block that can be dynamically allocated, it can be either
* free, or pending/in-use by a client. The updates happen in three steps:
* free:
* The block is free.
*
* pending:
* The block is allocated to some client and not free.
*
* configured:
* The block is allocated to some client, and assigned to that
* client in MDP5_MDP_SMP_ALLOC registers.
*
* inuse:
* The block is being actively used by a client.
*
* The updates happen in the following steps:
*
* 1) mdp5_smp_request():
* When plane scanout is set up, calculate required number of
* blocks needed per client, and request. Blocks not inuse or
* pending by any other client are added to client's pending
* set.
* blocks needed per client, and request. Blocks neither inuse nor
* configured nor pending by any other client are added to client's
* pending set.
* For shrinking, blocks in pending but not in configured can be freed
* directly, but those already in configured will be freed later by
* mdp5_smp_commit.
*
* 2) mdp5_smp_configure():
* As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse)
* Current pending is copied to configured.
* It is assumed that mdp5_smp_request and mdp5_smp_configure do not run
* concurrently for the same pipe.
*
* 3) mdp5_smp_commit():
* After next vblank, copy pending -> inuse. Optionally update
* After next vblank, copy configured -> inuse. Optionally update
* MDP5_SMP_ALLOC registers if there are newly unused blocks
*
* 4) mdp5_smp_release():
* Must be called after the pipe is disabled and no longer uses any SMB
*
* On the next vblank after changes have been committed to hw, the
* client's pending blocks become its in-use blocks (and no-longer
* in-use blocks become available to other clients).
@ -77,6 +99,9 @@ struct mdp5_smp {
struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};
static void update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned);
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
for (i = cur_nblks; i > nblks; i--) {
int blk = find_first_bit(ps->pending, cnt);
clear_bit(blk, ps->pending);
/* don't clear in global smp_state until _commit() */
/* clear the block in global smp_state now if it is not
* in configured; blocks already in configured are freed at _commit()
*/
if (!test_bit(blk, ps->configured))
clear_bit(blk, smp->state);
}
}
@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int i, nblks;
int i;
unsigned long flags;
int cnt = smp->blk_cnt;
for (i = 0; i < pipe2nclients(pipe); i++) {
mdp5_smp_state_t assigned;
u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
spin_lock_irqsave(&smp->state_lock, flags);
/* clear hw assignment */
bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, CID_UNUSED, &assigned);
/* free to global pool */
bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
bitmap_andnot(smp->state, smp->state, assigned, cnt);
/* clear client's info */
bitmap_zero(ps->pending, cnt);
bitmap_zero(ps->configured, cnt);
bitmap_zero(ps->inuse, cnt);
spin_unlock_irqrestore(&smp->state_lock, flags);
}
for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
smp_request_block(smp, pipe2client(pipe, i), 0);
set_fifo_thresholds(smp, pipe, 0);
}
@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];
bitmap_or(assigned, ps->inuse, ps->pending, cnt);
/*
* if vblank has not happened since last smp_configure
* skip the configure for now
*/
if (!bitmap_equal(ps->inuse, ps->configured, cnt))
continue;
bitmap_copy(ps->configured, ps->pending, cnt);
bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, cid, &assigned);
}
}
/* step #3: after vblank, copy pending -> inuse: */
/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int cnt = smp->blk_cnt;
@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
* using, which can be released and made available to other
* clients:
*/
if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
unsigned long flags;
spin_lock_irqsave(&smp->state_lock, flags);
@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
update_smp_state(smp, CID_UNUSED, &released);
}
bitmap_copy(ps->inuse, ps->pending, cnt);
bitmap_copy(ps->inuse, ps->configured, cnt);
}
}
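Taken together, the smp.c hunks implement the free/pending/configured/inuse lifecycle documented at the top of the file with plain bitmap operations. A condensed sketch of steps 2 and 3 for a single client, assuming cnt <= 64 and omitting the hardware ALLOC-register writes (hypothetical helper, not the driver's code):
#include <linux/bitmap.h>
static void smp_steps_sketch(unsigned long *pool, unsigned long *pending,
			     unsigned long *configured, unsigned long *inuse,
			     int cnt)
{
	DECLARE_BITMAP(assigned, 64);
	DECLARE_BITMAP(released, 64);
	/* step 2, configure: latch pending; hw is programmed with
	 * inuse | configured */
	bitmap_copy(configured, pending, cnt);
	bitmap_or(assigned, inuse, configured, cnt);
	/* step 3, commit (after vblank): blocks in use but no longer
	 * configured return to the global pool */
	if (bitmap_andnot(released, inuse, configured, cnt))
		bitmap_andnot(pool, pool, released, cnt);
	bitmap_copy(inuse, configured, cnt);
}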

View File

@ -23,6 +23,7 @@
struct mdp5_client_smp_state {
mdp5_smp_state_t inuse;
mdp5_smp_state_t configured;
mdp5_smp_state_t pending;
};

View File

@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
timeout = ktime_add_ms(ktime_get(), 1000);
ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
if (ret) {
WARN_ON(ret); // TODO unswap state back? or??
commit_destroy(c);
return ret;
}
/* uninterruptible wait */
msm_wait_fence(dev, c->fence, &timeout, false);
complete_commit(c);

View File

@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
* Fences:
*/
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
ktime_t *timeout)
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout, bool interruptible)
{
struct msm_drm_private *priv = dev->dev_private;
int ret;
@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
remaining_jiffies = timespec_to_jiffies(&ts);
}
ret = wait_event_interruptible_timeout(priv->fence_event,
if (interruptible)
ret = wait_event_interruptible_timeout(priv->fence_event,
fence_completed(dev, fence),
remaining_jiffies);
else
ret = wait_event_timeout(priv->fence_event,
fence_completed(dev, fence),
remaining_jiffies);
@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
return -EINVAL;
}
return msm_wait_fence_interruptable(dev, args->fence, &timeout);
return msm_wait_fence(dev, args->fence, &timeout, true);
}
static const struct drm_ioctl_desc msm_ioctls[] = {

View File

@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
ktime_t *timeout);
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout, bool interruptible);
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);

View File

@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
ret = msm_wait_fence_interruptable(dev, fence, timeout);
ret = msm_wait_fence(dev, fence, timeout, true);
}
/* TODO cache maintenance */

View File

@ -23,8 +23,12 @@
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
BUG_ON(!msm_obj->sgt); /* should have already pinned! */
return msm_obj->sgt;
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return NULL;
return drm_prime_pages_to_sg(msm_obj->pages, npages);
}
void *msm_gem_prime_vmap(struct drm_gem_object *obj)

View File

@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
nvif_client_fini(&cli->base);
usif_client_fini(cli);
kfree(cli);
}
static void
@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
pm_runtime_get_sync(dev->dev);
mutex_lock(&cli->mutex);
if (cli->abi16)
nouveau_abi16_fini(cli->abi16);
mutex_unlock(&cli->mutex);
mutex_lock(&drm->client.mutex);
list_del(&cli->head);

View File

@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
return 0;
}
#if IS_ENABLED(CONFIG_IOMMU_API)
static void nouveau_platform_probe_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
}
}
#else
static void nouveau_platform_probe_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
}
static void nouveau_platform_remove_iommu(struct device *dev,
struct nouveau_platform_gpu *gpu)
{
}
#endif
static int nouveau_platform_probe(struct platform_device *pdev)
{
struct nouveau_platform_gpu *gpu;

View File

@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
switch (drm->device.info.family) {
case NV_DEVICE_INFO_V0_TNT:
case NV_DEVICE_INFO_V0_CELSIUS:
case NV_DEVICE_INFO_V0_KELVIN:
case NV_DEVICE_INFO_V0_RANKINE:
case NV_DEVICE_INFO_V0_CURIE:
break;
case NV_DEVICE_INFO_V0_TESLA:
if (drm->device.info.chipset != 0x50)
node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
break;
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
drm->device.info.family);
break;
}

View File

@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
if (RING_SPACE(chan, 49)) {
if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
nouveau_fbcon_gpu_lockup(info);
return 0;
}

View File

@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
if (show && nv_crtc->cursor.nvbo)
if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
nv50_crtc_cursor_show(nv_crtc);
else
nv50_crtc_cursor_hide(nv_crtc);

View File

@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
ret = RING_SPACE(chan, 59);
ret = RING_SPACE(chan, 58);
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return ret;
@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma.offset));
OUT_RING(chan, lower_32_bits(fb->vma.offset));
FIRE_RING(chan);
return 0;
}

View File

@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
ret = RING_SPACE(chan, 60);
ret = RING_SPACE(chan, 58);
if (ret) {
WARN_ON(1);
nouveau_fbcon_gpu_lockup(info);

View File

@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
default:
nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
return 0x0000;
return NULL;
}
}

View File

@ -165,15 +165,31 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
return 0;
}
static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
struct nvkm_object *obj = (void *)chan;
struct gk104_fifo_priv *priv = (void *)obj->engine;
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
nv_error(priv, "channel %d [%s] kick timeout\n",
chan->base.chid, nvkm_client_name(chan));
return -EBUSY;
}
return 0;
}
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
struct nvkm_object *object)
{
struct nvkm_bar *bar = nvkm_bar(parent);
struct gk104_fifo_priv *priv = (void *)parent->engine;
struct gk104_fifo_base *base = (void *)parent->parent;
struct gk104_fifo_chan *chan = (void *)parent;
u32 addr;
int ret;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
return -EINVAL;
}
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d [%s] kick timeout\n",
chan->base.chid, nvkm_client_name(chan));
if (suspend)
return -EBUSY;
}
ret = gk104_fifo_chan_kick(chan);
if (ret && suspend)
return ret;
if (addr) {
nv_wo32(base, addr + 0x00, 0x00000000);
@ -319,6 +331,7 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
gk104_fifo_runlist_update(priv, chan->engine);
}
gk104_fifo_chan_kick(chan);
nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
return nvkm_fifo_channel_fini(&chan->base, suspend);
}

View File

@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
gf100_gr_zbc_clear_depth(priv, index);
}
/**
* Wait until GR goes idle. GR is considered idle if it is disabled by the
* MC (0x200) register, or GR is not busy and a context switch is not in
* progress.
*/
int
gf100_gr_wait_idle(struct gf100_gr_priv *priv)
{
unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
bool gr_enabled, ctxsw_active, gr_busy;
do {
/*
* required to make sure FIFO_ENGINE_STATUS (0x2640) is
* up-to-date
*/
nv_rd32(priv, 0x400700);
gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
if (!gr_enabled || (!gr_busy && !ctxsw_active))
return 0;
} while (time_before(jiffies, end_jiffies));
nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
gr_enabled, ctxsw_active, gr_busy);
return -EAGAIN;
}
void
gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
while (addr < next) {
nv_wr32(priv, 0x400200, addr);
nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
/*
* Wait for GR to go idle after submitting a
* GO_IDLE bundle
*/
if ((addr & 0xffff) == 0xe100)
gf100_gr_wait_idle(priv);
nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
addr += init->pitch;
}
}
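gf100_gr_wait_idle() is a bounded-poll helper: sample the three status registers until the idle condition holds or two seconds of jiffies elapse. Its skeleton, reduced to a sketch with a hypothetical cond() standing in for the register checks:
#include <linux/jiffies.h>
#include <linux/types.h>
static int wait_idle(bool (*cond)(void))
{
	unsigned long end = jiffies + msecs_to_jiffies(2000);
	do {
		if (cond())           /* re-sampled every iteration */
			return 0;
	} while (time_before(jiffies, end));
	return -EAGAIN;               /* timed out; caller logs the state */
}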

View File

@ -181,6 +181,7 @@ struct gf100_gr_oclass {
int ppc_nr;
};
int gf100_gr_wait_idle(struct gf100_gr_priv *);
void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);

View File

@ -332,9 +332,12 @@ static void
nvkm_perfctx_dtor(struct nvkm_object *object)
{
struct nvkm_pm *ppm = (void *)object->engine;
struct nvkm_perfctx *ctx = (void *)object;
mutex_lock(&nv_subdev(ppm)->mutex);
nvkm_engctx_destroy(&ppm->context->base);
ppm->context = NULL;
nvkm_engctx_destroy(&ctx->base);
if (ppm->context == ctx)
ppm->context = NULL;
mutex_unlock(&nv_subdev(ppm)->mutex);
}
@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
mutex_lock(&nv_subdev(ppm)->mutex);
if (ppm->context == NULL)
ppm->context = ctx;
if (ctx != ppm->context)
ret = -EBUSY;
mutex_unlock(&nv_subdev(ppm)->mutex);
if (ctx != ppm->context)
return -EBUSY;
return 0;
return ret;
}
struct nvkm_oclass

View File

@ -1284,6 +1284,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
}
}
/**
* INIT_PLL_INDIRECT - opcode 0x59
*
*/
static void
init_pll_indirect(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u16 addr = nv_ro16(bios, init->offset + 5);
u32 freq = (u32)nv_ro16(bios, addr) * 1000;
trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
reg, addr, freq);
init->offset += 7;
init_prog_pll(init, reg, freq);
}
/**
* INIT_ZM_REG_INDIRECT - opcode 0x5a
*
*/
static void
init_zm_reg_indirect(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
u32 reg = nv_ro32(bios, init->offset + 1);
u16 addr = nv_ro16(bios, init->offset + 5);
u32 data = nv_ro32(bios, addr);
trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
reg, addr, data);
init->offset += 7;
init_wr32(init, reg, data);
}
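Both new opcodes share a 7-byte encoding and differ only in the width of the indirect load; the layout implied by the reads above:
/*
 * INIT_PLL_INDIRECT (0x59) / INIT_ZM_REG_INDIRECT (0x5a), 7 bytes:
 *   +0  u8   opcode     0x59 or 0x5a
 *   +1  u32  reg        register to program
 *   +5  u16  addr       offset into the VBIOS image to read from
 *
 * 0x59: freq = ro16(addr) * 1000  (table value in MHz, programmed in kHz)
 * 0x5a: data = ro32(addr)         (32-bit value written verbatim to reg)
 */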
/**
* INIT_SUB_DIRECT - opcode 0x5b
*
@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
[0x56] = { init_condition_time },
[0x57] = { init_ltime },
[0x58] = { init_zm_reg_sequence },
[0x59] = { init_pll_indirect },
[0x5a] = { init_zm_reg_indirect },
[0x5b] = { init_sub_direct },
[0x5c] = { init_jump },
[0x5e] = { init_i2c_if },

View File

@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
struct gt215_clk_info *info)
{
struct gt215_clk_priv *priv = (void *)clock;
u32 oclk, sclk, sdiv, diff;
u32 oclk, sclk, sdiv;
s32 diff;
info->clk = 0;

View File

@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
nv_wr32(priv, 0x12004c, 0x4);
nv_wr32(priv, 0x122204, 0x2);
nv_rd32(priv, 0x122204);
/*
* Bug: increase clock timeout to avoid operation failure at high
* gpcclk rate.
*/
nv_wr32(priv, 0x122354, 0x800);
nv_wr32(priv, 0x128328, 0x800);
nv_wr32(priv, 0x124320, 0x800);
}
static void

View File

@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
{
struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
struct nv04_instobj_priv *node = (void *)object;
struct nvkm_subdev *subdev = (void *)priv;
mutex_lock(&subdev->mutex);
nvkm_mm_free(&priv->heap, &node->mem);
mutex_unlock(&subdev->mutex);
nvkm_instobj_destroy(&node->base);
}
@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
struct nv04_instobj_priv *node;
struct nvkm_instobj_args *args = data;
struct nvkm_subdev *subdev = (void *)priv;
int ret;
if (!args->align)
@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
mutex_lock(&subdev->mutex);
ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
args->align, &node->mem);
mutex_unlock(&subdev->mutex);
if (ret)
return ret;

View File

@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
encoder_mode = atombios_get_encoder_mode(encoder);
if (connector && (radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
(ENCODER_MODE_IS_DP(encoder_mode) &&
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
ENCODER_MODE_IS_DP(encoder_mode)))
radeon_audio_mode_set(encoder, adjusted_mode);
}

View File

@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset;
if (!dig || !dig->afmt || !dig->afmt->pin)
if (!dig || !dig->afmt || !dig->pin)
return;
offset = dig->afmt->offset;
WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
AFMT_AUDIO_SRC_SELECT(dig->pin->id));
}
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct drm_connector *connector, struct drm_display_mode *mode)
struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 tmp = 0, offset;
u32 tmp = 0;
if (!dig || !dig->afmt || !dig->afmt->pin)
if (!dig || !dig->afmt || !dig->pin)
return;
offset = dig->afmt->pin->offset;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
else
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
u8 *sadb, int sad_count)
u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset, tmp;
u32 tmp;
if (!dig || !dig->afmt || !dig->afmt->pin)
if (!dig || !dig->afmt || !dig->pin)
return;
offset = dig->afmt->pin->offset;
/* program the speaker allocation */
tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp = RREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
u8 *sadb, int sad_count)
u8 *sadb, int sad_count)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset, tmp;
u32 tmp;
if (!dig || !dig->afmt || !dig->afmt->pin)
if (!dig || !dig->afmt || !dig->pin)
return;
offset = dig->afmt->pin->offset;
/* program the speaker allocation */
tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp = RREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set DP mode */
tmp |= DP_CONNECTION;
@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
WREG32_ENDPOINT(dig->pin->offset,
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
}
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
struct cea_sad *sads, int sad_count)
struct cea_sad *sads, int sad_count)
{
u32 offset;
int i;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
if (!dig || !dig->afmt || !dig->afmt->pin)
if (!dig || !dig->afmt || !dig->pin)
return;
offset = dig->afmt->pin->offset;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
}
}
@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
}
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto0 for HDMI */
u32 value = 0;
@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
}
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
struct radeon_crtc *crtc, unsigned int clock)
{
/* Two dtos; generally use dto1 for DP */
u32 value = 0;

View File

@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
static void radeon_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin, u8 enable_mask)
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
struct radeon_encoder_atom_dig *dig;
int pin_count = 0;
if (!pin)
return;
if (rdev->mode_info.mode_config_initialized) {
list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
if (radeon_encoder_is_digital(encoder)) {
radeon_encoder = to_radeon_encoder(encoder);
dig = radeon_encoder->enc_priv;
if (dig->pin == pin)
pin_count++;
}
}
if ((pin_count > 1) && (enable_mask == 0))
return;
}
if (rdev->audio.funcs->enable)
rdev->audio.funcs->enable(rdev, pin, enable_mask);
}
@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder;
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct cea_sad *sads;
int sad_count;
list_for_each_entry(connector,
&encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
break;
}
}
if (!radeon_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
if (!connector)
return;
}
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
}
BUG_ON(!sads);
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u8 *sadb = NULL;
int sad_count;
list_for_each_entry(connector,
&encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
break;
}
}
if (!radeon_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
if (!connector)
return;
}
sad_count = drm_edid_to_speaker_allocation(
radeon_connector_edid(connector), &sadb);
sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
&sadb);
if (sad_count < 0) {
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
sad_count);
@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
}
static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
struct drm_display_mode *mode)
{
struct radeon_encoder *radeon_encoder;
struct drm_connector *connector;
struct radeon_connector *radeon_connector = 0;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
list_for_each_entry(connector,
&encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
break;
}
}
if (!radeon_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
if (!connector)
return;
}
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
}
void radeon_audio_detect(struct drm_connector *connector,
struct drm_encoder *encoder,
enum drm_connector_status status)
{
struct radeon_device *rdev;
struct radeon_encoder *radeon_encoder;
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
if (!connector || !connector->encoder)
return;
rdev = connector->encoder->dev->dev_private;
if (!radeon_audio_chipset_supported(rdev))
return;
radeon_encoder = to_radeon_encoder(connector->encoder);
if (!radeon_encoder_is_digital(encoder))
return;
dig = radeon_encoder->enc_priv;
if (status == connector_status_connected) {
if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
radeon_encoder->audio = NULL;
return;
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
radeon_encoder->audio = rdev->audio.hdmi_funcs;
}
dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
if (!dig->pin)
dig->pin = radeon_audio_get_pin(encoder);
radeon_audio_enable(rdev, dig->pin, 0xf);
} else {
radeon_audio_enable(rdev, dig->pin, 0);
dig->pin = NULL;
}
} else {
radeon_audio_enable(rdev, dig->afmt->pin, 0);
dig->afmt->pin = NULL;
radeon_audio_enable(rdev, dig->pin, 0);
dig->pin = NULL;
}
}
@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
}
static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
struct drm_display_mode *mode)
struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
int err;
list_for_each_entry(connector,
&encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
break;
}
}
if (!radeon_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return -ENOENT;
}
if (!connector)
return -EINVAL;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
return err;
}
if (dig && dig->afmt &&
radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
if (dig && dig->afmt && radeon_encoder->audio &&
radeon_encoder->audio->set_avi_packet)
radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
buffer, sizeof(buffer));
@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
if (!dig || !dig->afmt)
return;
radeon_audio_set_mute(encoder, true);
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
radeon_audio_set_dto(encoder, mode->clock);
radeon_audio_set_vbi_packet(encoder);
radeon_hdmi_set_color_depth(encoder);
radeon_audio_update_acr(encoder, mode->clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
if (!connector)
return;
radeon_audio_set_mute(encoder, false);
if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
radeon_audio_set_mute(encoder, true);
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
radeon_audio_set_dto(encoder, mode->clock);
radeon_audio_set_vbi_packet(encoder);
radeon_hdmi_set_color_depth(encoder);
radeon_audio_update_acr(encoder, mode->clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
return;
radeon_audio_set_mute(encoder, false);
} else {
radeon_hdmi_set_color_depth(encoder);
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
return;
}
}
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode)
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
else
radeon_audio_set_dto(encoder, dig_connector->dp_clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
if (!connector)
return;
if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
else
radeon_audio_set_dto(encoder, dig_connector->dp_clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
return;
}
}
void radeon_audio_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode)
struct drm_display_mode *mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);

Some files were not shown because too many files have changed in this diff