
This is the 5.4.69 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl91u0cACgkQONu9yGCS
 aT7KmhAAvuW3edfAfzD/F5h4vHaa9rMRmtvp2/FwefBoE4LEi3F6p2gBrUZMA3ds
 DNQ8Nheafeqd63wFkfE//TXYR0rYTxTxa0jTrhtuJCUZ4+anRyG00fEbHPOxvMnJ
 aPwQQVNOfCaUAvRbFdQ4RbuIm5chhX8Bml0ZtqvsAAFJ9XkCh1UPF0VHtSrS7PRL
 lRMBlamLgZqU72naaJaFY2nMp+pvMFPZrzkR7tpv0Z1bqxuJp6L2n/EmcHpmTOJy
 Ze+Wvt1wKk8Ep5Vql5ekXt5lEiInjacwsJZXbb5HfHO++Y+1b+ABt1kSjJx+R3/q
 2Qdztq+9Eoj0N1A4gXdVFoZHqKihhbD49k8YqX4qO5ujTzqgnNyHGSEXyIKvaU6z
 b3b12IvjbcMhM1zm3qvFfrVbbQI3kJf66zSi9NAwsZHlsvxRzslALR8I7mila4r5
 fVOyfGoZxFs44FNW9JG7I85/isAxgg0ogYraMZbk8gmhTtb1ZaN+r7kJeXuTpzOg
 UBAIDYPclMyZeny6tn1/qFuzNGYQQ0R9kxFcTC21Cf2zNLWHNfwCL1vE3Ob+ROIS
 IHcsce6IqWQKGlD8UPjkZiXTLfqCAVi51PsGTVrnidXfa1IBOuvDsVqlghPsjHSD
 30N4VB++9Gbw7LFEP4e33cOZLBLjDEdYd4VuoQFYywDZ3cy6xXo=
 =OoZD
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.69' into 5.4-2.2.x-imx

This is the 5.4.69 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
5.4-rM2-2.2.x-imx-squashed
Andrey Zhizhikin 2020-10-01 16:21:52 +00:00
commit e0de7af107
465 changed files with 5170 additions and 2471 deletions

--- a/Documentation/devicetree/bindings/sound/wm8994.txt
+++ b/Documentation/devicetree/bindings/sound/wm8994.txt
@@ -14,9 +14,15 @@ Required properties:
 - #gpio-cells : Must be 2. The first cell is the pin number and the
   second cell is used to specify optional parameters (currently unused).
-- AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
-  SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
-  in Documentation/devicetree/bindings/regulator/regulator.txt
+- power supplies for the device, as covered in
+  Documentation/devicetree/bindings/regulator/regulator.txt, depending
+  on compatible:
+  - for wlf,wm1811 and wlf,wm8958:
+    AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
+    DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
+  - for wlf,wm8994:
+    AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
+    SPKVDD1-supply, SPKVDD2-supply

 Optional properties:
@@ -73,11 +79,11 @@ wm8994: codec@1a {

     lineout1-se;

+    AVDD1-supply = <&regulator>;
     AVDD2-supply = <&regulator>;
     CPVDD-supply = <&regulator>;
-    DBVDD1-supply = <&regulator>;
-    DBVDD2-supply = <&regulator>;
-    DBVDD3-supply = <&regulator>;
+    DBVDD-supply = <&regulator>;
+    DCVDD-supply = <&regulator>;
     SPKVDD1-supply = <&regulator>;
     SPKVDD2-supply = <&regulator>;
 };

--- a/Documentation/driver-api/libata.rst
+++ b/Documentation/driver-api/libata.rst
@@ -250,7 +250,7 @@ High-level taskfile hooks

 ::

-    void (*qc_prep) (struct ata_queued_cmd *qc);
+    enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
     int (*qc_issue) (struct ata_queued_cmd *qc);

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 68
+SUBLEVEL = 69
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -204,7 +204,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
     return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
 }

-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
 }
@@ -236,16 +236,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
     return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
 }

-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
 }

-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
 }

+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+    return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
 static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;

--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -116,6 +116,8 @@ static int save_trace(struct stackframe *frame, void *d)
         return 0;

     regs = (struct pt_regs *)frame->sp;
+    if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
+        return 0;

     trace->entries[trace->nr_entries++] = regs->ARM_pc;

--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);

 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
+    unsigned long end = frame + 4 + sizeof(struct pt_regs);
+
 #ifdef CONFIG_KALLSYMS
     printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
     printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif

-    if (in_entry_text(from))
-        dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+    if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
+        dump_mem("", "Exception stack", frame + 4, end);
 }

 void dump_backtrace_stm(u32 *stack, u32 instruction)

--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
                 int index)
 {
     struct omap3_idle_statedata *cx = &omap3_idle_data[index];
+    int error;

     if (omap_irq_pending() || need_resched())
         goto return_sleep_time;
@@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
      * Call idle CPU PM enter notifier chain so that
      * VFP context is saved.
      */
-    if (cx->mpu_state == PWRDM_POWER_OFF)
-        cpu_pm_enter();
+    if (cx->mpu_state == PWRDM_POWER_OFF) {
+        error = cpu_pm_enter();
+        if (error)
+            goto out_clkdm_set;
+    }

     /* Execute ARM wfi */
     omap_sram_idle();
@@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
         pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
         cpu_pm_exit();

+out_clkdm_set:
     /* Re-allow idle for C1 */
     if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
         clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);

--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
     struct idle_statedata *cx = state_ptr + index;
     u32 mpuss_can_lose_context = 0;
+    int error;

     /*
      * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
          * Call idle CPU PM enter notifier chain so that
          * VFP and per CPU interrupt context is saved.
          */
-        cpu_pm_enter();
+        error = cpu_pm_enter();
+        if (error)
+            goto cpu_pm_out;

         if (dev->cpu == 0) {
             pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
              * Call idle CPU cluster PM enter notifier chain
              * to save GIC and wakeupgen context.
              */
-            if (mpuss_can_lose_context)
-                cpu_cluster_pm_enter();
+            if (mpuss_can_lose_context) {
+                error = cpu_cluster_pm_enter();
+                if (error)
+                    goto cpu_cluster_pm_out;
+            }
         }

     omap4_enter_lowpower(dev->cpu, cx->cpu_state);
     cpu_done[dev->cpu] = true;

+cpu_cluster_pm_out:
     /* Wakeup CPU1 only if it is not offlined */
     if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
@@ -197,12 +204,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
         }
     }

-    /*
-     * Call idle CPU PM exit notifier chain to restore
-     * VFP and per CPU IRQ context.
-     */
-    cpu_pm_exit();
-
     /*
      * Call idle CPU cluster PM exit notifier chain
      * to restore GIC and wakeupgen context.
@@ -210,6 +211,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
     if (dev->cpu == 0 && mpuss_can_lose_context)
         cpu_cluster_pm_exit();

+    /*
+     * Call idle CPU PM exit notifier chain to restore
+     * VFP and per CPU IRQ context.
+     */
+    cpu_pm_exit();
+
+cpu_pm_out:
     tick_broadcast_exit();

 fail:

--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -194,6 +194,7 @@ void omap_sram_idle(void)
     int per_next_state = PWRDM_POWER_ON;
     int core_next_state = PWRDM_POWER_ON;
     u32 sdrc_pwr = 0;
+    int error;

     mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
     switch (mpu_next_state) {
@@ -222,8 +223,11 @@ void omap_sram_idle(void)
     pwrdm_pre_transition(NULL);

     /* PER */
-    if (per_next_state == PWRDM_POWER_OFF)
-        cpu_cluster_pm_enter();
+    if (per_next_state == PWRDM_POWER_OFF) {
+        error = cpu_cluster_pm_enter();
+        if (error)
+            return;
+    }

     /* CORE */
     if (core_next_state < PWRDM_POWER_ON) {

--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -299,7 +299,7 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
     return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }

-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
     return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
@@ -307,7 +307,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
     return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
-        kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+        kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
 }

 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -336,6 +336,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
     return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }

+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+    return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
 static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
@@ -373,6 +378,9 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)

 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
+    if (kvm_vcpu_abt_iss1tw(vcpu))
+        return true;
+
     if (kvm_vcpu_trap_is_iabt(vcpu))
         return false;

--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
+extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];

--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/irq_work.h>
 #include <linux/memblock.h>
 #include <linux/of_fdt.h>
 #include <linux/smp.h>
@@ -269,6 +270,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 int apei_claim_sea(struct pt_regs *regs)
 {
     int err = -ENOENT;
+    bool return_to_irqs_enabled;
     unsigned long current_flags;

     if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
@@ -276,6 +278,12 @@ int apei_claim_sea(struct pt_regs *regs)

     current_flags = local_daif_save_flags();

+    /* current_flags isn't useful here as daif doesn't tell us about pNMI */
+    return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
+    if (regs)
+        return_to_irqs_enabled = interrupts_enabled(regs);
+
     /*
      * SEA can interrupt SError, mask it and describe this as an NMI so
      * that APEI defers the handling.
@@ -284,6 +292,23 @@ int apei_claim_sea(struct pt_regs *regs)
     nmi_enter();
     err = ghes_notify_sea();
     nmi_exit();
+
+    /*
+     * APEI NMI-like notifications are deferred to irq_work. Unless
+     * we interrupted irqs-masked code, we can do that now.
+     */
+    if (!err) {
+        if (return_to_irqs_enabled) {
+            local_daif_restore(DAIF_PROCCTX_NOIRQ);
+            __irq_enter();
+            irq_work_run();
+            __irq_exit();
+        } else {
+            pr_warn_ratelimited("APEI work queued but not completed");
+            err = -EINPROGRESS;
+        }
+    }
+
     local_daif_restore(current_flags);

     return err;

--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -160,11 +160,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
     ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
     S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
     S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
-    /* Linux doesn't care about the EL3 */
     ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
-    ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
-    ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
-    ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+    ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+    ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+    ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
     ARM64_FTR_END,
 };
@@ -320,7 +319,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
 };

 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
-    ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
+    /* [31:28] TraceFilt */
     S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),    /* PerfMon */
     ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
     ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
@@ -737,9 +736,6 @@ void update_cpu_features(int cpu,
     taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
                       info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

-    /*
-     * EL3 is not our concern.
-     */
     taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
                       info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
     taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,

--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -21,6 +21,7 @@
 #include <asm/fixmap.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/sections.h>

 #define AARCH64_INSN_SF_BIT    BIT(31)
 #define AARCH64_INSN_N_BIT    BIT(22)
@@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)

 static DEFINE_RAW_SPINLOCK(patch_lock);

+static bool is_exit_text(unsigned long addr)
+{
+    /* discarded with init text/data */
+    return system_state < SYSTEM_RUNNING &&
+        addr >= (unsigned long)__exittext_begin &&
+        addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+    return core_kernel_text(addr) || is_exit_text(addr);
+}
+
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
     unsigned long uintaddr = (uintptr_t) addr;
-    bool module = !core_kernel_text(uintaddr);
+    bool image = is_image_text(uintaddr);
     struct page *page;

-    if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-        page = vmalloc_to_page(addr);
-    else if (!module)
+    if (image)
         page = phys_to_page(__pa_symbol(addr));
+    else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+        page = vmalloc_to_page(addr);
     else
         return addr;

--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -170,9 +170,12 @@ SECTIONS
     __inittext_begin = .;

     INIT_TEXT_SECTION(8)
+
+    __exittext_begin = .;
     .exit.text : {
         ARM_EXIT_KEEP(EXIT_TEXT)
     }
+    __exittext_end = .;

     . = ALIGN(4);
     .altinstructions : {

--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -496,7 +496,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
             kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
             kvm_vcpu_dabt_isvalid(vcpu) &&
             !kvm_vcpu_dabt_isextabt(vcpu) &&
-            !kvm_vcpu_dabt_iss1tw(vcpu);
+            !kvm_vcpu_abt_iss1tw(vcpu);

         if (valid) {
             int ret = __vgic_v2_perform_cpuif_access(vcpu);

--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -654,11 +654,13 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)

     inf = esr_to_fault_info(esr);

-    /*
-     * Return value ignored as we rely on signal merging.
-     * Future patches will make this more robust.
-     */
-    apei_claim_sea(regs);
+    if (user_mode(regs) && apei_claim_sea(regs) == 0) {
+        /*
+         * APEI claimed this as a firmware-first notification.
+         * Some processing deferred to task_work before ret_to_user().
+         */
+        return 0;
+    }

     if (esr & ESR_ELx_FnV)
         siaddr = NULL;

--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -264,6 +264,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
 {
     int tmp = Q40_RTC_CTRL;

+    pll->pll_ctrl = 0;
     pll->pll_value = tmp & Q40_RTC_PLL_MASK;
     if (tmp & Q40_RTC_PLL_SIGN)
         pll->pll_value = -pll->pll_value;

--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
     case CPU_34K:
     case CPU_1004K:
     case CPU_74K:
+    case CPU_1074K:
     case CPU_M14KC:
     case CPU_M14KEC:
     case CPU_INTERAPTIV:

--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -150,4 +150,7 @@
 #define KVM_INST_FETCH_FAILED    -1

+/* Extract PO and XOP opcode fields */
+#define PO_XOP_OPCODE_MASK 0xfc0007fe
+
 #endif /* __POWERPC_KVM_ASM_H__ */

--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,6 +19,7 @@ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)

 CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
+CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING

 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
@@ -36,7 +37,6 @@ KASAN_SANITIZE_btext.o := n
 ifdef CONFIG_KASAN
 CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
 CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
-CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
 CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
 endif

--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -503,7 +503,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
     rc = 1;
     if (pe->state & EEH_PE_ISOLATED) {
         pe->check_count++;
-        if (pe->check_count % EEH_MAX_FAILS == 0) {
+        if (pe->check_count == EEH_MAX_FAILS) {
             dn = pci_device_to_OF_node(dev);
             if (dn)
                 location = of_get_property(dn, "ibm,loc-code",

--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -510,11 +510,11 @@ out:
 #ifdef CONFIG_PPC_BOOK3S_64
     BUG_ON(get_paca()->in_nmi == 0);
     if (get_paca()->in_nmi > 1)
-        nmi_panic(regs, "Unrecoverable nested System Reset");
+        die("Unrecoverable nested System Reset", regs, SIGABRT);
 #endif
     /* Must die if the interrupt is not recoverable */
     if (!(regs->msr & MSR_RI))
-        nmi_panic(regs, "Unrecoverable System Reset");
+        die("Unrecoverable System Reset", regs, SIGABRT);

     if (saved_hsrrs) {
         mtspr(SPRN_HSRR0, hsrr0);
@@ -858,7 +858,7 @@
     /* Must die if the interrupt is not recoverable */
     if (!(regs->msr & MSR_RI))
-        nmi_panic(regs, "Unrecoverable Machine check");
+        die("Unrecoverable Machine check", regs, SIGBUS);

     return;

--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -1104,6 +1104,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
                      kvm->arch.lpid);
         gpa += PAGE_SIZE;
     }
+    /*
+     * Increase the mmu notifier sequence number to prevent any page
+     * fault that read the memslot earlier from writing a PTE.
+     */
+    kvm->mmu_notifier_seq++;
     spin_unlock(&kvm->mmu_lock);
 }

--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -3,6 +3,8 @@
  * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kvm_host.h>

 #include <asm/kvm_ppc.h>
@@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
     u64 newmsr, bescr;
     int ra, rs;

-    switch (instr & 0xfc0007ff) {
+    /*
+     * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+     * in these instructions, so masking bit 31 out doesn't change these
+     * instructions. For treclaim., tsr., and trechkpt. instructions if bit
+     * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
+     * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
+     * 31 is an acceptable way to handle these invalid forms that have
+     * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
+     * bit 31 set) can generate a softpatch interrupt. Hence both forms
+     * are handled below for these instructions so they behave the same way.
+     */
+    switch (instr & PO_XOP_OPCODE_MASK) {
     case PPC_INST_RFID:
         /* XXX do we need to check for PR=0 here? */
         newmsr = vcpu->arch.shregs.srr1;
@@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
         vcpu->arch.shregs.msr = newmsr;
         return RESUME_GUEST;

-    case PPC_INST_TSR:
+    /* ignore bit 31, see comment above */
+    case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
         /* check for PR=1 and arch 2.06 bit set in PCR */
         if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
             /* generate an illegal instruction interrupt */
@@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
         vcpu->arch.shregs.msr = msr;
         return RESUME_GUEST;

-    case PPC_INST_TRECLAIM:
+    /* ignore bit 31, see comment above */
+    case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
         /* check for TM disabled in the HFSCR or MSR */
         if (!(vcpu->arch.hfscr & HFSCR_TM)) {
             /* generate an illegal instruction interrupt */
@@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
         vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
         return RESUME_GUEST;

-    case PPC_INST_TRECHKPT:
+    /* ignore bit 31, see comment above */
+    case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
         /* XXX do we need to check for PR=0 here? */
         /* check for TM disabled in the HFSCR or MSR */
         if (!(vcpu->arch.hfscr & HFSCR_TM)) {
@@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
     }

     /* What should we do here? We didn't recognize the instruction */
-    WARN_ON_ONCE(1);
+    kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+    pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
+
     return RESUME_GUEST;
 }

--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
     u64 newmsr, msr, bescr;
     int rs;

-    switch (instr & 0xfc0007ff) {
+    /*
+     * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+     * in these instructions, so masking bit 31 out doesn't change these
+     * instructions. For the tsr. instruction if bit 31 = 0 then it is per
+     * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
+     * Forms, informs specifically that ignoring bit 31 is an acceptable way
+     * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
+     * for emulation purposes both forms (w/ and wo/ bit 31 set) can
+     * generate a softpatch interrupt. Hence both forms are handled below
+     * for tsr. to make them behave the same way.
+     */
+    switch (instr & PO_XOP_OPCODE_MASK) {
     case PPC_INST_RFID:
         /* XXX do we need to check for PR=0 here? */
         newmsr = vcpu->arch.shregs.srr1;
@@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
         vcpu->arch.shregs.msr = newmsr;
         return 1;

-    case PPC_INST_TSR:
+    /* ignore bit 31, see comment above */
+    case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
         /* we know the MSR has the TS field = S (0b01) here */
         msr = vcpu->arch.shregs.msr;
         /* check for PR=1 and arch 2.06 bit set in PCR */

--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -121,24 +121,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
         goto free_exit;
     }

-    pageshift = PAGE_SHIFT;
-    for (i = 0; i < entries; ++i) {
-        struct page *page = mem->hpages[i];
-
-        /*
-         * Allow to use larger than 64k IOMMU pages. Only do that
-         * if we are backed by hugetlb.
-         */
-        if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
-            pageshift = page_shift(compound_head(page));
-        mem->pageshift = min(mem->pageshift, pageshift);
-
-        /*
-         * We don't need struct page reference any more, switch
-         * to physical address.
-         */
-        mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
-    }
-
 good_exit:
     atomic64_set(&mem->mapped, 1);
     mem->used = 1;
@@ -158,6 +140,27 @@ good_exit:
         }
     }

+    if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+        /*
+         * Allow to use larger than 64k IOMMU pages. Only do that
+         * if we are backed by hugetlb. Skip device memory as it is not
+         * backed with page structs.
+         */
+        pageshift = PAGE_SHIFT;
+        for (i = 0; i < entries; ++i) {
+            struct page *page = mem->hpages[i];
+
+            if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+                pageshift = page_shift(compound_head(page));
+            mem->pageshift = min(mem->pageshift, pageshift);
+
+            /*
+             * We don't need struct page reference any more, switch
+             * to physical address.
+             */
+            mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+        }
+    }
+
     list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

     mutex_unlock(&mem_list_mutex);

--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -44,6 +44,16 @@ static DEFINE_PER_CPU(u64 *, trace_imc_mem);
 static struct imc_pmu_ref *trace_imc_refc;
 static int trace_imc_mem_size;

+/*
+ * Global data structure used to avoid races between thread,
+ * core and trace-imc
+ */
+static struct imc_pmu_ref imc_global_refc = {
+    .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+    .id = 0,
+    .refc = 0,
+};
+
 static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
 {
     return container_of(event->pmu, struct imc_pmu, pmu);
@@ -698,6 +708,16 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
             return -EINVAL;

         ref->refc = 0;
+        /*
+         * Reduce the global reference count, if this is the
+         * last cpu in this core and core-imc event running
+         * in this cpu.
+         */
+        mutex_lock(&imc_global_refc.lock);
+        if (imc_global_refc.id == IMC_DOMAIN_CORE)
+            imc_global_refc.refc--;
+        mutex_unlock(&imc_global_refc.lock);
+
     }
     return 0;
 }
@@ -710,6 +730,23 @@ static int core_imc_pmu_cpumask_init(void)
                  ppc_core_imc_cpu_offline);
 }

+static void reset_global_refc(struct perf_event *event)
+{
+    mutex_lock(&imc_global_refc.lock);
+    imc_global_refc.refc--;
+
+    /*
+     * If no other thread is running any
+     * event for this domain(thread/core/trace),
+     * set the global id to zero.
+     */
+    if (imc_global_refc.refc <= 0) {
+        imc_global_refc.refc = 0;
+        imc_global_refc.id = 0;
+    }
+    mutex_unlock(&imc_global_refc.lock);
+}
+
 static void core_imc_counters_release(struct perf_event *event)
 {
     int rc, core_id;
@@ -759,6 +796,8 @@ static void core_imc_counters_release(struct perf_event *event)
         ref->refc = 0;
     }
     mutex_unlock(&ref->lock);
+
+    reset_global_refc(event);
 }

 static int core_imc_event_init(struct perf_event *event)
@@ -819,6 +858,29 @@ static int core_imc_event_init(struct perf_event *event)
     ++ref->refc;
     mutex_unlock(&ref->lock);

+    /*
+     * Since the system can run either in accumulation or trace-mode
+     * of IMC at a time, core-imc events are allowed only if no other
+     * trace/thread imc events are enabled/monitored.
+     *
+     * Take the global lock, and check the refc.id
+     * to know whether any other trace/thread imc
+     * events are running.
+     */
+    mutex_lock(&imc_global_refc.lock);
+    if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
+        /*
+         * No other trace/thread imc events are running in
+         * the system, so set the refc.id to core-imc.
+         */
+        imc_global_refc.id = IMC_DOMAIN_CORE;
+        imc_global_refc.refc++;
+    } else {
+        mutex_unlock(&imc_global_refc.lock);
+        return -EBUSY;
+    }
+    mutex_unlock(&imc_global_refc.lock);
+
     event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
     event->destroy = core_imc_counters_release;

     return 0;
@@ -877,7 +939,23 @@ static int ppc_thread_imc_cpu_online(unsigned int cpu)

 static int ppc_thread_imc_cpu_offline(unsigned int cpu)
 {
-    mtspr(SPRN_LDBAR, 0);
+    /*
+     * Set the bit 0 of LDBAR to zero.
+     *
+     * If bit 0 of LDBAR is unset, it will stop posting
+     * the counter data to memory.
+     * For thread-imc, bit 0 of LDBAR will be set to 1 in the
+     * event_add function. So reset this bit here, to stop the updates
+     * to memory in the cpu_offline path.
+     */
+    mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
+    /* Reduce the refc if thread-imc event running on this cpu */
+    mutex_lock(&imc_global_refc.lock);
+    if (imc_global_refc.id == IMC_DOMAIN_THREAD)
+        imc_global_refc.refc--;
+    mutex_unlock(&imc_global_refc.lock);
+
     return 0;
 }
@@ -916,7 +994,22 @@ static int thread_imc_event_init(struct perf_event *event)
     if (!target)
         return -EINVAL;

+    mutex_lock(&imc_global_refc.lock);
+    /*
+     * Check if any other trace/core imc events are running in the
+     * system, if not set the global id to thread-imc.
+     */
+    if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
+        imc_global_refc.id = IMC_DOMAIN_THREAD;
+        imc_global_refc.refc++;
+    } else {
+        mutex_unlock(&imc_global_refc.lock);
+        return -EBUSY;
+    }
+    mutex_unlock(&imc_global_refc.lock);
+
     event->pmu->task_ctx_nr = perf_sw_context;
+    event->destroy = reset_global_refc;
     return 0;
 }
@@ -1063,10 +1156,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
     int core_id;
     struct imc_pmu_ref *ref;

-    mtspr(SPRN_LDBAR, 0);
-
     core_id = smp_processor_id() / threads_per_core;
     ref = &core_imc_refc[core_id];
+    if (!ref) {
+        pr_debug("imc: Failed to get event reference count\n");
+        return;
+    }

     mutex_lock(&ref->lock);
     ref->refc--;
@@ -1082,6 +1177,10 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
         ref->refc = 0;
     }
     mutex_unlock(&ref->lock);
+
+    /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
+    mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
     /*
      * Take a snapshot and calculate the delta and update
      * the event counter values.
@@ -1133,7 +1232,18 @@ static int ppc_trace_imc_cpu_online(unsigned int cpu)

 static int ppc_trace_imc_cpu_offline(unsigned int cpu)
 {
-    mtspr(SPRN_LDBAR, 0);
+    /*
+     * No need to set bit 0 of LDBAR to zero, as
+     * it is set to zero for imc trace-mode
+     *
+     * Reduce the refc if any trace-imc event running
+     * on this cpu.
+     */
+    mutex_lock(&imc_global_refc.lock);
+    if (imc_global_refc.id == IMC_DOMAIN_TRACE)
+        imc_global_refc.refc--;
+    mutex_unlock(&imc_global_refc.lock);
+
     return 0;
 }
@@ -1226,15 +1336,14 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
     local_mem = get_trace_imc_event_base_addr();
     ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;

-    if (core_imc_refc)
-        ref = &core_imc_refc[core_id];
+    /* trace-imc reference count */
+    if (trace_imc_refc)
+        ref = &trace_imc_refc[core_id];
     if (!ref) {
-        /* If core-imc is not enabled, use trace-imc reference count */
-        if (trace_imc_refc)
-            ref = &trace_imc_refc[core_id];
-        if (!ref)
-            return -EINVAL;
+        pr_debug("imc: Failed to get the event reference count\n");
+        return -EINVAL;
     }
+
     mtspr(SPRN_LDBAR, ldbar_value);
     mutex_lock(&ref->lock);
     if (ref->refc == 0) {
@@ -1242,13 +1351,11 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
                 get_hard_smp_processor_id(smp_processor_id()))) {
             mutex_unlock(&ref->lock);
             pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
-            mtspr(SPRN_LDBAR, 0);
             return -EINVAL;
         }
     }
     ++ref->refc;
     mutex_unlock(&ref->lock);
-
     return 0;
 }
@@ -1274,16 +1381,13 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
     int core_id = smp_processor_id() / threads_per_core;
     struct imc_pmu_ref *ref = NULL;

-    if (core_imc_refc)
-        ref = &core_imc_refc[core_id];
+    if (trace_imc_refc)
+        ref = &trace_imc_refc[core_id];
     if (!ref) {
-        /* If core-imc is not enabled, use trace-imc reference count */
-        if (trace_imc_refc)
-            ref = &trace_imc_refc[core_id];
-        if (!ref)
-            return;
+        pr_debug("imc: Failed to get event reference count\n");
+        return;
     }
-    mtspr(SPRN_LDBAR, 0);
+
     mutex_lock(&ref->lock);
     ref->refc--;
     if (ref->refc == 0) {
@@ -1297,6 +1401,7 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
         ref->refc = 0;
     }
     mutex_unlock(&ref->lock);
+
     trace_imc_event_stop(event, flags);
 }
@@ -1314,10 +1419,30 @@ static int trace_imc_event_init(struct perf_event *event)
     if (event->attr.sample_period == 0)
         return -ENOENT;

+    /*
+     * Take the global lock, and make sure
+     * no other thread is running any core/thread imc
+     * events
+     */
+    mutex_lock(&imc_global_refc.lock);
+    if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
+        /*
+         * No core/thread imc events are running in the
+         * system, so set the refc.id to trace-imc.
+         */
+        imc_global_refc.id = IMC_DOMAIN_TRACE;
+        imc_global_refc.refc++;
+    } else {
+        mutex_unlock(&imc_global_refc.lock);
+        return -EBUSY;
+    }
+    mutex_unlock(&imc_global_refc.lock);
+
     event->hw.idx = -1;
     target = event->hw.target;

     event->pmu->task_ctx_nr = perf_hw_context;
+    event->destroy = reset_global_refc;
     return 0;
 }
@@ -1429,10 +1554,10 @@ static void cleanup_all_core_imc_memory(void)
 static void thread_imc_ldbar_disable(void *dummy)
 {
     /*
-     * By Zeroing LDBAR, we disable thread-imc
-     * updates.
+     * By setting 0th bit of LDBAR to zero, we disable thread-imc
+     * updates to memory.
      */
-    mtspr(SPRN_LDBAR, 0);
+    mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 }

 void thread_imc_disable(void)

--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -63,4 +63,11 @@ do {									\
  * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
  */
 #define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
 #endif

--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
     return __ftrace_modify_call(rec->ip, addr, false);
 }

+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+    int out;
+
+    ftrace_arch_code_modify_prepare();
+    out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+    ftrace_arch_code_modify_post_process();
+
+    return out;
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
     int ret = __ftrace_modify_call((unsigned long)&ftrace_call,

--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1247,26 +1247,46 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)

-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
 {
-    if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-        return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-    return (p4d_t *) pgd;
+    if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+        return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+    return (p4d_t *) pgdp;
+}
+#define p4d_offset_lockless p4d_offset_lockless
+
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+{
+    return p4d_offset_lockless(pgdp, *pgdp, address);
 }

-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
 {
-    if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-        return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-    return (pud_t *) p4d;
+    if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+        return (pud_t *) p4d_deref(p4d) + pud_index(address);
+    return (pud_t *) p4dp;
 }
+#define pud_offset_lockless pud_offset_lockless

-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
 {
-    if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-        return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-    return (pmd_t *) pud;
+    return pud_offset_lockless(p4dp, *p4dp, address);
 }
+#define pud_offset pud_offset
+
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
+{
+    if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+        return (pmd_t *) pud_deref(pud) + pmd_index(address);
+    return (pmd_t *) pudp;
+}
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+{
+    return pmd_offset_lockless(pudp, *pudp, address);
+}
+#define pmd_offset pmd_offset

 static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
 {

--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -111,4 +111,15 @@ struct stack_frame {
     r2;							\
 })

+#define CALL_ON_STACK_NORETURN(fn, stack)			\
+({								\
+    asm volatile(						\
+        "	la	15,0(%[_stack])\n"			\
+        "	xc	%[_bc](8,15),%[_bc](15)\n"		\
+        "	brasl	14,%[_fn]\n"				\
+        ::[_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
+          [_stack] "a" (stack), [_fn] "X" (fn));		\
+    BUG();							\
+})
+
 #endif /* _ASM_S390_STACKTRACE_H */

--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -294,11 +294,6 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
     return IRQ_HANDLED;
 }

-static struct irqaction external_interrupt = {
-    .name	 = "EXT",
-    .handler = do_ext_interrupt,
-};
-
 void __init init_ext_interrupts(void)
 {
     int idx;
@@ -308,7 +303,8 @@ void __init init_ext_interrupts(void)
     irq_set_chip_and_handler(EXT_INTERRUPT,
                  &dummy_irq_chip, handle_percpu_irq);

-    setup_irq(EXT_INTERRUPT, &external_interrupt);
+    if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL))
+        panic("Failed to register EXT interrupt\n");
 }

 static DEFINE_SPINLOCK(irq_subclass_lock);

--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1429,8 +1429,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
         idx = aux->empty_mark + 1;
         for (i = 0; i < range_scan; i++, idx++) {
             te = aux_sdb_trailer(aux, idx);
-            te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
-            te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
+            te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
+                       SDB_TE_ALERT_REQ_MASK);
             te->overflow = 0;
         }
         /* Save the position of empty SDBs */
@@ -1477,8 +1477,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
     te = aux_sdb_trailer(aux, alert_index);
     do {
         orig_flags = te->flags;
-        orig_overflow = te->overflow;
-        *overflow = orig_overflow;
+        *overflow = orig_overflow = te->overflow;
         if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
             /*
              * SDB is already set by hardware.
@@ -1712,7 +1711,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
     }

     /* Allocate aux_buffer struct for the event */
-    aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+    aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
     if (!aux)
         goto no_aux;
     sfb = &aux->sfb;

--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -356,7 +356,6 @@ early_initcall(async_stack_realloc);

 void __init arch_call_rest_init(void)
 {
-    struct stack_frame *frame;
     unsigned long stack;

     stack = stack_alloc();
@@ -369,13 +368,7 @@
     set_task_stack_end_magic(current);
     stack += STACK_INIT_OFFSET;
     S390_lowcore.kernel_stack = stack;
-    frame = (struct stack_frame *) stack;
-    memset(frame, 0, sizeof(*frame));
-    /* Branch to rest_init on the new stack, never returns */
-    asm volatile(
-        "	la	15,0(%[_frame])\n"
-        "	jg	rest_init\n"
-        : : [_frame] "a" (frame));
+    CALL_ON_STACK_NORETURN(rest_init, stack);
 }

 static void __init setup_lowcore_dat_off(void)
@@ -634,7 +627,7 @@ static struct notifier_block kdump_mem_nb = {
 /*
  * Make sure that the area behind memory_end is protected
  */
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
 {
     if (memory_end_set)
         memblock_reserve(memory_end, ULONG_MAX);
@@ -643,7 +636,7 @@
 /*
  * Make sure that oldmem, where the dump is stored, is protected
  */
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
     if (OLDMEM_BASE)
@@ -655,7 +648,7 @@
 /*
  * Make sure that oldmem, where the dump is stored, is protected
  */
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
     if (OLDMEM_BASE)

--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -878,7 +878,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
     S390_lowcore.restart_source = -1UL;
     __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
     __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
-    CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
+    CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
 }

 /* Upping and downing of CPUs */

--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -10,4 +10,10 @@ int crash_setup_memmap_entries(struct kimage *image,
         struct boot_params *params);
 void crash_smp_send_stop(void);

+#ifdef CONFIG_KEXEC_CORE
+void __init crash_reserve_low_1M(void);
+#else
+static inline void __init crash_reserve_low_1M(void) { }
+#endif
+
 #endif /* _ASM_X86_CRASH_H */

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -320,7 +320,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
  * combination with microcode which triggers a CPU buffer flush when the
  * instruction is executed.
  */
-static inline void mds_clear_cpu_buffers(void)
+static __always_inline void mds_clear_cpu_buffers(void)
 {
     static const u16 ds = __KERNEL_DS;
@@ -341,7 +341,7 @@ static inline void mds_clear_cpu_buffers(void)
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
-static inline void mds_user_clear_cpu_buffers(void)
+static __always_inline void mds_user_clear_cpu_buffers(void)
 {
     if (static_branch_likely(&mds_user_clear))
         mds_clear_cpu_buffers();

--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -4,6 +4,11 @@

 #define ARCH_DEFAULT_PKEY	0

+/*
+ * If more than 16 keys are ever supported, a thorough audit
+ * will be necessary to ensure that the types that store key
+ * numbers and masks have sufficient capacity.
+ */
 #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)

 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,

--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2256,6 +2256,7 @@ static inline void __init check_timer(void)
     legacy_pic->init(0);
     legacy_pic->make_irq(0);
     apic_write(APIC_LVT0, APIC_DM_EXTINT);
+    legacy_pic->unmask(0);

     unlock_ExtINT_logic();

--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -24,6 +24,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/memblock.h>

 #include <asm/processor.h>
 #include <asm/hardirq.h>
@@ -39,6 +40,7 @@
 #include <asm/virtext.h>
 #include <asm/intel_pt.h>
 #include <asm/crash.h>
+#include <asm/cmdline.h>

 /* Used while preparing memory map entries for second kernel */
 struct crash_memmap_data {
@@ -68,6 +70,19 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
     rcu_read_unlock();
 }

+/*
+ * When the crashkernel option is specified, only use the low
+ * 1M for the real mode trampoline.
+ */
+void __init crash_reserve_low_1M(void)
+{
+    if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
+        return;
+
+    memblock_reserve(0, 1<<20);
+    pr_info("Reserving the low 1M of memory for crashkernel\n");
+}
+
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)

--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -895,8 +895,6 @@ const void *get_xsave_field_ptr(int xfeature_nr)

 #ifdef CONFIG_ARCH_HAS_PKEYS

-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
-
 /*
  * This will go out and modify PKRU register to set the access
  * rights for @pkey to @init_val.
@@ -915,6 +913,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
     if (!boot_cpu_has(X86_FEATURE_OSPKE))
         return -EINVAL;

+    /*
+     * This code should only be called with valid 'pkey'
+     * values originating from in-kernel users. Complain
+     * if a bad value is observed.
+     */
+    WARN_ON_ONCE(pkey >= arch_max_pkey());
+
     /* Set the bits we need in PKRU: */
     if (init_val & PKEY_DISABLE_ACCESS)
         new_pkru_bits |= PKRU_AD_BIT;

--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5836,6 +5836,8 @@ writeback:
     }

     ctxt->eip = ctxt->_eip;
+    if (ctxt->mode != X86EMUL_MODE_PROT64)
+        ctxt->eip = (u32)ctxt->_eip;

 done:
     if (rc == X86EMUL_PROPAGATE_FAULT) {

--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1684,7 +1684,7 @@ static void start_sw_period(struct kvm_lapic *apic)

     hrtimer_start(&apic->lapic_timer.timer,
               apic->lapic_timer.target_expiration,
-              HRTIMER_MODE_ABS);
+              HRTIMER_MODE_ABS_HARD);
 }

 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)

--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -339,7 +339,7 @@ TRACE_EVENT(
         /* These depend on page entry type, so compute them now. */
         __field(bool, r)
         __field(bool, x)
-        __field(u8, u)
+        __field(signed char, u)
     ),

     TP_fast_assign(

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -787,9 +787,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
         if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
             return 0;
     } else {
-        if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
-            pr_err("%s: ip 0x%lx next 0x%llx\n",
-                   __func__, kvm_rip_read(vcpu), svm->next_rip);
         kvm_rip_write(vcpu, svm->next_rip);
     }
     svm_set_interrupt_shadow(vcpu, 0);
@@ -3970,6 +3967,12 @@ static int iret_interception(struct vcpu_svm *svm)
     return 1;
 }

+static int invd_interception(struct vcpu_svm *svm)
+{
+    /* Treat an INVD instruction as a NOP and just skip it. */
+    return kvm_skip_emulated_instruction(&svm->vcpu);
+}
+
 static int invlpg_interception(struct vcpu_svm *svm)
 {
     if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
@@ -4822,7 +4825,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
     [SVM_EXIT_RDPMC]			= rdpmc_interception,
     [SVM_EXIT_CPUID]			= cpuid_interception,
     [SVM_EXIT_IRET]				= iret_interception,
-    [SVM_EXIT_INVD]				= emulate_on_interception,
+    [SVM_EXIT_INVD]				= invd_interception,
     [SVM_EXIT_PAUSE]			= pause_interception,
     [SVM_EXIT_HLT]				= halt_interception,
     [SVM_EXIT_INVLPG]			= invlpg_interception,

View File

@@ -1130,6 +1130,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 					   vmx->guest_msrs[i].mask);
 	}
 
+	if (vmx->nested.need_vmcs12_to_shadow_sync)
+		nested_sync_vmcs12_to_shadow(vcpu);
+
 	if (vmx->guest_state_loaded)
 		return;
 
@@ -1537,7 +1541,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
 
 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 {
-	unsigned long rip;
+	unsigned long rip, orig_rip;
 
 	/*
 	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
@@ -1549,8 +1553,17 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	 */
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
 	    to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
-		rip = kvm_rip_read(vcpu);
-		rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+		orig_rip = kvm_rip_read(vcpu);
+		rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+#ifdef CONFIG_X86_64
+		/*
+		 * We need to mask out the high 32 bits of RIP if not in 64-bit
+		 * mode, but just finding out that we are in 64-bit mode is
+		 * quite expensive. Only do it if there was a carry.
+		 */
+		if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
+			rip = (u32)rip;
+#endif
 		kvm_rip_write(vcpu, rip);
 	} else {
 		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
@@ -6486,8 +6499,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmcs_write32(PLE_WINDOW, vmx->ple_window);
 	}
 
-	if (vmx->nested.need_vmcs12_to_shadow_sync)
-		nested_sync_vmcs12_to_shadow(vcpu);
+	/*
+	 * We did this in prepare_switch_to_guest, because it needs to
+	 * be within srcu_read_lock.
+	 */
+	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
 
 	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
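The `((rip ^ orig_rip) >> 31) == 3` test above deserves unpacking: outside 64-bit mode the high half of RIP is zero, so adding a small instruction length can only carry out of bit 31, which flips exactly bits 31 and 32 of the XOR. A standalone demonstration of the trick, with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t orig_rip = 0xfffffffeull;  /* near the 4 GiB boundary */
    uint64_t rip = orig_rip + 5;        /* += VM_EXIT_INSTRUCTION_LEN */

    if (((rip ^ orig_rip) >> 31) == 3)  /* carry out of bit 31? */
        rip = (uint32_t)rip;            /* wrap like a 32-bit IP */

    printf("%#llx\n", (unsigned long long)rip); /* prints 0x3 */
    return 0;
}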

View File

@@ -973,6 +973,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
 				   X86_CR4_SMEP;
+	unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
 
 	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
@@ -1000,7 +1001,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (kvm_x86_ops->set_cr4(vcpu, cr4))
 		return 1;
 
-	if (((cr4 ^ old_cr4) & pdptr_bits) ||
+	if (((cr4 ^ old_cr4) & mmu_role_bits) ||
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 
@@ -5050,10 +5051,13 @@ set_identity_unlock:
 		r = -EFAULT;
 		if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
 			goto out;
+		mutex_lock(&kvm->lock);
 		r = -ENXIO;
 		if (!kvm->arch.vpit)
-			goto out;
+			goto set_pit_out;
 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
+set_pit_out:
+		mutex_unlock(&kvm->lock);
 		break;
 	}
 	case KVM_GET_PIT2: {
@@ -5073,10 +5077,13 @@ set_identity_unlock:
 		r = -EFAULT;
 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
 			goto out;
+		mutex_lock(&kvm->lock);
 		r = -ENXIO;
 		if (!kvm->arch.vpit)
-			goto out;
+			goto set_pit2_out;
 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
+set_pit2_out:
+		mutex_unlock(&kvm->lock);
 		break;
 	}
 	case KVM_REINJECT_CONTROL: {
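Both PIT hunks apply the same shape: take kvm->lock before testing kvm->arch.vpit, so the device cannot be torn down between the check and the call, and route every exit through the unlock label. A generic pthreads sketch of that shape, not the KVM code itself:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *device; /* stands in for kvm->arch.vpit */

static int do_ioctl(void)
{
    int r;

    pthread_mutex_lock(&lock);
    r = -1;                  /* -ENXIO in the kernel */
    if (!device)
        goto out_unlock;     /* the error path still releases the lock */
    r = 0;                   /* operate on the device here */
out_unlock:
    pthread_mutex_unlock(&lock);
    return r;
}

int main(void)
{
    printf("%d\n", do_ioctl()); /* -1: no device registered */
    return 0;
}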

View File

@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 	 */
 	if (size < 8) {
 		if (!IS_ALIGNED(dest, 4) || size != 4)
-			clean_cache_range(dst, 1);
+			clean_cache_range(dst, size);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);

View File

@@ -8,6 +8,7 @@
 #include <asm/pgtable.h>
 #include <asm/realmode.h>
 #include <asm/tlbflush.h>
+#include <asm/crash.h>
 
 struct real_mode_header *real_mode_header;
 u32 *trampoline_cr4_features;
@@ -34,6 +35,7 @@ void __init reserve_real_mode(void)
 
 	memblock_reserve(mem, size);
 	set_real_mode_mem(mem);
+	crash_reserve_low_1M();
 }
 
 static void __init setup_real_mode(void)

View File

@@ -1897,6 +1897,7 @@ ENTRY(system_call)
 
 	mov	a6, a2
 	call4	do_syscall_trace_enter
+	beqz	a6, .Lsyscall_exit
 	l32i	a7, a2, PT_SYSCALL
 
 1:
@@ -1911,8 +1912,6 @@ ENTRY(system_call)
 
 	addx4	a4, a7, a4
 	l32i	a4, a4, 0
-	movi	a5, sys_ni_syscall;
-	beq	a4, a5, 1f
 
 	/* Load args: arg0 - arg5 are passed via regs. */
 
@@ -1932,6 +1931,7 @@ ENTRY(system_call)
 
 	s32i	a6, a2, PT_AREG2
 	bnez	a3, 1f
+.Lsyscall_exit:
 	abi_ret(4)
 
 1:

View File

@@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ret;
 }
 
-void do_syscall_trace_enter(struct pt_regs *regs)
+void do_syscall_trace_leave(struct pt_regs *regs);
+int do_syscall_trace_enter(struct pt_regs *regs)
 {
+	if (regs->syscall == NO_SYSCALL)
+		regs->areg[2] = -ENOSYS;
+
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs))
+	    tracehook_report_syscall_entry(regs)) {
+		regs->areg[2] = -ENOSYS;
 		regs->syscall = NO_SYSCALL;
+		return 0;
+	}
+
+	if (regs->syscall == NO_SYSCALL) {
+		do_syscall_trace_leave(regs);
+		return 0;
+	}
 
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, syscall_get_nr(current, regs));
+
+	return 1;
 }
 
 void do_syscall_trace_leave(struct pt_regs *regs)

View File

@@ -1043,29 +1043,21 @@ void acpi_ec_unblock_transactions(void)
 /* --------------------------------------------------------------------------
                                 Event Management
    -------------------------------------------------------------------------- */
-static struct acpi_ec_query_handler *
-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
-{
-	if (handler)
-		kref_get(&handler->kref);
-	return handler;
-}
-
 static struct acpi_ec_query_handler *
 acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
 {
 	struct acpi_ec_query_handler *handler;
-	bool found = false;
 
 	mutex_lock(&ec->mutex);
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
-			found = true;
-			break;
+			kref_get(&handler->kref);
+			mutex_unlock(&ec->mutex);
+			return handler;
 		}
 	}
 	mutex_unlock(&ec->mutex);
-	return found ? acpi_ec_get_query_handler(handler) : NULL;
+	return NULL;
 }
 
 static void acpi_ec_query_handler_release(struct kref *kref)
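The invariant the rewrite preserves is that the reference is taken while the list mutex is still held; otherwise the handler could be released between finding it and referencing it. A minimal userspace sketch of the same pattern, with an atomic refcount standing in for the kref:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct handler {
    atomic_int refs;
    int query_bit;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct handler handlers[2] = { { 1, 0x10 }, { 1, 0x20 } };

static struct handler *get_handler_by_value(int value)
{
    pthread_mutex_lock(&list_lock);
    for (int i = 0; i < 2; i++) {
        if (handlers[i].query_bit == value) {
            atomic_fetch_add(&handlers[i].refs, 1); /* "kref_get" under the lock */
            pthread_mutex_unlock(&list_lock);
            return &handlers[i];
        }
    }
    pthread_mutex_unlock(&list_lock);
    return NULL; /* no match */
}

int main(void)
{
    struct handler *h = get_handler_by_value(0x20);
    printf("%d\n", h ? atomic_load(&h->refs) : -1); /* prints 2 */
    return 0;
}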

View File

@@ -56,7 +56,7 @@ struct acard_sg {
 	__le32 size;	/* bit 31 (EOT) max==0x10000 (64k) */
 };
 
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int acard_ahci_port_start(struct ata_port *ap);
 static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -210,7 +210,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 	return si;
 }
 
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
@@ -248,6 +248,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+	return AC_ERR_OK;
 }
 
 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)

View File

@@ -57,7 +57,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
@@ -1624,7 +1624,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
 		return sata_pmp_qc_defer_cmd_switch(qc);
 }
 
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
@@ -1660,6 +1660,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+	return AC_ERR_OK;
 }
 
 static void ahci_fbs_dec_intr(struct ata_port *ap)

View File

@@ -4978,7 +4978,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
 	return ATA_DEFER_LINK;
 }
 
-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
+{
+	return AC_ERR_OK;
+}
 
 /**
  *	ata_sg_init - Associate command with scatter-gather table.
@@ -5465,7 +5468,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 		return;
 	}
 
-	ap->ops->qc_prep(qc);
+	qc->err_mask |= ap->ops->qc_prep(qc);
+	if (unlikely(qc->err_mask))
+		goto err;
 	trace_ata_qc_issue(qc);
 	qc->err_mask |= ap->ops->qc_issue(qc);
 	if (unlikely(qc->err_mask))
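This hunk is the heart of the series: ->qc_prep() can now fail cleanly through qc->err_mask instead of forcing drivers to BUG() (see the sata_mv conversion further down). What the new contract asks of a driver's callback, as a hypothetical sketch where foo_fill_sg() and the foo_ prefix are invented for illustration:

static enum ata_completion_errors foo_qc_prep(struct ata_queued_cmd *qc)
{
    if (!(qc->flags & ATA_QCFLAG_DMAMAP))
        return AC_ERR_OK;       /* nothing to map; not an error */

    if (foo_fill_sg(qc) < 0)    /* hypothetical S/G table builder */
        return AC_ERR_SYSTEM;   /* propagated into qc->err_mask, command fails */

    return AC_ERR_OK;
}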

View File

@@ -2679,12 +2679,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	spin_lock_irqsave(host lock)
  */
-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	ata_bmdma_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
 
@@ -2697,12 +2699,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
  *	LOCKING:
  *	spin_lock_irqsave(host lock)
  */
-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	ata_bmdma_fill_sg_dumb(qc);
+
+	return AC_ERR_OK;
 }
 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);

View File

@@ -510,7 +510,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
 	return ATA_CBL_PATA40;
 }
 
-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 {
 	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 	struct ata_port *ap = qc->ap;
@@ -523,7 +523,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 		   __func__, qc, qc->flags, write, qc->dev->devno);
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 
@@ -568,6 +568,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 	table->command = cpu_to_le16(DBDMA_STOP);
 
 	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+
+	return AC_ERR_OK;
 }

View File

@@ -44,25 +44,27 @@ static void pxa_ata_dma_irq(void *d)
 /*
  * Prepare taskfile for submission.
  */
-static void pxa_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pata_pxa_data *pd = qc->ap->private_data;
 	struct dma_async_tx_descriptor *tx;
 	enum dma_transfer_direction dir;
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
 	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
 				     DMA_PREP_INTERRUPT);
 	if (!tx) {
 		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
-		return;
+		return AC_ERR_OK;
 	}
 	tx->callback = pxa_ata_dma_irq;
 	tx->callback_param = pd;
 	pd->dma_cookie = dmaengine_submit(tx);
+
+	return AC_ERR_OK;
 }
 
 /*

View File

@@ -116,7 +116,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
 			     const struct pci_device_id *ent);
 static int adma_port_start(struct ata_port *ap);
 static void adma_port_stop(struct ata_port *ap);
-static void adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
 static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
 static void adma_freeze(struct ata_port *ap);
@@ -295,7 +295,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
 	return i;
 }
 
-static void adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct adma_port_priv *pp = qc->ap->private_data;
 	u8 *buf = pp->pkt;
@@ -306,7 +306,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
 
 	adma_enter_reg_mode(qc->ap);
 	if (qc->tf.protocol != ATA_PROT_DMA)
-		return;
+		return AC_ERR_OK;
 
 	buf[i++] = 0;	/* Response flags */
 	buf[i++] = 0;	/* reserved */
@@ -371,6 +371,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
 		printk("%s\n", obuf);
 	}
 #endif
+	return AC_ERR_OK;
 }
 
 static inline void adma_packet_start(struct ata_queued_cmd *qc)

View File

@@ -502,7 +502,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
 	return num_prde;
 }
 
-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct sata_fsl_port_priv *pp = ap->private_data;
@@ -548,6 +548,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
 
 	VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
 		desc_info, ttl_dwords, num_prde);
+
+	return AC_ERR_OK;
 }
 
 static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)

View File

@@ -478,7 +478,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
 	prd[-1].flags |= PRD_END;
 }
 
-static void inic_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct inic_port_priv *pp = qc->ap->private_data;
 	struct inic_pkt *pkt = pp->pkt;
@@ -538,6 +538,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
 		inic_fill_sg(prd, qc);
 
 	pp->cpb_tbl[0] = pp->pkt_dma;
+
+	return AC_ERR_OK;
 }
 
 static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)

View File

@@ -592,8 +592,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static int mv_qc_defer(struct ata_queued_cmd *qc);
-static void mv_qc_prep(struct ata_queued_cmd *qc);
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 static int mv_hardreset(struct ata_link *link, unsigned int *class,
 			unsigned long deadline);
@@ -2031,7 +2031,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct mv_port_priv *pp = ap->private_data;
@@ -2043,15 +2043,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	switch (tf->protocol) {
 	case ATA_PROT_DMA:
 		if (tf->command == ATA_CMD_DSM)
-			return;
+			return AC_ERR_OK;
 		/* fall-thru */
 	case ATA_PROT_NCQ:
 		break;	/* continue below */
 	case ATA_PROT_PIO:
 		mv_rw_multi_errata_sata24(qc);
-		return;
+		return AC_ERR_OK;
 	default:
-		return;
+		return AC_ERR_OK;
 	}
 
 	/* Fill in command request block
@@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
 		 * of which are defined/used by Linux.  If we get here, this
 		 * driver needs work.
-		 *
-		 * FIXME: modify libata to give qc_prep a return value and
-		 * return error here.
 		 */
-		BUG_ON(tf->command);
-		break;
+		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
+				tf->command);
+		return AC_ERR_INVALID;
 	}
 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
@@ -2116,8 +2114,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 	mv_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 /**
@@ -2132,7 +2132,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct mv_port_priv *pp = ap->private_data;
@@ -2143,9 +2143,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 
 	if ((tf->protocol != ATA_PROT_DMA) &&
 	    (tf->protocol != ATA_PROT_NCQ))
-		return;
+		return AC_ERR_OK;
 	if (tf->command == ATA_CMD_DSM)
-		return;	/* use bmdma for this */
+		return AC_ERR_OK;	/* use bmdma for this */
 
 	/* Fill in Gen IIE command request block */
 	if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2186,8 +2186,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 		);
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 	mv_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 /**

View File

@@ -297,7 +297,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
 static int nv_adma_slave_config(struct scsi_device *sdev);
 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
 static void nv_adma_irq_clear(struct ata_port *ap);
@@ -319,7 +319,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
 static void nv_swncq_error_handler(struct ata_port *ap);
 static int nv_swncq_slave_config(struct scsi_device *sdev);
 static int nv_swncq_port_start(struct ata_port *ap);
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
@@ -1344,7 +1344,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
 	return 1;
 }
 
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct nv_adma_port_priv *pp = qc->ap->private_data;
 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
@@ -1356,7 +1356,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
 		ata_bmdma_qc_prep(qc);
-		return;
+		return AC_ERR_OK;
 	}
 
 	cpb->resp_flags = NV_CPB_RESP_DONE;
@@ -1388,6 +1388,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 	cpb->ctl_flags = ctl_flags;
 	wmb();
 	cpb->resp_flags = 0;
+
+	return AC_ERR_OK;
 }
 
 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
@@ -1950,17 +1952,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
 	return 0;
 }
 
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (qc->tf.protocol != ATA_PROT_NCQ) {
 		ata_bmdma_qc_prep(qc);
-		return;
+		return AC_ERR_OK;
 	}
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	nv_swncq_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)

View File

@@ -139,7 +139,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int pdc_common_port_start(struct ata_port *ap);
 static int pdc_sata_port_start(struct ata_port *ap);
-static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -633,7 +633,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
-static void pdc_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pdc_port_priv *pp = qc->ap->private_data;
 	unsigned int i;
@@ -665,6 +665,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 	default:
 		break;
 	}
+
+	return AC_ERR_OK;
 }
 
 static int pdc_is_sataii_tx4(unsigned long flags)

View File

@@ -100,7 +100,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int qs_port_start(struct ata_port *ap);
 static void qs_host_stop(struct ata_host *host);
-static void qs_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
 static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
 static void qs_freeze(struct ata_port *ap);
@@ -260,7 +260,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
 	return si;
 }
 
-static void qs_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct qs_port_priv *pp = qc->ap->private_data;
 	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
@@ -272,7 +272,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 
 	qs_enter_reg_mode(qc->ap);
 	if (qc->tf.protocol != ATA_PROT_DMA)
-		return;
+		return AC_ERR_OK;
 
 	nelem = qs_fill_sg(qc);
 
@@ -295,6 +295,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 
 	/* frame information structure (FIS) */
 	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+
+	return AC_ERR_OK;
 }
 
 static inline void qs_packet_start(struct ata_queued_cmd *qc)

View File

@@ -550,12 +550,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
 	prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
 }
 
-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	sata_rcar_bmdma_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)

View File

@@ -103,7 +103,7 @@ static void sil_dev_config(struct ata_device *dev);
 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
-static void sil_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
 static void sil_bmdma_setup(struct ata_queued_cmd *qc);
 static void sil_bmdma_start(struct ata_queued_cmd *qc);
 static void sil_bmdma_stop(struct ata_queued_cmd *qc);
@@ -317,12 +317,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
 		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
-static void sil_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	sil_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)

View File

@@ -326,7 +326,7 @@ static void sil24_dev_config(struct ata_device *dev);
 static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
 static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
 static int sil24_qc_defer(struct ata_queued_cmd *qc);
-static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
 static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
 static void sil24_pmp_attach(struct ata_port *ap);
@@ -830,7 +830,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
 	return ata_std_qc_defer(qc);
 }
 
-static void sil24_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct sil24_port_priv *pp = ap->private_data;
@@ -874,6 +874,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
 
 	if (qc->flags & ATA_QCFLAG_DMAMAP)
 		sil24_fill_sg(qc, sge);
+
+	return AC_ERR_OK;
 }
 
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)

View File

@@ -202,7 +202,7 @@ static void pdc_error_handler(struct ata_port *ap);
 static void pdc_freeze(struct ata_port *ap);
 static void pdc_thaw(struct ata_port *ap);
 static int pdc_port_start(struct ata_port *ap);
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static unsigned int pdc20621_dimm_init(struct ata_host *host);
@@ -530,7 +530,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
 	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
 }
 
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
 {
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
@@ -542,6 +542,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
 	default:
 		break;
 	}
+
+	return AC_ERR_OK;
 }
 
 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,

View File

@@ -2245,7 +2245,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
 
 	rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
 	if (rc < 0)
-		goto out;
+		goto err_disable;
 
 	rc = -ENOMEM;
 	eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);

View File

@@ -270,7 +270,7 @@ static int __init get_cpu_for_node(struct device_node *node)
 static int __init parse_core(struct device_node *core, int package_id,
 			     int core_id)
 {
-	char name[10];
+	char name[20];
 	bool leaf = true;
 	int i = 0;
 	int cpu;
@@ -317,7 +317,7 @@ static int __init parse_core(struct device_node *core, int package_id,
 
 static int __init parse_cluster(struct device_node *cluster, int depth)
 {
-	char name[10];
+	char name[20];
 	bool leaf = true;
 	bool has_cores = false;
 	struct device_node *c;

View File

@@ -259,7 +259,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len);
+		      const void *val, size_t val_len, bool noinc);
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);

View File

@@ -717,7 +717,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
 
 	map->cache_bypass = true;
 
-	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
+	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
 	if (ret)
 		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
 			base, cur - map->reg_stride, ret);

View File

@@ -1466,7 +1466,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
 }
 
 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
-				  const void *val, size_t val_len)
+				  const void *val, size_t val_len, bool noinc)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -1525,7 +1525,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 				 win_residue, val_len / map->format.val_bytes);
 			ret = _regmap_raw_write_impl(map, reg, val,
 						     win_residue *
-						     map->format.val_bytes);
+						     map->format.val_bytes, noinc);
 			if (ret != 0)
 				return ret;
 
@@ -1539,7 +1539,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 			win_residue = range->window_len - win_offset;
 		}
 
-		ret = _regmap_select_page(map, &reg, range, val_num);
+		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
 		if (ret != 0)
 			return ret;
 	}
@@ -1747,7 +1747,8 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				      map->work_buf +
 				      map->format.reg_bytes +
 				      map->format.pad_bytes,
-				      map->format.val_bytes);
+				      map->format.val_bytes,
+				      false);
 }
 
 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1841,7 +1842,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write_async);
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len)
+		      const void *val, size_t val_len, bool noinc)
 {
 	size_t val_bytes = map->format.val_bytes;
 	size_t val_count = val_len / val_bytes;
@@ -1862,7 +1863,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	/* Write as many bytes as possible with chunk_size */
 	for (i = 0; i < chunk_count; i++) {
-		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
+		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
 		if (ret)
 			return ret;
 
@@ -1873,7 +1874,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	/* Write remaining bytes */
 	if (val_len)
-		ret = _regmap_raw_write_impl(map, reg, val, val_len);
+		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
 
 	return ret;
 }
@@ -1906,7 +1907,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	map->lock(map->lock_arg);
 
-	ret = _regmap_raw_write(map, reg, val, val_len);
+	ret = _regmap_raw_write(map, reg, val, val_len, false);
 
 	map->unlock(map->lock_arg);
 
@@ -1964,7 +1965,7 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
 			write_len = map->max_raw_write;
 		else
 			write_len = val_len;
-		ret = _regmap_raw_write(map, reg, val, write_len);
+		ret = _regmap_raw_write(map, reg, val, write_len, true);
 		if (ret)
 			goto out_unlock;
 		val = ((u8 *)val) + write_len;
@@ -2441,7 +2442,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 
 	map->async = true;
 
-	ret = _regmap_raw_write(map, reg, val, val_len);
+	ret = _regmap_raw_write(map, reg, val, val_len, false);
 
 	map->async = false;
 
@@ -2452,7 +2453,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
 
 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
-			    unsigned int val_len)
+			    unsigned int val_len, bool noinc)
 {
 	struct regmap_range_node *range;
 	int ret;
@@ -2465,7 +2466,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	range = _regmap_range_lookup(map, reg);
 	if (range) {
 		ret = _regmap_select_page(map, &reg, range,
-					  val_len / map->format.val_bytes);
+					  noinc ? 1 : val_len / map->format.val_bytes);
 		if (ret != 0)
 			return ret;
 	}
@@ -2503,7 +2504,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
 	if (!map->format.parse_val)
 		return -EINVAL;
 
-	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
+	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
 	if (ret == 0)
 		*val = map->format.parse_val(work_val);
 
@@ -2619,7 +2620,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 
 		/* Read bytes that fit into whole chunks */
 		for (i = 0; i < chunk_count; i++) {
-			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
 			if (ret != 0)
 				goto out;
 
@@ -2630,7 +2631,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 
 		/* Read remaining bytes */
 		if (val_len) {
-			ret = _regmap_raw_read(map, reg, val, val_len);
+			ret = _regmap_raw_read(map, reg, val, val_len, false);
 			if (ret != 0)
 				goto out;
 		}
@@ -2705,7 +2706,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
 			read_len = map->max_raw_read;
 		else
 			read_len = val_len;
-		ret = _regmap_raw_read(map, reg, val, read_len);
+		ret = _regmap_raw_read(map, reg, val, read_len, true);
 		if (ret)
 			goto out_unlock;
 		val = ((u8 *)val) + read_len;
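The thread running through all of these hunks: a non-incrementing (FIFO-style) register is read or written repeatedly at one address, so window and page selection must count it as a single register rather than a val_len-sized range, hence `noinc ? 1 : val_len / map->format.val_bytes`. A caller-side sketch against the existing regmap API, with a made-up register address:

/* Drain a 64-byte FIFO exposed at register 0x40 on a paged regmap; every
 * byte comes from the same address, so the core must not select pages as
 * if this were a 64-register range. */
u8 fifo[64];
int ret = regmap_noinc_read(map, 0x40, fifo, sizeof(fifo));
if (ret)
    dev_err(dev, "FIFO drain failed: %d\n", ret);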

View File

@@ -370,11 +370,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
 	 * the end.
 	 */
 	len = patch_length;
-	buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
-		      GFP_KERNEL);
+	buf = kvmalloc(patch_length, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
+	memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
 	memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
 
 	*_buf = buf;
@@ -460,8 +460,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
 	if (ret < 0)
 		return ret;
 	ret = fw->size;
-	*buff = kmemdup(fw->data, ret, GFP_KERNEL);
-	if (!*buff)
+	*buff = kvmalloc(fw->size, GFP_KERNEL);
+	if (*buff)
+		memcpy(*buff, fw->data, ret);
+	else
 		ret = -ENOMEM;
 
 	release_firmware(fw);
@@ -499,14 +501,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
 		goto out;
 
 	if (btrtl_dev->cfg_len > 0) {
-		tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+		tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
 		if (!tbuff) {
 			ret = -ENOMEM;
 			goto out;
 		}
 
 		memcpy(tbuff, fw_data, ret);
-		kfree(fw_data);
+		kvfree(fw_data);
 
 		memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
 		ret += btrtl_dev->cfg_len;
@@ -519,14 +521,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
 	ret = rtl_download_firmware(hdev, fw_data, ret);
 
 out:
-	kfree(fw_data);
+	kvfree(fw_data);
 	return ret;
 }
 
 void btrtl_free(struct btrtl_device_info *btrtl_dev)
 {
-	kfree(btrtl_dev->fw_data);
-	kfree(btrtl_dev->cfg_data);
+	kvfree(btrtl_dev->fw_data);
+	kvfree(btrtl_dev->cfg_data);
 	kfree(btrtl_dev);
 }
 EXPORT_SYMBOL_GPL(btrtl_free);
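The rule the conversion follows: kvmalloc()/kvzalloc() may fall back to vmalloc space for large firmware blobs, and such memory must be released with kvfree() at every site that can see the buffer (kfree() on vmalloc-backed memory is a bug). Shape of the allocate-and-copy pattern, with hypothetical names:

static int copy_fw_blob(const u8 *data, size_t len, u8 **out)
{
    u8 *buf = kvmalloc(len, GFP_KERNEL);   /* may be vmalloc-backed */

    if (!buf)
        return -ENOMEM;
    memcpy(buf, data, len);
    *out = buf;   /* the owner must free this with kvfree(), never kfree() */
    return 0;
}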

View File

@@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
 	return 0;
 }
 
+/*
+ * Released firmware describes the IO port max address as 0x3fff, which is
+ * the max host bus address. Fixup to a proper range. This will probably
+ * never be fixed in firmware.
+ */
+static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
+					       struct resource *r)
+{
+	if (r->end != 0x3fff)
+		return;
+
+	if (r->start == 0xe4)
+		r->end = 0xe4 + 0x04 - 1;
+	else if (r->start == 0x2f8)
+		r->end = 0x2f8 + 0x08 - 1;
+	else
+		dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
+			 r);
+}
+
 /*
  * hisi_lpc_acpi_set_io_res - set the resources for a child
  * @child: the device node to be updated the I/O resource
@@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
 		return -ENOMEM;
 	}
 	count = 0;
-	list_for_each_entry(rentry, &resource_list, node)
-		resources[count++] = *rentry->res;
+	list_for_each_entry(rentry, &resource_list, node) {
+		resources[count] = *rentry->res;
+		hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
+		count++;
+	}
 
 	acpi_dev_free_resource_list(&resource_list);

View File

@@ -399,15 +399,15 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
 	struct device *dev = &pdev->dev;
 	int rc;
 
-	bt_bmc->irq = platform_get_irq(pdev, 0);
-	if (!bt_bmc->irq)
-		return -ENODEV;
+	bt_bmc->irq = platform_get_irq_optional(pdev, 0);
+	if (bt_bmc->irq < 0)
+		return bt_bmc->irq;
 
 	rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
 			      DEVICE_NAME, bt_bmc);
 	if (rc < 0) {
 		dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
-		bt_bmc->irq = 0;
+		bt_bmc->irq = rc;
 		return rc;
 	}
 
@@ -479,7 +479,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
 
 	bt_bmc_config_irq(bt_bmc, pdev);
 
-	if (bt_bmc->irq) {
+	if (bt_bmc->irq >= 0) {
 		dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
 	} else {
 		dev_info(dev, "No IRQ; using timer\n");
@@ -505,7 +505,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
 	struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
 
 	misc_deregister(&bt_bmc->miscdev);
-	if (!bt_bmc->irq)
+	if (bt_bmc->irq < 0)
 		del_timer_sync(&bt_bmc->poll_timer);
 	return 0;
 }
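The underlying API detail: platform_get_irq_optional() never returns 0, it yields a positive IRQ number or a negative errno, which is why the stored value is now tested with `>= 0` and `< 0` rather than truthiness. Probe-time sketch of the two paths, with hypothetical helper names:

priv->irq = platform_get_irq_optional(pdev, 0);
if (priv->irq >= 0)
    rc = setup_irq_mode(priv);     /* hypothetical: interrupt-driven path */
else
    rc = setup_polled_mode(priv);  /* hypothetical: timer-based fallback */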

View File

@@ -1223,14 +1223,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	 * We take into account the first, second and third-order deltas
 	 * in order to make our estimate.
 	 */
-	delta = sample.jiffies - state->last_time;
-	state->last_time = sample.jiffies;
+	delta = sample.jiffies - READ_ONCE(state->last_time);
+	WRITE_ONCE(state->last_time, sample.jiffies);
 
-	delta2 = delta - state->last_delta;
-	state->last_delta = delta;
+	delta2 = delta - READ_ONCE(state->last_delta);
+	WRITE_ONCE(state->last_delta, delta);
 
-	delta3 = delta2 - state->last_delta2;
-	state->last_delta2 = delta2;
+	delta3 = delta2 - READ_ONCE(state->last_delta2);
+	WRITE_ONCE(state->last_delta2, delta2);
 
 	if (delta < 0)
 		delta = -delta;
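add_timer_randomness() can race with itself on the same state with no lock; the annotations force one load and one store per field, so the compiler can neither re-read a value that is changing underneath it nor tear the write. The distilled pattern, as a sketch against the same struct:

/* Lockless, tolerated-race update: each shared field is touched exactly
 * once per step, first a single load, then a single store. */
static inline long sample_delta(struct timer_rand_state *state, long now)
{
    long delta = now - READ_ONCE(state->last_time); /* one load */

    WRITE_ONCE(state->last_time, now);              /* one store */
    return delta;
}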

View File

@@ -777,18 +777,22 @@ static int __init tlclk_init(void)
 {
 	int ret;
 
-	ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
-	if (ret < 0) {
-		printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
-		return ret;
-	}
-	tlclk_major = ret;
+	telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
 
 	alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
 	if (!alarm_events) {
 		ret = -ENOMEM;
 		goto out1;
 	}
 
+	ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+	if (ret < 0) {
+		printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+		kfree(alarm_events);
+		return ret;
+	}
+	tlclk_major = ret;
+
 	/* Read telecom clock IRQ number (Set by BIOS) */
 	if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
 		printk(KERN_ERR "tlclk: request_region 0x%X failed.\n",
@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
 		ret = -EBUSY;
 		goto out2;
 	}
-	telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
 
 	if (0x0F == telclk_interrupt ) {	/* not MCPBL0010 ? */
 		printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
@@ -837,8 +840,8 @@ out3:
 	release_region(TLCLK_BASE, 8);
 out2:
 	kfree(alarm_events);
-out1:
 	unregister_chrdev(tlclk_major, "telco_clock");
+out1:
 	return ret;
 }
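The reordering encodes a general init rule: register_chrdev() publishes the file operations, so everything those operations dereference (here alarm_events) must exist before it runs, and the error labels must unwind in exactly the reverse order of setup. Skeleton of that shape with placeholder names:

static int __init demo_init(void)
{
    int ret;

    state = kzalloc(sizeof(*state), GFP_KERNEL);  /* step 1: private state */
    if (!state)
        return -ENOMEM;

    ret = register_chrdev(0, "demo", &demo_fops); /* step 2: publish last */
    if (ret < 0) {
        kfree(state);                             /* undo step 1 only */
        return ret;
    }
    major = ret;
    return 0;
}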

View File

@ -22,6 +22,7 @@
#include "tpm.h" #include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2" #define ACPI_SIG_TPM2 "TPM2"
#define TPM_CRB_MAX_RESOURCES 3
static const guid_t crb_acpi_start_guid = static const guid_t crb_acpi_start_guid =
GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714, GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
@ -91,7 +92,6 @@ enum crb_status {
struct crb_priv { struct crb_priv {
u32 sm; u32 sm;
const char *hid; const char *hid;
void __iomem *iobase;
struct crb_regs_head __iomem *regs_h; struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t; struct crb_regs_tail __iomem *regs_t;
u8 __iomem *cmd; u8 __iomem *cmd;
@ -434,21 +434,27 @@ static const struct tpm_class_ops tpm_crb = {
static int crb_check_resource(struct acpi_resource *ares, void *data) static int crb_check_resource(struct acpi_resource *ares, void *data)
{ {
struct resource *io_res = data; struct resource *iores_array = data;
struct resource_win win; struct resource_win win;
struct resource *res = &(win.res); struct resource *res = &(win.res);
int i;
if (acpi_dev_resource_memory(ares, res) || if (acpi_dev_resource_memory(ares, res) ||
acpi_dev_resource_address_space(ares, &win)) { acpi_dev_resource_address_space(ares, &win)) {
*io_res = *res; for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
io_res->name = NULL; if (resource_type(iores_array + i) != IORESOURCE_MEM) {
iores_array[i] = *res;
iores_array[i].name = NULL;
break;
}
}
} }
return 1; return 1;
} }
static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
struct resource *io_res, u64 start, u32 size) void __iomem **iobase_ptr, u64 start, u32 size)
{ {
struct resource new_res = { struct resource new_res = {
.start = start, .start = start,
@ -460,10 +466,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
if (start != new_res.start) if (start != new_res.start)
return (void __iomem *) ERR_PTR(-EINVAL); return (void __iomem *) ERR_PTR(-EINVAL);
if (!resource_contains(io_res, &new_res)) if (!iores)
return devm_ioremap_resource(dev, &new_res); return devm_ioremap_resource(dev, &new_res);
return priv->iobase + (new_res.start - io_res->start); if (!*iobase_ptr) {
*iobase_ptr = devm_ioremap_resource(dev, iores);
if (IS_ERR(*iobase_ptr))
return *iobase_ptr;
}
return *iobase_ptr + (new_res.start - iores->start);
} }
/* /*
@ -490,9 +502,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct acpi_table_tpm2 *buf) struct acpi_table_tpm2 *buf)
{ {
struct list_head resources; struct list_head acpi_resource_list;
struct resource io_res; struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
struct device *dev = &device->dev; struct device *dev = &device->dev;
struct resource *iores;
void __iomem **iobase_ptr;
int i;
u32 pa_high, pa_low; u32 pa_high, pa_low;
u64 cmd_pa; u64 cmd_pa;
u32 cmd_size; u32 cmd_size;
@ -501,21 +517,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
u32 rsp_size; u32 rsp_size;
int ret; int ret;
INIT_LIST_HEAD(&resources); INIT_LIST_HEAD(&acpi_resource_list);
ret = acpi_dev_get_resources(device, &resources, crb_check_resource, ret = acpi_dev_get_resources(device, &acpi_resource_list,
&io_res); crb_check_resource, iores_array);
if (ret < 0) if (ret < 0)
return ret; return ret;
acpi_dev_free_resource_list(&resources); acpi_dev_free_resource_list(&acpi_resource_list);
if (resource_type(&io_res) != IORESOURCE_MEM) { if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n"); dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL; return -EINVAL;
} else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
IORESOURCE_MEM) {
dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
memset(iores_array + TPM_CRB_MAX_RESOURCES,
0, sizeof(*iores_array));
iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
} }
priv->iobase = devm_ioremap_resource(dev, &io_res); iores = NULL;
if (IS_ERR(priv->iobase)) iobase_ptr = NULL;
return PTR_ERR(priv->iobase); for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
if (buf->control_address >= iores_array[i].start &&
buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
iores_array[i].end) {
iores = iores_array + i;
iobase_ptr = iobase_array + i;
break;
}
}
priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
sizeof(struct crb_regs_tail));
if (IS_ERR(priv->regs_t))
return PTR_ERR(priv->regs_t);
/* The ACPI IO region starts at the head area and continues to include /* The ACPI IO region starts at the head area and continues to include
* the control area, as one nice sane region except for some older * the control area, as one nice sane region except for some older
@@ -523,9 +559,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/ */
if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
(priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
if (buf->control_address == io_res.start + if (iores &&
buf->control_address == iores->start +
sizeof(*priv->regs_h)) sizeof(*priv->regs_h))
priv->regs_h = priv->iobase; priv->regs_h = *iobase_ptr;
else else
dev_warn(dev, FW_BUG "Bad ACPI memory layout"); dev_warn(dev, FW_BUG "Bad ACPI memory layout");
} }
@@ -534,13 +571,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
if (ret) if (ret)
return ret; return ret;
priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
sizeof(struct crb_regs_tail));
if (IS_ERR(priv->regs_t)) {
ret = PTR_ERR(priv->regs_t);
goto out_relinquish_locality;
}
/* /*
* PTT HW bug w/a: wake up the device to access * PTT HW bug w/a: wake up the device to access
* possibly not retained registers. * possibly not retained registers.
@@ -552,13 +582,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high); pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low); pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
cmd_pa = ((u64)pa_high << 32) | pa_low; cmd_pa = ((u64)pa_high << 32) | pa_low;
cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa, cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
ioread32(&priv->regs_t->ctrl_cmd_size));
iores = NULL;
iobase_ptr = NULL;
for (i = 0; iores_array[i].end; ++i) {
if (cmd_pa >= iores_array[i].start &&
cmd_pa <= iores_array[i].end) {
iores = iores_array + i;
iobase_ptr = iobase_array + i;
break;
}
}
if (iores)
cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n", dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
pa_high, pa_low, cmd_size); pa_high, pa_low, cmd_size);
priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size); priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
if (IS_ERR(priv->cmd)) { if (IS_ERR(priv->cmd)) {
ret = PTR_ERR(priv->cmd); ret = PTR_ERR(priv->cmd);
goto out; goto out;
@@ -566,11 +609,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8); memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
rsp_pa = le64_to_cpu(__rsp_pa); rsp_pa = le64_to_cpu(__rsp_pa);
rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa, rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
ioread32(&priv->regs_t->ctrl_rsp_size));
iores = NULL;
iobase_ptr = NULL;
for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
if (rsp_pa >= iores_array[i].start &&
rsp_pa <= iores_array[i].end) {
iores = iores_array + i;
iobase_ptr = iobase_array + i;
break;
}
}
if (iores)
rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
if (cmd_pa != rsp_pa) { if (cmd_pa != rsp_pa) {
priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size); priv->rsp = crb_map_res(dev, iores, iobase_ptr,
rsp_pa, rsp_size);
ret = PTR_ERR_OR_ZERO(priv->rsp); ret = PTR_ERR_OR_ZERO(priv->rsp);
goto out; goto out;
} }
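
The rework above keeps asking one question: which of the ACPI-provided memory regions contains a given register block (control area, command buffer, response buffer)? A minimal sketch of that containing-region lookup, in plain C with illustrative names (struct mem_region and find_containing_region are not the driver's):

#include <stddef.h>
#include <stdint.h>

struct mem_region {
    uint64_t start;
    uint64_t end;    /* inclusive, like struct resource */
};

/* Return the region fully containing [start, start + size), or NULL so
 * the caller can fall back to mapping a fresh region of its own. */
static const struct mem_region *
find_containing_region(const struct mem_region *regions, size_t n,
                       uint64_t start, uint64_t size)
{
    size_t i;

    for (i = 0; i < n; i++) {
        if (start >= regions[i].start &&
            start + size - 1 <= regions[i].end)
            return &regions[i];
    }
    return NULL;
}
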

@@ -581,6 +581,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
*/ */
while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
ibmvtpm_crq_process(crq, ibmvtpm); ibmvtpm_crq_process(crq, ibmvtpm);
wake_up_interruptible(&ibmvtpm->crq_queue.wq);
crq->valid = 0; crq->valid = 0;
smp_wmb(); smp_wmb();
} }
@@ -628,6 +629,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
} }
crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
init_waitqueue_head(&crq_q->wq);
ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
CRQ_RES_BUF_SIZE, CRQ_RES_BUF_SIZE,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
@@ -680,6 +682,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (rc) if (rc)
goto init_irq_cleanup; goto init_irq_cleanup;
if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
ibmvtpm->rtce_buf != NULL,
HZ)) {
dev_err(dev, "CRQ response timed out\n");
goto init_irq_cleanup;
}
return tpm_chip_register(chip); return tpm_chip_register(chip);
init_irq_cleanup: init_irq_cleanup:
do { do {
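
The added lines implement the standard waitqueue handshake: the interrupt path publishes the response buffer and wakes the queue, while probe sleeps until the buffer appears or a timeout fires. A condensed sketch of the pattern, assuming kernel context (the demo_* names are illustrative, not the driver's):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_dev {
    wait_queue_head_t wq;
    void *rtce_buf;        /* published by the IRQ path */
};

/* IRQ path: publish the result, then wake the sleeper. */
static void demo_response_ready(struct demo_dev *d, void *buf)
{
    d->rtce_buf = buf;
    wake_up_interruptible(&d->wq);
}

/* Probe path: give the device one second to answer before failing. */
static int demo_wait_for_response(struct demo_dev *d)
{
    long rc = wait_event_interruptible_timeout(d->wq,
                                               d->rtce_buf != NULL, HZ);
    if (rc == 0)
        return -ETIMEDOUT;    /* condition never became true */
    if (rc < 0)
        return rc;            /* interrupted by a signal */
    return 0;
}

Both sides of the sketch use the interruptible variants so the sleeper's task state matches the wake mode.
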

@@ -26,6 +26,7 @@ struct ibmvtpm_crq_queue {
struct ibmvtpm_crq *crq_addr; struct ibmvtpm_crq *crq_addr;
u32 index; u32 index;
u32 num_entry; u32 num_entry;
wait_queue_head_t wq;
}; };
struct ibmvtpm_dev { struct ibmvtpm_dev {

@@ -39,7 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
/* read VCO1 reg for numerator and denominator */ /* read VCO1 reg for numerator and denominator */
reg = readl(socfpgaclk->hw.reg); reg = readl(socfpgaclk->hw.reg);
refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT; refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
vco_freq = (unsigned long long)parent_rate / refdiv;
vco_freq = parent_rate;
do_div(vco_freq, refdiv);
/* Read mdiv and fdiv from the fdbck register */ /* Read mdiv and fdiv from the fdbck register */
reg = readl(socfpgaclk->hw.reg + 0x4); reg = readl(socfpgaclk->hw.reg + 0x4);
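
The two-line replacement above is the kernel's portable idiom for 64-bit division: a plain "/" on a u64 pulls in library helpers such as __aeabi_uldivmod on 32-bit ARM, while do_div() divides in place with a u32 divisor. A minimal sketch (demo_vco_hz is illustrative):

#include <linux/types.h>
#include <asm/div64.h>        /* do_div() */

static unsigned long demo_vco_hz(unsigned long parent_hz, u32 refdiv)
{
    u64 vco = parent_hz;

    /* Divides vco in place and returns the remainder. */
    do_div(vco, refdiv);
    return (unsigned long)vco;
}
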

@@ -194,15 +194,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
if (err) if (err)
return NULL; return NULL;
} else { } else {
const char *base_name = "adpll"; name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
char *buf; d->pa, postfix);
buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
strlen(postfix), GFP_KERNEL);
if (!buf)
return NULL;
sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
name = buf;
} }
return name; return name;
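
One devm_kasprintf() call replaces the hand-computed allocation size and the sprintf() above, and ties the buffer's lifetime to the device. A sketch of the resulting shape (demo_clk_name is illustrative):

#include <linux/device.h>
#include <linux/slab.h>

static const char *demo_clk_name(struct device *dev, unsigned long pa,
                                 const char *postfix)
{
    /* Sizes the buffer itself and frees it on driver detach;
     * returns NULL on allocation failure, exactly like the
     * open-coded devm_kzalloc()+sprintf() pair did. */
    return devm_kasprintf(dev, GFP_KERNEL, "%08lx.adpll.%s",
                          pa, postfix);
}
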

@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
return PTR_ERR(clk); return PTR_ERR(clk);
} }
ret = ENXIO; ret = -ENXIO;
base = of_iomap(node, 0); base = of_iomap(node, 0);
if (!base) { if (!base) {
pr_err("failed to map registers for clockevent\n"); pr_err("failed to map registers for clockevent\n");

@@ -902,6 +902,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
void powernv_cpufreq_work_fn(struct work_struct *work) void powernv_cpufreq_work_fn(struct work_struct *work)
{ {
struct chip *chip = container_of(work, struct chip, throttle); struct chip *chip = container_of(work, struct chip, throttle);
struct cpufreq_policy *policy;
unsigned int cpu; unsigned int cpu;
cpumask_t mask; cpumask_t mask;
@@ -916,12 +917,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
chip->restore = false; chip->restore = false;
for_each_cpu(cpu, &mask) { for_each_cpu(cpu, &mask) {
int index; int index;
struct cpufreq_policy policy;
cpufreq_get_policy(&policy, cpu); policy = cpufreq_cpu_get(cpu);
index = cpufreq_table_find_index_c(&policy, policy.cur); if (!policy)
powernv_cpufreq_target_index(&policy, index); continue;
cpumask_andnot(&mask, &mask, policy.cpus); index = cpufreq_table_find_index_c(policy, policy->cur);
powernv_cpufreq_target_index(policy, index);
cpumask_andnot(&mask, &mask, policy->cpus);
cpufreq_cpu_put(policy);
} }
out: out:
put_online_cpus(); put_online_cpus();
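
The loop above drops the stack copy of the policy in favor of the reference-counted accessor: cpufreq_cpu_get() may return NULL for a CPU without a policy, and every successful get must be balanced by cpufreq_cpu_put(). The pattern in isolation (demo_current_khz is illustrative):

#include <linux/cpufreq.h>

static unsigned int demo_current_khz(unsigned int cpu)
{
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
    unsigned int cur = 0;

    if (policy) {             /* NULL if cpu has no policy */
        cur = policy->cur;    /* safe while the reference is held */
        cpufreq_cpu_put(policy);
    }
    return cur;
}
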

@@ -2480,8 +2480,9 @@ int chcr_aead_dma_map(struct device *dev,
else else
reqctx->b0_dma = 0; reqctx->b0_dma = 0;
if (req->src == req->dst) { if (req->src == req->dst) {
error = dma_map_sg(dev, req->src, sg_nents(req->src), error = dma_map_sg(dev, req->src,
DMA_BIDIRECTIONAL); sg_nents_for_len(req->src, dst_size),
DMA_BIDIRECTIONAL);
if (!error) if (!error)
goto err; goto err;
} else { } else {
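
sg_nents_for_len() trims the DMA mapping to the scatterlist entries that actually cover the requested byte count, instead of mapping the whole chain. A sketch of the call pattern (demo_map_for_len is illustrative):

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static int demo_map_for_len(struct device *dev, struct scatterlist *sgl,
                            u64 len, enum dma_data_direction dir)
{
    /* Entries needed to cover len bytes; -EFAULT if the list is
     * shorter than that. */
    int nents = sg_nents_for_len(sgl, len);

    if (nents < 0)
        return nents;
    /* dma_map_sg() returns 0 on failure, else the mapped count. */
    if (!dma_map_sg(dev, sgl, nents, dir))
        return -ENOMEM;
    return nents;
}
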

@@ -1437,7 +1437,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits)) csk->wr_max_credits))
sk->sk_write_space(sk); sk->sk_write_space(sk);
if (copied >= target && !sk->sk_backlog.tail) if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break; break;
if (copied) { if (copied) {
@@ -1470,7 +1470,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
break; break;
} }
} }
if (sk->sk_backlog.tail) { if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk); release_sock(sk);
lock_sock(sk); lock_sock(sk);
chtls_cleanup_rbuf(sk, copied); chtls_cleanup_rbuf(sk, copied);
@@ -1615,7 +1615,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
break; break;
} }
if (sk->sk_backlog.tail) { if (READ_ONCE(sk->sk_backlog.tail)) {
/* Do not sleep, just process backlog. */ /* Do not sleep, just process backlog. */
release_sock(sk); release_sock(sk);
lock_sock(sk); lock_sock(sk);
@@ -1743,7 +1743,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
csk->wr_max_credits)) csk->wr_max_credits))
sk->sk_write_space(sk); sk->sk_write_space(sk);
if (copied >= target && !sk->sk_backlog.tail) if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
break; break;
if (copied) { if (copied) {
@@ -1774,7 +1774,7 @@ } }
} }
} }
if (sk->sk_backlog.tail) { if (READ_ONCE(sk->sk_backlog.tail)) {
release_sock(sk); release_sock(sk);
lock_sock(sk); lock_sock(sk);
chtls_cleanup_rbuf(sk, copied); chtls_cleanup_rbuf(sk, copied);
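
Each conversion above marks a lockless peek: sk_backlog.tail is written by other CPUs while this reader holds no relevant lock, so the load goes through READ_ONCE() to keep the compiler from tearing, caching, or re-reading it. The pattern in isolation (demo_backlog_pending is illustrative):

#include <net/sock.h>

static bool demo_backlog_pending(const struct sock *sk)
{
    /* One marked load; the result may be stale by the time it is
     * used, which the callers above tolerate by re-checking after
     * release_sock()/lock_sock(). */
    return READ_ONCE(sk->sk_backlog.tail) != NULL;
}
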

@@ -227,7 +227,7 @@ static void dax_region_unregister(void *region)
struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align, struct resource *res, int target_node, unsigned int align,
unsigned long pfn_flags) unsigned long long pfn_flags)
{ {
struct dax_region *dax_region; struct dax_region *dax_region;

@@ -11,7 +11,7 @@ struct dax_region;
void dax_region_put(struct dax_region *dax_region); void dax_region_put(struct dax_region *dax_region);
struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align, struct resource *res, int target_node, unsigned int align,
unsigned long flags); unsigned long long flags);
enum dev_dax_subsys { enum dev_dax_subsys {
DEV_DAX_BUS, DEV_DAX_BUS,

@@ -32,7 +32,7 @@ struct dax_region {
struct device *dev; struct device *dev;
unsigned int align; unsigned int align;
struct resource res; struct resource res;
unsigned long pfn_flags; unsigned long long pfn_flags;
}; };
/** /**

@@ -68,6 +68,8 @@
#define KHZ 1000 #define KHZ 1000
#define KHZ_MAX (ULONG_MAX / KHZ)
/* Assume that the bus is saturated if the utilization is 25% */ /* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO 25 #define BUS_SATURATION_RATIO 25
@@ -169,7 +171,7 @@ struct tegra_actmon_emc_ratio {
}; };
static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = { static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
{ 1400000, ULONG_MAX }, { 1400000, KHZ_MAX },
{ 1200000, 750000 }, { 1200000, 750000 },
{ 1100000, 600000 }, { 1100000, 600000 },
{ 1000000, 500000 }, { 1000000, 500000 },
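
ULONG_MAX in the first table entry could overflow conversions elsewhere in the driver that multiply table values by KHZ; capping the entry at KHZ_MAX keeps that arithmetic in range. A sketch of the guarded conversion (the DEMO_* names are illustrative):

#include <linux/kernel.h>

#define DEMO_KHZ     1000
#define DEMO_KHZ_MAX (ULONG_MAX / DEMO_KHZ)

static unsigned long demo_khz_to_hz(unsigned long khz)
{
    /* Anything above DEMO_KHZ_MAX would wrap when multiplied. */
    if (khz > DEMO_KHZ_MAX)
        khz = DEMO_KHZ_MAX;
    return khz * DEMO_KHZ;
}
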

@@ -60,6 +60,8 @@ static void dma_buf_release(struct dentry *dentry)
struct dma_buf *dmabuf; struct dma_buf *dmabuf;
dmabuf = dentry->d_fsdata; dmabuf = dentry->d_fsdata;
if (unlikely(!dmabuf))
return;
BUG_ON(dmabuf->vmapping_counter); BUG_ON(dmabuf->vmapping_counter);

@@ -273,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence)
} }
EXPORT_SYMBOL(dma_fence_free); EXPORT_SYMBOL(dma_fence_free);
static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
bool was_set;
lockdep_assert_held(fence->lock);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return false;
if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
dma_fence_signal_locked(fence);
return false;
}
}
return true;
}
/** /**
* dma_fence_enable_sw_signaling - enable signaling on fence * dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: the fence to enable * @fence: the fence to enable
@@ -285,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{ {
unsigned long flags; unsigned long flags;
if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
&fence->flags) && return;
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags); spin_lock_irqsave(fence->lock, flags);
__dma_fence_enable_signaling(fence);
if (!fence->ops->enable_signaling(fence)) spin_unlock_irqrestore(fence->lock, flags);
dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
}
} }
EXPORT_SYMBOL(dma_fence_enable_sw_signaling); EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -331,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
{ {
unsigned long flags; unsigned long flags;
int ret = 0; int ret = 0;
bool was_set;
if (WARN_ON(!fence || !func)) if (WARN_ON(!fence || !func))
return -EINVAL; return -EINVAL;
@@ -343,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
spin_lock_irqsave(fence->lock, flags); spin_lock_irqsave(fence->lock, flags);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, if (__dma_fence_enable_signaling(fence)) {
&fence->flags);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
else if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
dma_fence_signal_locked(fence);
ret = -ENOENT;
}
}
if (!ret) {
cb->func = func; cb->func = func;
list_add_tail(&cb->node, &fence->cb_list); list_add_tail(&cb->node, &fence->cb_list);
} else } else {
INIT_LIST_HEAD(&cb->node); INIT_LIST_HEAD(&cb->node);
ret = -ENOENT;
}
spin_unlock_irqrestore(fence->lock, flags); spin_unlock_irqrestore(fence->lock, flags);
return ret; return ret;
@@ -461,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
struct default_wait_cb cb; struct default_wait_cb cb;
unsigned long flags; unsigned long flags;
signed long ret = timeout ? timeout : 1; signed long ret = timeout ? timeout : 1;
bool was_set;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return ret; return ret;
@@ -473,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
goto out; goto out;
} }
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, if (!__dma_fence_enable_signaling(fence))
&fence->flags);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out; goto out;
if (!was_set && fence->ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
dma_fence_signal_locked(fence);
goto out;
}
}
if (!timeout) { if (!timeout) {
ret = 0; ret = 0;
goto out; goto out;
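
The refactor above exists because three call sites carried the same test_and_set_bit()/enable_signaling sequence; the new helper owns it once and documents its locking contract with lockdep_assert_held(). The shape of that refactor reduced to its bones (the demo_* names are illustrative):

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>

struct demo_obj {
    spinlock_t lock;
    unsigned long flags;
};

/* Call with demo_obj.lock held; returns false if the work was
 * already done, so callers can bail out early. */
static bool demo_enable_locked(struct demo_obj *o)
{
    lockdep_assert_held(&o->lock);
    return !test_and_set_bit(0, &o->flags);
}

/* Former duplicate call sites shrink to lock + helper + unlock. */
static void demo_enable(struct demo_obj *o)
{
    unsigned long irqflags;

    spin_lock_irqsave(&o->lock, irqflags);
    demo_enable_locked(o);
    spin_unlock_irqrestore(&o->lock, irqflags);
}
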

@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (err) { if (err) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
"request_irq failed with err %d\n", err); "request_irq failed with err %d\n", err);
goto err_unregister; goto err_free;
} }
platform_set_drvdata(pdev, hsdma); platform_set_drvdata(pdev, hsdma);
@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0; return 0;
err_free:
of_dma_controller_free(pdev->dev.of_node);
err_unregister: err_unregister:
dma_async_device_unregister(dd); dma_async_device_unregister(dd);
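
The added label makes teardown mirror setup in reverse: the OF DMA controller registered last is freed first, then the already-registered DMA device. A reduced sketch of the ladder, with hypothetical demo_* stand-ins for the real calls:

/* Stand-ins for the three setup steps and their undo operations. */
static int demo_register_device(void)     { return 0; }
static int demo_register_controller(void) { return 0; }
static int demo_request_irq(void)         { return 0; }
static void demo_free_controller(void)    { }
static void demo_unregister_device(void)  { }

static int demo_probe(void)
{
    int err;

    err = demo_register_device();         /* step 1 */
    if (err)
        return err;
    err = demo_register_controller();     /* step 2 */
    if (err)
        goto err_unregister;
    err = demo_request_irq();             /* step 3: the call whose
                                           * failure path was wrong */
    if (err)
        goto err_free;
    return 0;

err_free:
    demo_free_controller();               /* undo step 2 */
err_unregister:
    demo_unregister_device();             /* undo step 1 */
    return err;
}
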

@@ -488,8 +488,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags); spin_lock_irqsave(&chan->vchan.lock, flags);
if (chan->busy) { if (chan->desc) {
stm32_dma_stop(chan); vchan_terminate_vdesc(&chan->desc->vdesc);
if (chan->busy)
stm32_dma_stop(chan);
chan->desc = NULL; chan->desc = NULL;
} }
@@ -545,6 +547,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
if (!vdesc) if (!vdesc)
return; return;
list_del(&vdesc->node);
chan->desc = to_stm32_dma_desc(vdesc); chan->desc = to_stm32_dma_desc(vdesc);
chan->next_sg = 0; chan->next_sg = 0;
} }
@@ -622,7 +626,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
} else { } else {
chan->busy = false; chan->busy = false;
if (chan->next_sg == chan->desc->num_sgs) { if (chan->next_sg == chan->desc->num_sgs) {
list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc); vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL; chan->desc = NULL;
} }

@@ -1127,6 +1127,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
return; return;
} }
list_del(&vdesc->node);
chan->desc = to_stm32_mdma_desc(vdesc); chan->desc = to_stm32_mdma_desc(vdesc);
hwdesc = chan->desc->node[0].hwdesc; hwdesc = chan->desc->node[0].hwdesc;
chan->curr_hwdesc = 0; chan->curr_hwdesc = 0;
@@ -1242,8 +1244,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
LIST_HEAD(head); LIST_HEAD(head);
spin_lock_irqsave(&chan->vchan.lock, flags); spin_lock_irqsave(&chan->vchan.lock, flags);
if (chan->busy) { if (chan->desc) {
stm32_mdma_stop(chan); vchan_terminate_vdesc(&chan->desc->vdesc);
if (chan->busy)
stm32_mdma_stop(chan);
chan->desc = NULL; chan->desc = NULL;
} }
vchan_get_all_descriptors(&chan->vchan, &head); vchan_get_all_descriptors(&chan->vchan, &head);
@@ -1331,7 +1335,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
{ {
list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc); vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL; chan->desc = NULL;
chan->busy = false; chan->busy = false;

@@ -1287,8 +1287,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
if (tdc->busy) tegra_dma_terminate_all(dc);
tegra_dma_terminate_all(dc);
spin_lock_irqsave(&tdc->lock, flags); spin_lock_irqsave(&tdc->lock, flags);
list_splice_init(&tdc->pending_sg_req, &sg_req_list); list_splice_init(&tdc->pending_sg_req, &sg_req_list);

@@ -123,10 +123,12 @@
/* Max transfer size per descriptor */ /* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
/* Max burst lengths */
#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
/* Reset values for data attributes */ /* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL 0xF #define ZYNQMP_DMA_AXCACHE_VAL 0xF
#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
@@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{ {
u32 val; u32 val, burst_val;
val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
val |= ZYNQMP_DMA_POINT_TYPE_SG; val |= ZYNQMP_DMA_POINT_TYPE_SG;
writel(val, chan->regs + ZYNQMP_DMA_CTRL0); writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
burst_val = __ilog2_u32(chan->src_burst_len);
val = (val & ~ZYNQMP_DMA_ARLEN) | val = (val & ~ZYNQMP_DMA_ARLEN) |
(chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
burst_val = __ilog2_u32(chan->dst_burst_len);
val = (val & ~ZYNQMP_DMA_AWLEN) | val = (val & ~ZYNQMP_DMA_AWLEN) |
(chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
} }
@@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
{ {
struct zynqmp_dma_chan *chan = to_chan(dchan); struct zynqmp_dma_chan *chan = to_chan(dchan);
chan->src_burst_len = config->src_maxburst; chan->src_burst_len = clamp(config->src_maxburst, 1U,
chan->dst_burst_len = config->dst_maxburst; ZYNQMP_DMA_MAX_SRC_BURST_LEN);
chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
ZYNQMP_DMA_MAX_DST_BURST_LEN);
return 0; return 0;
} }
@@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
return PTR_ERR(chan->regs); return PTR_ERR(chan->regs);
chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
if (err < 0) { if (err < 0) {
dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
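
The ARLEN/AWLEN register fields take the power-of-two exponent of the burst length, so the driver now clamps the requested burst to the hardware range and encodes it with __ilog2_u32() rather than writing the raw count. A sketch (demo_encode_burst and DEMO_MAX_BURST are illustrative):

#include <linux/kernel.h>    /* clamp() */
#include <linux/log2.h>      /* __ilog2_u32() */

#define DEMO_MAX_BURST 32768U

static u32 demo_encode_burst(u32 requested)
{
    /* Accept bursts of 1..32768; the field wants the exponent,
     * so 1 encodes as 0 and 32768 as 15. */
    u32 burst = clamp(requested, 1U, DEMO_MAX_BURST);

    return __ilog2_u32(burst);
}
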
