1
0
Fork 0

This is the 5.4.3 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl3zQekACgkQONu9yGCS
 aT4v0xAAi39hk/9NMVQkOVJEAw7KtrqaidZ21c8DIZKwEEdmh5cgVTKjuF+PggXR
 VrSPSMuvCK9Xp4p4ZUPNr6WFYATxfUINZFvRc5gh1c9XtHrxjlWX9oi4XQ4a3zWC
 56Ak17gCt42eWcArHUslPQLFnq3XBDARs7qK8gv+6Vxt8w/3EaPkm4MKB8nGkdnK
 ItfkxGWnkKyUnwsE/01p1iR+LrbxnALXS+OM1izWBBA6ex2Zl6QDmmwZXXgKHOVt
 RAdO04t8k9jKrydo43DF/icUrYZuLbpIHaKIV6T0YH1Hco5qTZAz5+REmZelcEDH
 /vBNVxTVma5vAdYQsztEITv+qKNEkM2i+xoSoGierE8EPbRqo/ed3tDNdNxTmMcq
 zku+yztWm5VGicSbsGKRElwXt8+eyduIyEHyGKt5AON7n0NRDBmGl+HCnEU7Agoy
 XKJ+aDHjWK4AAYZL9osZ0MQ3QF55Dpaylijjnihc9VlMTVOTDY49MD8XSfoj0VFq
 pOdH7NWNDvh5eJfd+DQQHMtlMxN2dMppRlclh/ecXZ+2r6oKB/E11D9lzCguFZtQ
 HR1vDHl6HpnP08MXn++AZj0/hCx+zS/GzXJccGqjP5GWH10BAA+Kfig+ooo/t9We
 SJc8WcySU+0lmJN8yegcEintDyv/HkPQzJApRw3P0iL0t8oN8Ac=
 =8yzT
 -----END PGP SIGNATURE-----

Merge linux-5.4.y tag 'v5.4.3' into lf-5.4.y

This is the 5.4.3 stable release

 Conflicts:
	drivers/cpufreq/imx-cpufreq-dt.c
	drivers/spi/spi-fsl-qspi.c

The conflict is very minor, fixed it when do the merge. The imx-cpufreq-dt.c
is just one line code-style change, using upstream one, no any function change.

The spi-fsl-qspi.c has minor conflicts when merge upstream fixes: c69b17da53
spi: spi-fsl-qspi: Clear TDH bits in FLSHCR register

After merge, basic boot sanity test and basic qspi test been done on i.mx

Signed-off-by: Jason Liu <jason.hui.liu@nxp.com>
5.4-rM2-2.2.x-imx-squashed
Jason Liu 2019-12-16 10:22:36 +08:00
commit 622141309f
218 changed files with 2203 additions and 1270 deletions

View File

@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are:
============ ============================================================= ============ =============================================================
Not specifying this option is equivalent to "mds=full". Not specifying this option is equivalent to "mds=full". For processors
that are affected by both TAA (TSX Asynchronous Abort) and MDS,
specifying just "mds=off" without an accompanying "tsx_async_abort=off"
will have no effect as the same mitigation is used for both
vulnerabilities.
Mitigation selection guide Mitigation selection guide
-------------------------- --------------------------

View File

@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are:
CPU is not vulnerable to cross-thread TAA attacks. CPU is not vulnerable to cross-thread TAA attacks.
============ ============================================================= ============ =============================================================
Not specifying this option is equivalent to "tsx_async_abort=full". Not specifying this option is equivalent to "tsx_async_abort=full". For
processors that are affected by both TAA and MDS, specifying just
"tsx_async_abort=off" without an accompanying "mds=off" will have no
effect as the same mitigation is used for both vulnerabilities.
The kernel command line also allows to control the TSX feature using the The kernel command line also allows to control the TSX feature using the
parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used

View File

@ -2473,6 +2473,12 @@
SMT on vulnerable CPUs SMT on vulnerable CPUs
off - Unconditionally disable MDS mitigation off - Unconditionally disable MDS mitigation
On TAA-affected machines, mds=off can be prevented by
an active TAA mitigation as both vulnerabilities are
mitigated with the same mechanism so in order to disable
this mitigation, you need to specify tsx_async_abort=off
too.
Not specifying this option is equivalent to Not specifying this option is equivalent to
mds=full. mds=full.
@ -4931,6 +4937,11 @@
vulnerable to cross-thread TAA attacks. vulnerable to cross-thread TAA attacks.
off - Unconditionally disable TAA mitigation off - Unconditionally disable TAA mitigation
On MDS-affected machines, tsx_async_abort=off can be
prevented by an active MDS mitigation as both vulnerabilities
are mitigated with the same mechanism so in order to disable
this mitigation, you need to specify mds=off too.
Not specifying this option is equivalent to Not specifying this option is equivalent to
tsx_async_abort=full. On CPUs which are MDS affected tsx_async_abort=full. On CPUs which are MDS affected
and deploy MDS mitigation, TAA mitigation is not and deploy MDS mitigation, TAA mitigation is not

View File

@ -81,6 +81,12 @@ Optional properties:
Definition: Name of external front end module used. Some valid FEM names Definition: Name of external front end module used. Some valid FEM names
for example: "microsemi-lx5586", "sky85703-11" for example: "microsemi-lx5586", "sky85703-11"
and "sky85803" etc. and "sky85803" etc.
- qcom,snoc-host-cap-8bit-quirk:
Usage: Optional
Value type: <empty>
Definition: Quirk specifying that the firmware expects the 8bit version
of the host capability QMI request
Example (to supply PCI based wifi block details): Example (to supply PCI based wifi block details):

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 5 VERSION = 5
PATCHLEVEL = 4 PATCHLEVEL = 4
SUBLEVEL = 0 SUBLEVEL = 3
EXTRAVERSION = EXTRAVERSION =
NAME = Kleptomaniac Octopus NAME = Kleptomaniac Octopus

View File

@ -18,8 +18,8 @@
/ { / {
compatible = "samsung,exynos5433"; compatible = "samsung,exynos5433";
#address-cells = <1>; #address-cells = <2>;
#size-cells = <1>; #size-cells = <2>;
interrupt-parent = <&gic>; interrupt-parent = <&gic>;
@ -311,7 +311,7 @@
compatible = "simple-bus"; compatible = "simple-bus";
#address-cells = <1>; #address-cells = <1>;
#size-cells = <1>; #size-cells = <1>;
ranges; ranges = <0x0 0x0 0x0 0x18000000>;
chipid@10000000 { chipid@10000000 {
compatible = "samsung,exynos4210-chipid"; compatible = "samsung,exynos4210-chipid";

View File

@ -12,8 +12,8 @@
/ { / {
compatible = "samsung,exynos7"; compatible = "samsung,exynos7";
interrupt-parent = <&gic>; interrupt-parent = <&gic>;
#address-cells = <1>; #address-cells = <2>;
#size-cells = <1>; #size-cells = <2>;
aliases { aliases {
pinctrl0 = &pinctrl_alive; pinctrl0 = &pinctrl_alive;
@ -98,7 +98,7 @@
compatible = "simple-bus"; compatible = "simple-bus";
#address-cells = <1>; #address-cells = <1>;
#size-cells = <1>; #size-cells = <1>;
ranges; ranges = <0 0 0 0x18000000>;
chipid@10000000 { chipid@10000000 {
compatible = "samsung,exynos4210-chipid"; compatible = "samsung,exynos4210-chipid";

View File

@ -309,9 +309,8 @@
regulator-name = "VDD_12V"; regulator-name = "VDD_12V";
regulator-min-microvolt = <1200000>; regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>; regulator-max-microvolt = <1200000>;
gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>; gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_HIGH>;
regulator-boot-on; regulator-boot-on;
enable-active-low;
}; };
}; };
}; };

View File

@ -1612,7 +1612,7 @@
regulator-name = "VDD_HDMI_5V0"; regulator-name = "VDD_HDMI_5V0";
regulator-min-microvolt = <5000000>; regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>; regulator-max-microvolt = <5000000>;
gpio = <&exp1 12 GPIO_ACTIVE_LOW>; gpio = <&exp1 12 GPIO_ACTIVE_HIGH>;
enable-active-high; enable-active-high;
vin-supply = <&vdd_5v0_sys>; vin-supply = <&vdd_5v0_sys>;
}; };

View File

@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
{ {
unsigned long ret, limit = current_thread_info()->addr_limit; unsigned long ret, limit = current_thread_info()->addr_limit;
/*
* Asynchronous I/O running in a kernel thread does not have the
* TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
* the user address before checking.
*/
if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) && if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
test_thread_flag(TIF_TAGGED_ADDR)) (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
addr = untagged_addr(addr); addr = untagged_addr(addr);
__chk_user_ptr(addr); __chk_user_ptr(addr);

View File

@ -152,9 +152,12 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
/* Patch sites */ /* Patch sites */
extern s32 patch__call_flush_count_cache; extern s32 patch__call_flush_count_cache;
extern s32 patch__flush_count_cache_return; extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
extern s32 patch__memset_nocache, patch__memcpy_nocache; extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_count_cache; extern long flush_count_cache;
extern long kvm_flush_link_stack;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);

View File

@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Software required to flush count cache on context switch // Software required to flush count cache on context switch
#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
// Software required to flush link stack on context switch
#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
// Features enabled by default // Features enabled by default
#define SEC_FTR_DEFAULT \ #define SEC_FTR_DEFAULT \

View File

@ -537,6 +537,7 @@ flush_count_cache:
/* Save LR into r9 */ /* Save LR into r9 */
mflr r9 mflr r9
// Flush the link stack
.rept 64 .rept 64
bl .+4 bl .+4
.endr .endr
@ -546,6 +547,11 @@ flush_count_cache:
.balign 32 .balign 32
/* Restore LR */ /* Restore LR */
1: mtlr r9 1: mtlr r9
// If we're just flushing the link stack, return here
3: nop
patch_site 3b patch__flush_link_stack_return
li r9,0x7fff li r9,0x7fff
mtctr r9 mtctr r9

View File

@ -24,6 +24,7 @@ enum count_cache_flush_type {
COUNT_CACHE_FLUSH_HW = 0x4, COUNT_CACHE_FLUSH_HW = 0x4,
}; };
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
static bool link_stack_flush_enabled;
bool barrier_nospec_enabled; bool barrier_nospec_enabled;
static bool no_nospec; static bool no_nospec;
@ -212,11 +213,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (ccd) if (ccd)
seq_buf_printf(&s, "Indirect branch cache disabled"); seq_buf_printf(&s, "Indirect branch cache disabled");
if (link_stack_flush_enabled)
seq_buf_printf(&s, ", Software link stack flush");
} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
seq_buf_printf(&s, "Mitigation: Software count cache flush"); seq_buf_printf(&s, "Mitigation: Software count cache flush");
if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
seq_buf_printf(&s, " (hardware accelerated)"); seq_buf_printf(&s, " (hardware accelerated)");
if (link_stack_flush_enabled)
seq_buf_printf(&s, ", Software link stack flush");
} else if (btb_flush_enabled) { } else if (btb_flush_enabled) {
seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
} else { } else {
@ -377,18 +386,49 @@ static __init int stf_barrier_debugfs_init(void)
device_initcall(stf_barrier_debugfs_init); device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */ #endif /* CONFIG_DEBUG_FS */
static void no_count_cache_flush(void)
{
count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
pr_info("count-cache-flush: software flush disabled.\n");
}
static void toggle_count_cache_flush(bool enable) static void toggle_count_cache_flush(bool enable)
{ {
if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
!security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
enable = false;
if (!enable) {
patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
pr_info("count-cache-flush: software flush disabled.\n"); patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
#endif
pr_info("link-stack-flush: software flush disabled.\n");
link_stack_flush_enabled = false;
no_count_cache_flush();
return; return;
} }
// This enables the branch from _switch to flush_count_cache
patch_branch_site(&patch__call_flush_count_cache, patch_branch_site(&patch__call_flush_count_cache,
(u64)&flush_count_cache, BRANCH_SET_LINK); (u64)&flush_count_cache, BRANCH_SET_LINK);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
// This enables the branch from guest_exit_cont to kvm_flush_link_stack
patch_branch_site(&patch__call_kvm_flush_link_stack,
(u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
#endif
pr_info("link-stack-flush: software flush enabled.\n");
link_stack_flush_enabled = true;
// If we just need to flush the link stack, patch an early return
if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
no_count_cache_flush();
return;
}
if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
count_cache_flush_type = COUNT_CACHE_FLUSH_SW; count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
pr_info("count-cache-flush: full software flush sequence enabled.\n"); pr_info("count-cache-flush: full software flush sequence enabled.\n");
@ -407,11 +447,20 @@ void setup_count_cache_flush(void)
if (no_spectrev2 || cpu_mitigations_off()) { if (no_spectrev2 || cpu_mitigations_off()) {
if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
pr_warn("Spectre v2 mitigations not under software control, can't disable\n"); pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
enable = false; enable = false;
} }
/*
* There's no firmware feature flag/hypervisor bit to tell us we need to
* flush the link stack on context switch. So we set it here if we see
* either of the Spectre v2 mitigations that aim to protect userspace.
*/
if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
toggle_count_cache_flush(enable); toggle_count_cache_flush(enable);
} }

View File

@ -11,6 +11,7 @@
*/ */
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h> #include <asm/kvm_asm.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/mmu.h> #include <asm/mmu.h>
@ -1487,6 +1488,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1: 1:
#endif /* CONFIG_KVM_XICS */ #endif /* CONFIG_KVM_XICS */
/*
* Possibly flush the link stack here, before we do a blr in
* guest_exit_short_path.
*/
1: nop
patch_site 1b patch__call_kvm_flush_link_stack
/* If we came in through the P9 short path, go back out to C now */ /* If we came in through the P9 short path, go back out to C now */
lwz r0, STACK_SLOT_SHORT_PATH(r1) lwz r0, STACK_SLOT_SHORT_PATH(r1)
cmpwi r0, 0 cmpwi r0, 0
@ -1963,6 +1971,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtlr r0 mtlr r0
blr blr
.balign 32
.global kvm_flush_link_stack
kvm_flush_link_stack:
/* Save LR into r0 */
mflr r0
/* Flush the link stack. On Power8 it's up to 32 entries in size. */
.rept 32
bl .+4
.endr
/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
.rept 32
bl .+4
.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
/* Restore LR */
mtlr r0
blr
kvmppc_guest_external: kvmppc_guest_external:
/* External interrupt, first check for host_ipi. If this is /* External interrupt, first check for host_ipi. If this is
* set, we know the host wants us out so let's do it now * set, we know the host wants us out so let's do it now

View File

@ -2005,6 +2005,10 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
pr_devel("Creating xive for partition\n"); pr_devel("Creating xive for partition\n");
/* Already there ? */
if (kvm->arch.xive)
return -EEXIST;
xive = kvmppc_xive_get_device(kvm, type); xive = kvmppc_xive_get_device(kvm, type);
if (!xive) if (!xive)
return -ENOMEM; return -ENOMEM;
@ -2014,12 +2018,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
xive->kvm = kvm; xive->kvm = kvm;
mutex_init(&xive->lock); mutex_init(&xive->lock);
/* Already there ? */
if (kvm->arch.xive)
ret = -EEXIST;
else
kvm->arch.xive = xive;
/* We use the default queue size set by the host */ /* We use the default queue size set by the host */
xive->q_order = xive_native_default_eq_shift(); xive->q_order = xive_native_default_eq_shift();
if (xive->q_order < PAGE_SHIFT) if (xive->q_order < PAGE_SHIFT)
@ -2039,6 +2037,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
if (ret) if (ret)
return ret; return ret;
kvm->arch.xive = xive;
return 0; return 0;
} }

View File

@ -50,6 +50,24 @@ static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
} }
} }
static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
u8 prio, __be32 *qpage,
u32 order, bool can_escalate)
{
int rc;
__be32 *qpage_prev = q->qpage;
rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
can_escalate);
if (rc)
return rc;
if (qpage_prev)
put_page(virt_to_page(qpage_prev));
return rc;
}
void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
{ {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@ -582,19 +600,14 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
q->guest_qaddr = 0; q->guest_qaddr = 0;
q->guest_qshift = 0; q->guest_qshift = 0;
rc = xive_native_configure_queue(xc->vp_id, q, priority, rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
NULL, 0, true); NULL, 0, true);
if (rc) { if (rc) {
pr_err("Failed to reset queue %d for VCPU %d: %d\n", pr_err("Failed to reset queue %d for VCPU %d: %d\n",
priority, xc->server_num, rc); priority, xc->server_num, rc);
return rc; return rc;
} }
if (q->qpage) {
put_page(virt_to_page(q->qpage));
q->qpage = NULL;
}
return 0; return 0;
} }
@ -624,12 +637,6 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
srcu_idx = srcu_read_lock(&kvm->srcu); srcu_idx = srcu_read_lock(&kvm->srcu);
gfn = gpa_to_gfn(kvm_eq.qaddr); gfn = gpa_to_gfn(kvm_eq.qaddr);
page = gfn_to_page(kvm, gfn);
if (is_error_page(page)) {
srcu_read_unlock(&kvm->srcu, srcu_idx);
pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
return -EINVAL;
}
page_size = kvm_host_page_size(kvm, gfn); page_size = kvm_host_page_size(kvm, gfn);
if (1ull << kvm_eq.qshift > page_size) { if (1ull << kvm_eq.qshift > page_size) {
@ -638,6 +645,13 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
return -EINVAL; return -EINVAL;
} }
page = gfn_to_page(kvm, gfn);
if (is_error_page(page)) {
srcu_read_unlock(&kvm->srcu, srcu_idx);
pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
return -EINVAL;
}
qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK); qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
srcu_read_unlock(&kvm->srcu, srcu_idx); srcu_read_unlock(&kvm->srcu, srcu_idx);
@ -653,8 +667,8 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
* OPAL level because the use of END ESBs is not supported by * OPAL level because the use of END ESBs is not supported by
* Linux. * Linux.
*/ */
rc = xive_native_configure_queue(xc->vp_id, q, priority, rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
(__be32 *) qaddr, kvm_eq.qshift, true); (__be32 *) qaddr, kvm_eq.qshift, true);
if (rc) { if (rc) {
pr_err("Failed to configure queue %d for VCPU %d: %d\n", pr_err("Failed to configure queue %d for VCPU %d: %d\n",
priority, xc->server_num, rc); priority, xc->server_num, rc);
@ -1081,7 +1095,6 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
dev->private = xive; dev->private = xive;
xive->dev = dev; xive->dev = dev;
xive->kvm = kvm; xive->kvm = kvm;
kvm->arch.xive = xive;
mutex_init(&xive->mapping_lock); mutex_init(&xive->mapping_lock);
mutex_init(&xive->lock); mutex_init(&xive->lock);
@ -1102,6 +1115,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
if (ret) if (ret)
return ret; return ret;
kvm->arch.xive = xive;
return 0; return 0;
} }

View File

@ -407,6 +407,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
} }
#define ioremap_nocache(X,Y) ioremap((X),(Y)) #define ioremap_nocache(X,Y) ioremap((X),(Y))
#define ioremap_uc(X,Y) ioremap((X),(Y))
#define ioremap_wc(X,Y) ioremap((X),(Y)) #define ioremap_wc(X,Y) ioremap((X),(Y))
#define ioremap_wt(X,Y) ioremap((X),(Y)) #define ioremap_wt(X,Y) ioremap((X),(Y))

View File

@ -172,7 +172,7 @@
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
.if \no_user_check == 0 .if \no_user_check == 0
/* coming from usermode? */ /* coming from usermode? */
testl $SEGMENT_RPL_MASK, PT_CS(%esp) testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
jz .Lend_\@ jz .Lend_\@
.endif .endif
/* On user-cr3? */ /* On user-cr3? */
@ -205,64 +205,76 @@
#define CS_FROM_ENTRY_STACK (1 << 31) #define CS_FROM_ENTRY_STACK (1 << 31)
#define CS_FROM_USER_CR3 (1 << 30) #define CS_FROM_USER_CR3 (1 << 30)
#define CS_FROM_KERNEL (1 << 29) #define CS_FROM_KERNEL (1 << 29)
#define CS_FROM_ESPFIX (1 << 28)
.macro FIXUP_FRAME .macro FIXUP_FRAME
/* /*
* The high bits of the CS dword (__csh) are used for CS_FROM_*. * The high bits of the CS dword (__csh) are used for CS_FROM_*.
* Clear them in case hardware didn't do this for us. * Clear them in case hardware didn't do this for us.
*/ */
andl $0x0000ffff, 3*4(%esp) andl $0x0000ffff, 4*4(%esp)
#ifdef CONFIG_VM86 #ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, 4*4(%esp) testl $X86_EFLAGS_VM, 5*4(%esp)
jnz .Lfrom_usermode_no_fixup_\@ jnz .Lfrom_usermode_no_fixup_\@
#endif #endif
testl $SEGMENT_RPL_MASK, 3*4(%esp) testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
jnz .Lfrom_usermode_no_fixup_\@ jnz .Lfrom_usermode_no_fixup_\@
orl $CS_FROM_KERNEL, 3*4(%esp) orl $CS_FROM_KERNEL, 4*4(%esp)
/* /*
* When we're here from kernel mode; the (exception) stack looks like: * When we're here from kernel mode; the (exception) stack looks like:
* *
* 5*4(%esp) - <previous context> * 6*4(%esp) - <previous context>
* 4*4(%esp) - flags * 5*4(%esp) - flags
* 3*4(%esp) - cs * 4*4(%esp) - cs
* 2*4(%esp) - ip * 3*4(%esp) - ip
* 1*4(%esp) - orig_eax * 2*4(%esp) - orig_eax
* 0*4(%esp) - gs / function * 1*4(%esp) - gs / function
* 0*4(%esp) - fs
* *
* Lets build a 5 entry IRET frame after that, such that struct pt_regs * Lets build a 5 entry IRET frame after that, such that struct pt_regs
* is complete and in particular regs->sp is correct. This gives us * is complete and in particular regs->sp is correct. This gives us
* the original 5 enties as gap: * the original 6 enties as gap:
* *
* 12*4(%esp) - <previous context> * 14*4(%esp) - <previous context>
* 11*4(%esp) - gap / flags * 13*4(%esp) - gap / flags
* 10*4(%esp) - gap / cs * 12*4(%esp) - gap / cs
* 9*4(%esp) - gap / ip * 11*4(%esp) - gap / ip
* 8*4(%esp) - gap / orig_eax * 10*4(%esp) - gap / orig_eax
* 7*4(%esp) - gap / gs / function * 9*4(%esp) - gap / gs / function
* 6*4(%esp) - ss * 8*4(%esp) - gap / fs
* 5*4(%esp) - sp * 7*4(%esp) - ss
* 4*4(%esp) - flags * 6*4(%esp) - sp
* 3*4(%esp) - cs * 5*4(%esp) - flags
* 2*4(%esp) - ip * 4*4(%esp) - cs
* 1*4(%esp) - orig_eax * 3*4(%esp) - ip
* 0*4(%esp) - gs / function * 2*4(%esp) - orig_eax
* 1*4(%esp) - gs / function
* 0*4(%esp) - fs
*/ */
pushl %ss # ss pushl %ss # ss
pushl %esp # sp (points at ss) pushl %esp # sp (points at ss)
addl $6*4, (%esp) # point sp back at the previous context addl $7*4, (%esp) # point sp back at the previous context
pushl 6*4(%esp) # flags pushl 7*4(%esp) # flags
pushl 6*4(%esp) # cs pushl 7*4(%esp) # cs
pushl 6*4(%esp) # ip pushl 7*4(%esp) # ip
pushl 6*4(%esp) # orig_eax pushl 7*4(%esp) # orig_eax
pushl 6*4(%esp) # gs / function pushl 7*4(%esp) # gs / function
pushl 7*4(%esp) # fs
.Lfrom_usermode_no_fixup_\@: .Lfrom_usermode_no_fixup_\@:
.endm .endm
.macro IRET_FRAME .macro IRET_FRAME
/*
* We're called with %ds, %es, %fs, and %gs from the interrupted
* frame, so we shouldn't use them. Also, we may be in ESPFIX
* mode and therefore have a nonzero SS base and an offset ESP,
* so any attempt to access the stack needs to use SS. (except for
* accesses through %esp, which automatically use SS.)
*/
testl $CS_FROM_KERNEL, 1*4(%esp) testl $CS_FROM_KERNEL, 1*4(%esp)
jz .Lfinished_frame_\@ jz .Lfinished_frame_\@
@ -276,31 +288,40 @@
movl 5*4(%esp), %eax # (modified) regs->sp movl 5*4(%esp), %eax # (modified) regs->sp
movl 4*4(%esp), %ecx # flags movl 4*4(%esp), %ecx # flags
movl %ecx, -4(%eax) movl %ecx, %ss:-1*4(%eax)
movl 3*4(%esp), %ecx # cs movl 3*4(%esp), %ecx # cs
andl $0x0000ffff, %ecx andl $0x0000ffff, %ecx
movl %ecx, -8(%eax) movl %ecx, %ss:-2*4(%eax)
movl 2*4(%esp), %ecx # ip movl 2*4(%esp), %ecx # ip
movl %ecx, -12(%eax) movl %ecx, %ss:-3*4(%eax)
movl 1*4(%esp), %ecx # eax movl 1*4(%esp), %ecx # eax
movl %ecx, -16(%eax) movl %ecx, %ss:-4*4(%eax)
popl %ecx popl %ecx
lea -16(%eax), %esp lea -4*4(%eax), %esp
popl %eax popl %eax
.Lfinished_frame_\@: .Lfinished_frame_\@:
.endm .endm
.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
cld cld
.if \skip_gs == 0 .if \skip_gs == 0
PUSH_GS PUSH_GS
.endif .endif
FIXUP_FRAME
pushl %fs pushl %fs
pushl %eax
movl $(__KERNEL_PERCPU), %eax
movl %eax, %fs
.if \unwind_espfix > 0
UNWIND_ESPFIX_STACK
.endif
popl %eax
FIXUP_FRAME
pushl %es pushl %es
pushl %ds pushl %ds
pushl \pt_regs_ax pushl \pt_regs_ax
@ -313,8 +334,6 @@
movl $(__USER_DS), %edx movl $(__USER_DS), %edx
movl %edx, %ds movl %edx, %ds
movl %edx, %es movl %edx, %es
movl $(__KERNEL_PERCPU), %edx
movl %edx, %fs
.if \skip_gs == 0 .if \skip_gs == 0
SET_KERNEL_GS %edx SET_KERNEL_GS %edx
.endif .endif
@ -324,8 +343,8 @@
.endif .endif
.endm .endm
.macro SAVE_ALL_NMI cr3_reg:req .macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
SAVE_ALL SAVE_ALL unwind_espfix=\unwind_espfix
BUG_IF_WRONG_CR3 BUG_IF_WRONG_CR3
@ -357,6 +376,7 @@
2: popl %es 2: popl %es
3: popl %fs 3: popl %fs
POP_GS \pop POP_GS \pop
IRET_FRAME
.pushsection .fixup, "ax" .pushsection .fixup, "ax"
4: movl $0, (%esp) 4: movl $0, (%esp)
jmp 1b jmp 1b
@ -395,7 +415,8 @@
.macro CHECK_AND_APPLY_ESPFIX .macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) #define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET
ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
@ -1075,7 +1096,6 @@ restore_all:
/* Restore user state */ /* Restore user state */
RESTORE_REGS pop=4 # skip orig_eax/error_code RESTORE_REGS pop=4 # skip orig_eax/error_code
.Lirq_return: .Lirq_return:
IRET_FRAME
/* /*
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
* when returning from IPI handler and when returning from * when returning from IPI handler and when returning from
@ -1128,30 +1148,43 @@ ENDPROC(entry_INT80_32)
* We can't call C functions using the ESPFIX stack. This code reads * We can't call C functions using the ESPFIX stack. This code reads
* the high word of the segment base from the GDT and swiches to the * the high word of the segment base from the GDT and swiches to the
* normal stack and adjusts ESP with the matching offset. * normal stack and adjusts ESP with the matching offset.
*
* We might be on user CR3 here, so percpu data is not mapped and we can't
* access the GDT through the percpu segment. Instead, use SGDT to find
* the cpu_entry_area alias of the GDT.
*/ */
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
/* fixup the stack */ /* fixup the stack */
mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ pushl %ecx
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ subl $2*4, %esp
sgdt (%esp)
movl 2(%esp), %ecx /* GDT address */
/*
* Careful: ECX is a linear pointer, so we need to force base
* zero. %cs is the only known-linear segment we have right now.
*/
mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */
mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */
shl $16, %eax shl $16, %eax
addl $2*4, %esp
popl %ecx
addl %esp, %eax /* the adjusted stack pointer */ addl %esp, %eax /* the adjusted stack pointer */
pushl $__KERNEL_DS pushl $__KERNEL_DS
pushl %eax pushl %eax
lss (%esp), %esp /* switch to the normal stack segment */ lss (%esp), %esp /* switch to the normal stack segment */
#endif #endif
.endm .endm
.macro UNWIND_ESPFIX_STACK .macro UNWIND_ESPFIX_STACK
/* It's safe to clobber %eax, all other regs need to be preserved */
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
movl %ss, %eax movl %ss, %eax
/* see if on espfix stack */ /* see if on espfix stack */
cmpw $__ESPFIX_SS, %ax cmpw $__ESPFIX_SS, %ax
jne 27f jne .Lno_fixup_\@
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
/* switch to normal stack */ /* switch to normal stack */
FIXUP_ESPFIX_STACK FIXUP_ESPFIX_STACK
27: .Lno_fixup_\@:
#endif #endif
.endm .endm
@ -1341,11 +1374,6 @@ END(spurious_interrupt_bug)
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
ENTRY(xen_hypervisor_callback) ENTRY(xen_hypervisor_callback)
pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
/* /*
* Check to see if we got the event in the critical * Check to see if we got the event in the critical
* region in xen_iret_direct, after we've reenabled * region in xen_iret_direct, after we've reenabled
@ -1353,16 +1381,17 @@ ENTRY(xen_hypervisor_callback)
* iret instruction's behaviour where it delivers a * iret instruction's behaviour where it delivers a
* pending interrupt when enabling interrupts: * pending interrupt when enabling interrupts:
*/ */
movl PT_EIP(%esp), %eax cmpl $xen_iret_start_crit, (%esp)
cmpl $xen_iret_start_crit, %eax
jb 1f jb 1f
cmpl $xen_iret_end_crit, %eax cmpl $xen_iret_end_crit, (%esp)
jae 1f jae 1f
call xen_iret_crit_fixup
jmp xen_iret_crit_fixup 1:
pushl $-1 /* orig_ax = -1 => not a system call */
ENTRY(xen_do_upcall) SAVE_ALL
1: mov %esp, %eax ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
mov %esp, %eax
call xen_evtchn_do_upcall call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPTION #ifndef CONFIG_PREEMPTION
call xen_maybe_preempt_hcall call xen_maybe_preempt_hcall
@ -1449,10 +1478,9 @@ END(page_fault)
common_exception_read_cr2: common_exception_read_cr2:
/* the function address is in %gs's slot on the stack */ /* the function address is in %gs's slot on the stack */
SAVE_ALL switch_stacks=1 skip_gs=1 SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER ENCODE_FRAME_POINTER
UNWIND_ESPFIX_STACK
/* fixup %gs */ /* fixup %gs */
GS_TO_REG %ecx GS_TO_REG %ecx
@ -1474,9 +1502,8 @@ END(common_exception_read_cr2)
common_exception: common_exception:
/* the function address is in %gs's slot on the stack */ /* the function address is in %gs's slot on the stack */
SAVE_ALL switch_stacks=1 skip_gs=1 SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER ENCODE_FRAME_POINTER
UNWIND_ESPFIX_STACK
/* fixup %gs */ /* fixup %gs */
GS_TO_REG %ecx GS_TO_REG %ecx
@ -1515,6 +1542,10 @@ ENTRY(nmi)
ASM_CLAC ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
/*
* ESPFIX_SS is only ever set on the return to user path
* after we've switched to the entry stack.
*/
pushl %eax pushl %eax
movl %ss, %eax movl %ss, %eax
cmpw $__ESPFIX_SS, %ax cmpw $__ESPFIX_SS, %ax
@ -1550,6 +1581,11 @@ ENTRY(nmi)
movl %ebx, %esp movl %ebx, %esp
.Lnmi_return: .Lnmi_return:
#ifdef CONFIG_X86_ESPFIX32
testl $CS_FROM_ESPFIX, PT_CS(%esp)
jnz .Lnmi_from_espfix
#endif
CHECK_AND_APPLY_ESPFIX CHECK_AND_APPLY_ESPFIX
RESTORE_ALL_NMI cr3_reg=%edi pop=4 RESTORE_ALL_NMI cr3_reg=%edi pop=4
jmp .Lirq_return jmp .Lirq_return
@ -1557,23 +1593,42 @@ ENTRY(nmi)
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack: .Lnmi_espfix_stack:
/* /*
* create the pointer to lss back * Create the pointer to LSS back
*/ */
pushl %ss pushl %ss
pushl %esp pushl %esp
addl $4, (%esp) addl $4, (%esp)
/* copy the iret frame of 12 bytes */
.rept 3 /* Copy the (short) IRET frame */
pushl 16(%esp) pushl 4*4(%esp) # flags
.endr pushl 4*4(%esp) # cs
pushl %eax pushl 4*4(%esp) # ip
SAVE_ALL_NMI cr3_reg=%edi
pushl %eax # orig_ax
SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
ENCODE_FRAME_POINTER ENCODE_FRAME_POINTER
FIXUP_ESPFIX_STACK # %eax == %esp
/* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
xorl %edx, %edx # zero error code xorl %edx, %edx # zero error code
call do_nmi movl %esp, %eax # pt_regs pointer
jmp .Lnmi_from_sysenter_stack
.Lnmi_from_espfix:
RESTORE_ALL_NMI cr3_reg=%edi RESTORE_ALL_NMI cr3_reg=%edi
lss 12+4(%esp), %esp # back to espfix stack /*
* Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
* fix up the gap and long frame:
*
* 3 - original frame (exception)
* 2 - ESPFIX block (above)
* 6 - gap (FIXUP_FRAME)
* 5 - long frame (FIXUP_FRAME)
* 1 - orig_ax
*/
lss (1+5+6)*4(%esp), %esp # back to espfix stack
jmp .Lirq_return jmp .Lirq_return
#endif #endif
END(nmi) END(nmi)

View File

@ -78,8 +78,12 @@ struct cpu_entry_area {
/* /*
* The GDT is just below entry_stack and thus serves (on x86_64) as * The GDT is just below entry_stack and thus serves (on x86_64) as
* a a read-only guard page. * a read-only guard page. On 32-bit the GDT must be writeable, so
* it needs an extra guard page.
*/ */
#ifdef CONFIG_X86_32
char guard_entry_stack[PAGE_SIZE];
#endif
struct entry_stack_page entry_stack_page; struct entry_stack_page entry_stack_page;
/* /*
@ -94,7 +98,6 @@ struct cpu_entry_area {
*/ */
struct cea_exception_stacks estacks; struct cea_exception_stacks estacks;
#endif #endif
#ifdef CONFIG_CPU_SUP_INTEL
/* /*
* Per CPU debug store for Intel performance monitoring. Wastes a * Per CPU debug store for Intel performance monitoring. Wastes a
* full page at the moment. * full page at the moment.
@ -105,11 +108,13 @@ struct cpu_entry_area {
* Reserve enough fixmap PTEs. * Reserve enough fixmap PTEs.
*/ */
struct debug_store_buffers cpu_debug_buffers; struct debug_store_buffers cpu_debug_buffers;
#endif
}; };
#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) #define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
/* Total size includes the readonly IDT mapping page as well: */
#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks); DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
@ -117,13 +122,14 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
extern void setup_cpu_entry_areas(void); extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
/* Single page reserved for the readonly IDT mapping: */
#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
#define CPU_ENTRY_AREA_MAP_SIZE \ #define CPU_ENTRY_AREA_MAP_SIZE \
(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
extern struct cpu_entry_area *get_cpu_entry_area(int cpu); extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

View File

@ -509,7 +509,7 @@ static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{ {
return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
} }
/* /*

View File

@ -44,11 +44,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
* Define this here and validate with BUILD_BUG_ON() in pgtable_32.c * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
* to avoid include recursion hell * to avoid include recursion hell
*/ */
#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) #define CPU_ENTRY_AREA_PAGES (NR_CPUS * 39)
#define CPU_ENTRY_AREA_BASE \ /* The +1 is for the readonly IDT page: */
((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \ #define CPU_ENTRY_AREA_BASE \
& PMD_MASK) ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
#define LDT_BASE_ADDR \ #define LDT_BASE_ADDR \
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)

View File

@ -31,6 +31,18 @@
*/ */
#define SEGMENT_RPL_MASK 0x3 #define SEGMENT_RPL_MASK 0x3
/*
* When running on Xen PV, the actual privilege level of the kernel is 1,
* not 0. Testing the Requested Privilege Level in a segment selector to
* determine whether the context is user mode or kernel mode with
* SEGMENT_RPL_MASK is wrong because the PV kernel's privilege level
* matches the 0x3 mask.
*
* Testing with USER_SEGMENT_RPL_MASK is valid for both native and Xen PV
* kernels because privilege level 2 is never used.
*/
#define USER_SEGMENT_RPL_MASK 0x2
/* User mode is privilege level 3: */ /* User mode is privilege level 3: */
#define USER_RPL 0x3 #define USER_RPL 0x3

View File

@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void); static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void); static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void); static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init taa_select_mitigation(void); static void __init taa_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */ /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
@ -108,6 +109,12 @@ void __init check_bugs(void)
mds_select_mitigation(); mds_select_mitigation();
taa_select_mitigation(); taa_select_mitigation();
/*
* As MDS and TAA mitigations are inter-related, print MDS
* mitigation until after TAA mitigation selection is done.
*/
mds_print_mitigation();
arch_smt_update(); arch_smt_update();
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
@ -245,6 +252,12 @@ static void __init mds_select_mitigation(void)
(mds_nosmt || cpu_mitigations_auto_nosmt())) (mds_nosmt || cpu_mitigations_auto_nosmt()))
cpu_smt_disable(false); cpu_smt_disable(false);
} }
}
static void __init mds_print_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
return;
pr_info("%s\n", mds_strings[mds_mitigation]); pr_info("%s\n", mds_strings[mds_mitigation]);
} }
@ -304,8 +317,12 @@ static void __init taa_select_mitigation(void)
return; return;
} }
/* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */ /*
if (taa_mitigation == TAA_MITIGATION_OFF) * TAA mitigation via VERW is turned off if both
* tsx_async_abort=off and mds=off are specified.
*/
if (taa_mitigation == TAA_MITIGATION_OFF &&
mds_mitigation == MDS_MITIGATION_OFF)
goto out; goto out;
if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
@ -339,6 +356,15 @@ static void __init taa_select_mitigation(void)
if (taa_nosmt || cpu_mitigations_auto_nosmt()) if (taa_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false); cpu_smt_disable(false);
/*
* Update MDS mitigation, if necessary, as the mds_user_clear is
* now enabled for TAA mitigation.
*/
if (mds_mitigation == MDS_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MDS)) {
mds_mitigation = MDS_MITIGATION_FULL;
mds_select_mitigation();
}
out: out:
pr_info("%s\n", taa_strings[taa_mitigation]); pr_info("%s\n", taa_strings[taa_mitigation]);
} }

View File

@ -65,6 +65,9 @@ struct x86_hw_tss doublefault_tss __cacheline_aligned = {
.ss = __KERNEL_DS, .ss = __KERNEL_DS,
.ds = __USER_DS, .ds = __USER_DS,
.fs = __KERNEL_PERCPU, .fs = __KERNEL_PERCPU,
#ifndef CONFIG_X86_32_LAZY_GS
.gs = __KERNEL_STACK_CANARY,
#endif
.__cr3 = __pa_nodebug(swapper_pg_dir), .__cr3 = __pa_nodebug(swapper_pg_dir),
}; };

View File

@ -571,6 +571,16 @@ ENTRY(initial_page_table)
# error "Kernel PMDs should be 1, 2 or 3" # error "Kernel PMDs should be 1, 2 or 3"
# endif # endif
.align PAGE_SIZE /* needs to be page-sized too */ .align PAGE_SIZE /* needs to be page-sized too */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
* PTI needs another page so sync_initial_pagetable() works correctly
* and does not scribble over the data which is placed behind the
* actual initial_page_table. See clone_pgd_range().
*/
.fill 1024, 4, 0
#endif
#endif #endif
.data .data

View File

@ -504,7 +504,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
r = -E2BIG; r = -E2BIG;
if (*nent >= maxnent) if (WARN_ON(*nent >= maxnent))
goto out; goto out;
do_host_cpuid(entry, function, 0); do_host_cpuid(entry, function, 0);
@ -810,6 +810,9 @@ out:
static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func, static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
int *nent, int maxnent, unsigned int type) int *nent, int maxnent, unsigned int type)
{ {
if (*nent >= maxnent)
return -E2BIG;
if (type == KVM_GET_EMULATED_CPUID) if (type == KVM_GET_EMULATED_CPUID)
return __do_cpuid_func_emulated(entry, func, nent, maxnent); return __do_cpuid_func_emulated(entry, func, nent, maxnent);

View File

@ -2418,6 +2418,16 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
entry_failure_code)) entry_failure_code))
return -EINVAL; return -EINVAL;
/*
* Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
* on nested VM-Exit, which can occur without actually running L2 and
* thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with
* vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
* transition to HLT instead of running L2.
*/
if (enable_ept)
vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
/* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
is_pae_paging(vcpu)) { is_pae_paging(vcpu)) {

View File

@ -2995,6 +2995,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{ {
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
bool update_guest_cr3 = true;
unsigned long guest_cr3; unsigned long guest_cr3;
u64 eptp; u64 eptp;
@ -3011,15 +3012,18 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
} }
if (enable_unrestricted_guest || is_paging(vcpu) || /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */
is_guest_mode(vcpu)) if (is_guest_mode(vcpu))
update_guest_cr3 = false;
else if (enable_unrestricted_guest || is_paging(vcpu))
guest_cr3 = kvm_read_cr3(vcpu); guest_cr3 = kvm_read_cr3(vcpu);
else else
guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
ept_load_pdptrs(vcpu); ept_load_pdptrs(vcpu);
} }
vmcs_writel(GUEST_CR3, guest_cr3); if (update_guest_cr3)
vmcs_writel(GUEST_CR3, guest_cr3);
} }
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)

View File

@ -300,13 +300,14 @@ int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
int err; int err;
if (((value ^ smsr->values[slot].curr) & mask) == 0) value = (value & mask) | (smsr->values[slot].host & ~mask);
if (value == smsr->values[slot].curr)
return 0; return 0;
smsr->values[slot].curr = value;
err = wrmsrl_safe(shared_msrs_global.msrs[slot], value); err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
if (err) if (err)
return 1; return 1;
smsr->values[slot].curr = value;
if (!smsr->registered) { if (!smsr->registered) {
smsr->urn.on_user_return = kvm_on_user_return; smsr->urn.on_user_return = kvm_on_user_return;
user_return_notifier_register(&smsr->urn); user_return_notifier_register(&smsr->urn);
@ -1327,10 +1328,15 @@ static u64 kvm_get_arch_capabilities(void)
* If TSX is disabled on the system, guests are also mitigated against * If TSX is disabled on the system, guests are also mitigated against
* TAA and clear CPU buffer mitigation is not required for guests. * TAA and clear CPU buffer mitigation is not required for guests.
*/ */
if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) && if (!boot_cpu_has(X86_FEATURE_RTM))
(data & ARCH_CAP_TSX_CTRL_MSR)) data &= ~ARCH_CAP_TAA_NO;
else if (!boot_cpu_has_bug(X86_BUG_TAA))
data |= ARCH_CAP_TAA_NO;
else if (data & ARCH_CAP_TSX_CTRL_MSR)
data &= ~ARCH_CAP_MDS_NO; data &= ~ARCH_CAP_MDS_NO;
/* KVM does not emulate MSR_IA32_TSX_CTRL. */
data &= ~ARCH_CAP_TSX_CTRL_MSR;
return data; return data;
} }
@ -4421,6 +4427,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_SET_NESTED_STATE: { case KVM_SET_NESTED_STATE: {
struct kvm_nested_state __user *user_kvm_nested_state = argp; struct kvm_nested_state __user *user_kvm_nested_state = argp;
struct kvm_nested_state kvm_state; struct kvm_nested_state kvm_state;
int idx;
r = -EINVAL; r = -EINVAL;
if (!kvm_x86_ops->set_nested_state) if (!kvm_x86_ops->set_nested_state)
@ -4444,7 +4451,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
&& !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
break; break;
idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break; break;
} }
case KVM_GET_SUPPORTED_HV_CPUID: { case KVM_GET_SUPPORTED_HV_CPUID: {

View File

@ -178,7 +178,9 @@ static __init void setup_cpu_entry_area_ptes(void)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
unsigned long start, end; unsigned long start, end;
BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); /* The +1 is for the readonly IDT: */
BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
start = CPU_ENTRY_AREA_BASE; start = CPU_ENTRY_AREA_BASE;

View File

@ -197,7 +197,7 @@ void vmalloc_sync_all(void)
return; return;
for (address = VMALLOC_START & PMD_MASK; for (address = VMALLOC_START & PMD_MASK;
address >= TASK_SIZE_MAX && address < FIXADDR_TOP; address >= TASK_SIZE_MAX && address < VMALLOC_END;
address += PMD_SIZE) { address += PMD_SIZE) {
struct page *page; struct page *page;

View File

@ -588,6 +588,17 @@ static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
} }
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
/*
* Device [1022:7914]
* When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
*/
static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
{
dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
/* /*
* Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff] * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
* *

View File

@ -69,7 +69,7 @@ BEGIN {
lprefix1_expr = "\\((66|!F3)\\)" lprefix1_expr = "\\((66|!F3)\\)"
lprefix2_expr = "\\(F3\\)" lprefix2_expr = "\\(F3\\)"
lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
lprefix_expr = "\\((66|F2|F3)\\)" lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4 max_lprefix = 4
@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
return add_flags(imm, mod) return add_flags(imm, mod)
} }
/^[0-9a-f]+\:/ { /^[0-9a-f]+:/ {
if (NR == 1) if (NR == 1)
next next
# get index # get index

View File

@ -126,10 +126,9 @@ hyper_iret:
.globl xen_iret_start_crit, xen_iret_end_crit .globl xen_iret_start_crit, xen_iret_end_crit
/* /*
* This is called by xen_hypervisor_callback in entry.S when it sees * This is called by xen_hypervisor_callback in entry_32.S when it sees
* that the EIP at the time of interrupt was between * that the EIP at the time of interrupt was between
* xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in * xen_iret_start_crit and xen_iret_end_crit.
* %eax so we can do a more refined determination of what to do.
* *
* The stack format at this point is: * The stack format at this point is:
* ---------------- * ----------------
@ -138,70 +137,46 @@ hyper_iret:
* eflags } outer exception info * eflags } outer exception info
* cs } * cs }
* eip } * eip }
* ---------------- <- edi (copy dest) * ----------------
* eax : outer eax if it hasn't been restored * eax : outer eax if it hasn't been restored
* ---------------- * ----------------
* eflags } nested exception info * eflags }
* cs } (no ss/esp because we're nested * cs } nested exception info
* eip } from the same ring) * eip }
* orig_eax }<- esi (copy src) * return address : (into xen_hypervisor_callback)
* - - - - - - - -
* fs }
* es }
* ds } SAVE_ALL state
* eax }
* : :
* ebx }<- esp
* ----------------
* *
* In order to deliver the nested exception properly, we need to shift * In order to deliver the nested exception properly, we need to discard the
* everything from the return addr up to the error code so it sits * nested exception frame such that when we handle the exception, we do it
* just under the outer exception info. This means that when we * in the context of the outer exception rather than starting a new one.
* handle the exception, we do it in the context of the outer
* exception rather than starting a new one.
* *
* The only caveat is that if the outer eax hasn't been restored yet * The only caveat is that if the outer eax hasn't been restored yet (i.e.
* (ie, it's still on stack), we need to insert its value into the * it's still on stack), we need to restore its value here.
* SAVE_ALL state before going on, since it's usermode state which we
* eventually need to restore.
*/ */
ENTRY(xen_iret_crit_fixup) ENTRY(xen_iret_crit_fixup)
/* /*
* Paranoia: Make sure we're really coming from kernel space. * Paranoia: Make sure we're really coming from kernel space.
* One could imagine a case where userspace jumps into the * One could imagine a case where userspace jumps into the
* critical range address, but just before the CPU delivers a * critical range address, but just before the CPU delivers a
* GP, it decides to deliver an interrupt instead. Unlikely? * PF, it decides to deliver an interrupt instead. Unlikely?
* Definitely. Easy to avoid? Yes. The Intel documents * Definitely. Easy to avoid? Yes.
* explicitly say that the reported EIP for a bad jump is the
* jump instruction itself, not the destination, but some
* virtual environments get this wrong.
*/ */
movl PT_CS(%esp), %ecx testb $2, 2*4(%esp) /* nested CS */
andl $SEGMENT_RPL_MASK, %ecx jnz 2f
cmpl $USER_RPL, %ecx
je 2f
lea PT_ORIG_EAX(%esp), %esi
lea PT_EFLAGS(%esp), %edi
/* /*
* If eip is before iret_restore_end then stack * If eip is before iret_restore_end then stack
* hasn't been restored yet. * hasn't been restored yet.
*/ */
cmp $iret_restore_end, %eax cmpl $iret_restore_end, 1*4(%esp)
jae 1f jae 1f
movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */ movl 4*4(%esp), %eax /* load outer EAX */
movl %eax, PT_EAX(%esp) ret $4*4 /* discard nested EIP, CS, and EFLAGS as
* well as the just restored EAX */
lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */ 1:
ret $3*4 /* discard nested EIP, CS, and EFLAGS */
/* set up the copy */
1: std
mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */
rep movsl
cld
lea 4(%edi), %esp /* point esp to new frame */
2: jmp xen_do_upcall
2:
ret
END(xen_iret_crit_fixup)

View File

@ -1043,7 +1043,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
af_alg_free_resources(areq); af_alg_free_resources(areq);
sock_put(sk); sock_put(sk);
iocb->ki_complete(iocb, err ? err : resultlen, 0); iocb->ki_complete(iocb, err ? err : (int)resultlen, 0);
} }
EXPORT_SYMBOL_GPL(af_alg_async_cb); EXPORT_SYMBOL_GPL(af_alg_async_cb);

View File

@ -213,8 +213,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
drop_alg: drop_alg:
crypto_mod_put(alg); crypto_mod_put(alg);
if (err) if (err) {
kfree_skb(skb);
return err; return err;
}
return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
} }

View File

@ -328,8 +328,10 @@ int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
drop_alg: drop_alg:
crypto_mod_put(alg); crypto_mod_put(alg);
if (err) if (err) {
kfree_skb(skb);
return err; return err;
}
return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
} }

View File

@ -1284,10 +1284,11 @@ EXPORT_SYMBOL(ecc_point_mult_shamir);
static inline void ecc_swap_digits(const u64 *in, u64 *out, static inline void ecc_swap_digits(const u64 *in, u64 *out,
unsigned int ndigits) unsigned int ndigits)
{ {
const __be64 *src = (__force __be64 *)in;
int i; int i;
for (i = 0; i < ndigits; i++) for (i = 0; i < ndigits; i++)
out[i] = __swab64(in[ndigits - 1 - i]); out[i] = be64_to_cpu(src[ndigits - 1 - i]);
} }
static int __ecc_is_key_valid(const struct ecc_curve *curve, static int __ecc_is_key_valid(const struct ecc_curve *curve,

View File

@ -277,8 +277,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
return 0; return 0;
free_range: free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start; for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
page_addr -= PAGE_SIZE) {
bool ret; bool ret;
size_t index; size_t index;
@ -291,6 +290,8 @@ free_range:
WARN_ON(!ret); WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index); trace_binder_free_lru_end(alloc, index);
if (page_addr == start)
break;
continue; continue;
err_vm_insert_page_failed: err_vm_insert_page_failed:
@ -298,7 +299,8 @@ err_vm_insert_page_failed:
page->page_ptr = NULL; page->page_ptr = NULL;
err_alloc_page_failed: err_alloc_page_failed:
err_page_ptr_cleared: err_page_ptr_cleared:
; if (page_addr == start)
break;
} }
err_no_vma: err_no_vma:
if (mm) { if (mm) {
@ -681,17 +683,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct binder_buffer *buffer; struct binder_buffer *buffer;
mutex_lock(&binder_alloc_mmap_lock); mutex_lock(&binder_alloc_mmap_lock);
if (alloc->buffer) { if (alloc->buffer_size) {
ret = -EBUSY; ret = -EBUSY;
failure_string = "already mapped"; failure_string = "already mapped";
goto err_already_mapped; goto err_already_mapped;
} }
alloc->buffer = (void __user *)vma->vm_start;
mutex_unlock(&binder_alloc_mmap_lock);
alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
SZ_4M); SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
alloc->buffer = (void __user *)vma->vm_start;
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]), sizeof(alloc->pages[0]),
GFP_KERNEL); GFP_KERNEL);
@ -722,8 +724,9 @@ err_alloc_buf_struct_failed:
kfree(alloc->pages); kfree(alloc->pages);
alloc->pages = NULL; alloc->pages = NULL;
err_alloc_pages_failed: err_alloc_pages_failed:
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer = NULL; alloc->buffer = NULL;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped: err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock); mutex_unlock(&binder_alloc_mmap_lock);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR, binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@ -841,14 +844,20 @@ void binder_alloc_print_pages(struct seq_file *m,
int free = 0; int free = 0;
mutex_lock(&alloc->mutex); mutex_lock(&alloc->mutex);
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { /*
page = &alloc->pages[i]; * Make sure the binder_alloc is fully initialized, otherwise we might
if (!page->page_ptr) * read inconsistent state.
free++; */
else if (list_empty(&page->lru)) if (binder_alloc_get_vma(alloc) != NULL) {
active++; for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
else page = &alloc->pages[i];
lru++; if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}
} }
mutex_unlock(&alloc->mutex); mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);


@ -1278,6 +1278,11 @@ struct bus_type platform_bus_type = {
}; };
EXPORT_SYMBOL_GPL(platform_bus_type); EXPORT_SYMBOL_GPL(platform_bus_type);
static inline int __platform_match(struct device *dev, const void *drv)
{
return platform_match(dev, (struct device_driver *)drv);
}
/** /**
* platform_find_device_by_driver - Find a platform device with a given * platform_find_device_by_driver - Find a platform device with a given
* driver. * driver.
@ -1288,7 +1293,7 @@ struct device *platform_find_device_by_driver(struct device *start,
const struct device_driver *drv) const struct device_driver *drv)
{ {
return bus_find_device(&platform_bus_type, start, drv, return bus_find_device(&platform_bus_type, start, drv,
(void *)platform_match); __platform_match);
} }
EXPORT_SYMBOL_GPL(platform_find_device_by_driver); EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
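
The platform.c hunk swaps a function-pointer cast for a small wrapper whose signature matches the match callback that bus_find_device() expects; calling a function through a pointer of an incompatible type is undefined behaviour and trips Control Flow Integrity checks. A self-contained sketch of the same adapter pattern, with made-up structure and function names rather than the driver-core API:

#include <stdio.h>
#include <string.h>

struct device { const char *name; };
struct driver { const char *name; };

/* The generic iterator only knows about "const void *data" callbacks. */
typedef int (*match_fn)(struct device *dev, const void *data);

static struct device *find_device(struct device *devs, int n,
                                  const void *data, match_fn match)
{
        for (int i = 0; i < n; i++)
                if (match(&devs[i], data))
                        return &devs[i];
        return NULL;
}

/* The real comparison wants a typed second argument... */
static int driver_match(struct device *dev, const struct driver *drv)
{
        return strcmp(dev->name, drv->name) == 0;
}

/* ...so a thin adapter with the expected signature casts the data pointer,
 * instead of casting the function pointer itself.
 */
static int driver_match_adapter(struct device *dev, const void *data)
{
        return driver_match(dev, (const struct driver *)data);
}

int main(void)
{
        struct device devs[] = { { "uart0" }, { "i2c1" } };
        struct driver drv = { "i2c1" };
        struct device *d = find_device(devs, 2, &drv, driver_match_adapter);

        printf("matched: %s\n", d ? d->name : "(none)");
        return 0;
}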


@ -1032,14 +1032,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
sockfd_put(sock); sockfd_put(sock);
return -ENOMEM; return -ENOMEM;
} }
config->socks = socks;
nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
if (!nsock) { if (!nsock) {
sockfd_put(sock); sockfd_put(sock);
return -ENOMEM; return -ENOMEM;
} }
config->socks = socks;
nsock->fallback_index = -1; nsock->fallback_index = -1;
nsock->dead = false; nsock->dead = false;
mutex_init(&nsock->tx_lock); mutex_init(&nsock->tx_lock);


@ -591,6 +591,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
if (*ptr == 0xc0) { if (*ptr == 0xc0) {
BT_ERR("Short BCSP packet"); BT_ERR("Short BCSP packet");
kfree_skb(bcsp->rx_skb); kfree_skb(bcsp->rx_skb);
bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_START; bcsp->rx_state = BCSP_W4_PKT_START;
bcsp->rx_count = 0; bcsp->rx_count = 0;
} else } else
@ -606,6 +607,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
BT_ERR("Error in BCSP hdr checksum"); BT_ERR("Error in BCSP hdr checksum");
kfree_skb(bcsp->rx_skb); kfree_skb(bcsp->rx_skb);
bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0; bcsp->rx_count = 0;
continue; continue;
@ -630,6 +632,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bscp_get_crc(bcsp)); bscp_get_crc(bcsp));
kfree_skb(bcsp->rx_skb); kfree_skb(bcsp->rx_skb);
bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0; bcsp->rx_count = 0;
continue; continue;


@ -621,13 +621,6 @@ static int ll_setup(struct hci_uart *hu)
serdev_device_set_flow_control(serdev, true); serdev_device_set_flow_control(serdev, true);
if (hu->oper_speed)
speed = hu->oper_speed;
else if (hu->proto->oper_speed)
speed = hu->proto->oper_speed;
else
speed = 0;
do { do {
/* Reset the Bluetooth device */ /* Reset the Bluetooth device */
gpiod_set_value_cansleep(lldev->enable_gpio, 0); gpiod_set_value_cansleep(lldev->enable_gpio, 0);
@ -639,20 +632,6 @@ static int ll_setup(struct hci_uart *hu)
return err; return err;
} }
if (speed) {
__le32 speed_le = cpu_to_le32(speed);
struct sk_buff *skb;
skb = __hci_cmd_sync(hu->hdev,
HCI_VS_UPDATE_UART_HCI_BAUDRATE,
sizeof(speed_le), &speed_le,
HCI_INIT_TIMEOUT);
if (!IS_ERR(skb)) {
kfree_skb(skb);
serdev_device_set_baudrate(serdev, speed);
}
}
err = download_firmware(lldev); err = download_firmware(lldev);
if (!err) if (!err)
break; break;
@ -677,7 +656,25 @@ static int ll_setup(struct hci_uart *hu)
} }
/* Operational speed if any */ /* Operational speed if any */
if (hu->oper_speed)
speed = hu->oper_speed;
else if (hu->proto->oper_speed)
speed = hu->proto->oper_speed;
else
speed = 0;
if (speed) {
__le32 speed_le = cpu_to_le32(speed);
struct sk_buff *skb;
skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
sizeof(speed_le), &speed_le,
HCI_INIT_TIMEOUT);
if (!IS_ERR(skb)) {
kfree_skb(skb);
serdev_device_set_baudrate(serdev, speed);
}
}
return 0; return 0;
} }


@ -713,6 +713,10 @@ static int lp_set_timeout64(unsigned int minor, void __user *arg)
if (copy_from_user(karg, arg, sizeof(karg))) if (copy_from_user(karg, arg, sizeof(karg)))
return -EFAULT; return -EFAULT;
/* sparc64 suseconds_t is 32-bit only */
if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
karg[1] >>= 32;
return lp_set_timeout(minor, karg[0], karg[1]); return lp_set_timeout(minor, karg[0], karg[1]);
} }


@ -933,6 +933,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr); struct freq_attr *fattr = to_attr(attr);
ssize_t ret; ssize_t ret;
if (!fattr->show)
return -EIO;
down_read(&policy->rwsem); down_read(&policy->rwsem);
ret = fattr->show(policy, buf); ret = fattr->show(policy, buf);
up_read(&policy->rwsem); up_read(&policy->rwsem);
@ -947,6 +950,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr); struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL; ssize_t ret = -EINVAL;
if (!fattr->store)
return -EIO;
/* /*
* cpus_read_trylock() is used here to work around a circular lock * cpus_read_trylock() is used here to work around a circular lock
* dependency problem with respect to the cpufreq_register_driver(). * dependency problem with respect to the cpufreq_register_driver().


@ -52,7 +52,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
*/ */
if (mkt_segment == 0 && speed_grade == 0) { if (mkt_segment == 0 && speed_grade == 0) {
if (of_machine_is_compatible("fsl,imx8mm") || if (of_machine_is_compatible("fsl,imx8mm") ||
of_machine_is_compatible("fsl,imx8mq")) of_machine_is_compatible("fsl,imx8mq"))
speed_grade = 1; speed_grade = 1;
if (of_machine_is_compatible("fsl,imx8mn")) if (of_machine_is_compatible("fsl,imx8mn"))
speed_grade = 0xb; speed_grade = 0xb;


@ -287,6 +287,7 @@ config CRYPTO_DEV_TALITOS
select CRYPTO_AUTHENC select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER select CRYPTO_BLKCIPHER
select CRYPTO_HASH select CRYPTO_HASH
select CRYPTO_LIB_DES
select HW_RANDOM select HW_RANDOM
depends on FSL_SOC depends on FSL_SOC
help help


@ -365,12 +365,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
dma_alloc_coherent(dev->core_dev->device, dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD, PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC); &dev->scatter_buffer_pa, GFP_ATOMIC);
if (!dev->scatter_buffer_va) { if (!dev->scatter_buffer_va)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
dev->sdr, dev->sdr_pa);
return -ENOMEM; return -ENOMEM;
}
for (i = 0; i < PPC4XX_NUM_SD; i++) { for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa + dev->sdr[i].ptr = dev->scatter_buffer_pa +


@ -490,6 +490,29 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err); static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
#endif #endif
static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
{
struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
if (req->nbytes < ivsize)
return;
if (rctx->mode & AES_FLAGS_ENCRYPT) {
scatterwalk_map_and_copy(req->info, req->dst,
req->nbytes - ivsize, ivsize, 0);
} else {
if (req->src == req->dst)
memcpy(req->info, rctx->lastc, ivsize);
else
scatterwalk_map_and_copy(req->info, req->src,
req->nbytes - ivsize,
ivsize, 0);
}
}
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{ {
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
@ -500,26 +523,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk); clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY; dd->flags &= ~AES_FLAGS_BUSY;
if (!dd->ctx->is_aead) { if (!dd->ctx->is_aead)
struct ablkcipher_request *req = atmel_aes_set_iv_as_last_ciphertext_block(dd);
ablkcipher_request_cast(dd->areq);
struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct crypto_ablkcipher *ablkcipher =
crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
if (rctx->mode & AES_FLAGS_ENCRYPT) {
scatterwalk_map_and_copy(req->info, req->dst,
req->nbytes - ivsize, ivsize, 0);
} else {
if (req->src == req->dst) {
memcpy(req->info, rctx->lastc, ivsize);
} else {
scatterwalk_map_and_copy(req->info, req->src,
req->nbytes - ivsize, ivsize, 0);
}
}
}
if (dd->is_async) if (dd->is_async)
dd->areq->complete(dd->areq, err); dd->areq->complete(dd->areq, err);
@ -1125,10 +1130,12 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode; rctx->mode = mode;
if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) { if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
int ivsize = crypto_ablkcipher_ivsize(ablkcipher); unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
scatterwalk_map_and_copy(rctx->lastc, req->src, if (req->nbytes >= ivsize)
(req->nbytes - ivsize), ivsize, 0); scatterwalk_map_and_copy(rctx->lastc, req->src,
req->nbytes - ivsize,
ivsize, 0);
} }
return atmel_aes_handle_queue(dd, &req->base); return atmel_aes_handle_queue(dd, &req->base);


@ -342,6 +342,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
desc->tx_desc.flags = flags; desc->tx_desc.flags = flags;
desc->tx_desc.tx_submit = ccp_tx_submit; desc->tx_desc.tx_submit = ccp_tx_submit;
desc->ccp = chan->ccp; desc->ccp = chan->ccp;
INIT_LIST_HEAD(&desc->entry);
INIT_LIST_HEAD(&desc->pending); INIT_LIST_HEAD(&desc->pending);
INIT_LIST_HEAD(&desc->active); INIT_LIST_HEAD(&desc->active);
desc->status = DMA_IN_PROGRESS; desc->status = DMA_IN_PROGRESS;


@ -10,6 +10,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <crypto/algapi.h> #include <crypto/algapi.h>
#include <crypto/aes.h> #include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/delay.h> #include <linux/delay.h>
@ -166,13 +167,15 @@ static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
/* /*
* The requested key size is not supported by HW, do a fallback * The requested key size is not supported by HW, do a fallback
*/ */
op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; crypto_sync_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK);
op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(op->fallback.blk,
tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
ret = crypto_blkcipher_setkey(op->fallback.blk, key, len); ret = crypto_sync_skcipher_setkey(op->fallback.blk, key, len);
if (ret) { if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK); tfm->crt_flags |= crypto_sync_skcipher_get_flags(op->fallback.blk) &
CRYPTO_TFM_RES_MASK;
} }
return ret; return ret;
} }
@ -181,33 +184,28 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src, struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes) unsigned int nbytes)
{ {
unsigned int ret;
struct crypto_blkcipher *tfm;
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
tfm = desc->tfm; skcipher_request_set_sync_tfm(req, op->fallback.blk);
desc->tfm = op->fallback.blk; skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); return crypto_skcipher_decrypt(req);
desc->tfm = tfm;
return ret;
} }
static int fallback_blk_enc(struct blkcipher_desc *desc, static int fallback_blk_enc(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src, struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes) unsigned int nbytes)
{ {
unsigned int ret;
struct crypto_blkcipher *tfm;
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
tfm = desc->tfm; skcipher_request_set_sync_tfm(req, op->fallback.blk);
desc->tfm = op->fallback.blk; skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); return crypto_skcipher_encrypt(req);
desc->tfm = tfm;
return ret;
} }
static void static void
@ -307,6 +305,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk; struct blkcipher_walk walk;
int err, ret; int err, ret;
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(op->keylen != AES_KEYSIZE_128)) if (unlikely(op->keylen != AES_KEYSIZE_128))
return fallback_blk_dec(desc, dst, src, nbytes); return fallback_blk_dec(desc, dst, src, nbytes);
@ -339,6 +340,9 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk; struct blkcipher_walk walk;
int err, ret; int err, ret;
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(op->keylen != AES_KEYSIZE_128)) if (unlikely(op->keylen != AES_KEYSIZE_128))
return fallback_blk_enc(desc, dst, src, nbytes); return fallback_blk_enc(desc, dst, src, nbytes);
@ -366,9 +370,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm); const char *name = crypto_tfm_alg_name(tfm);
struct geode_aes_op *op = crypto_tfm_ctx(tfm); struct geode_aes_op *op = crypto_tfm_ctx(tfm);
op->fallback.blk = crypto_alloc_blkcipher(name, 0, op->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback.blk)) { if (IS_ERR(op->fallback.blk)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name); printk(KERN_ERR "Error allocating fallback algo %s\n", name);
return PTR_ERR(op->fallback.blk); return PTR_ERR(op->fallback.blk);
@ -381,7 +384,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
{ {
struct geode_aes_op *op = crypto_tfm_ctx(tfm); struct geode_aes_op *op = crypto_tfm_ctx(tfm);
crypto_free_blkcipher(op->fallback.blk); crypto_free_sync_skcipher(op->fallback.blk);
op->fallback.blk = NULL; op->fallback.blk = NULL;
} }
@ -420,6 +423,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk; struct blkcipher_walk walk;
int err, ret; int err, ret;
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(op->keylen != AES_KEYSIZE_128)) if (unlikely(op->keylen != AES_KEYSIZE_128))
return fallback_blk_dec(desc, dst, src, nbytes); return fallback_blk_dec(desc, dst, src, nbytes);
@ -450,6 +456,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk; struct blkcipher_walk walk;
int err, ret; int err, ret;
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(op->keylen != AES_KEYSIZE_128)) if (unlikely(op->keylen != AES_KEYSIZE_128))
return fallback_blk_enc(desc, dst, src, nbytes); return fallback_blk_enc(desc, dst, src, nbytes);


@ -60,7 +60,7 @@ struct geode_aes_op {
u8 *iv; u8 *iv;
union { union {
struct crypto_blkcipher *blk; struct crypto_sync_skcipher *blk;
struct crypto_cipher *cip; struct crypto_cipher *cip;
} fallback; } fallback;
u32 keylen; u32 keylen;


@ -221,9 +221,9 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
/* Step #3: Determine log2 of hash table size */ /* Step #3: Determine log2 of hash table size */
cs_ht_sz = __fls(asize - cs_rc_max) - 2; cs_ht_sz = __fls(asize - cs_rc_max) - 2;
/* Step #4: determine current size of hash table in dwords */ /* Step #4: determine current size of hash table in dwords */
cs_ht_wc = 16<<cs_ht_sz; /* dwords, not admin words */ cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
/* Step #5: add back excess words and see if we can fit more records */ /* Step #5: add back excess words and see if we can fit more records */
cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4)); cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
/* Clear the cache RAMs */ /* Clear the cache RAMs */
eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc); eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);


@ -26,9 +26,18 @@ struct ghes_edac_pvt {
char msg[80]; char msg[80];
}; };
static atomic_t ghes_init = ATOMIC_INIT(0); static refcount_t ghes_refcount = REFCOUNT_INIT(0);
/*
* Access to ghes_pvt must be protected by ghes_lock. The spinlock
* also provides the necessary (implicit) memory barrier for the SMP
* case to make the pointer visible on another CPU.
*/
static struct ghes_edac_pvt *ghes_pvt; static struct ghes_edac_pvt *ghes_pvt;
/* GHES registration mutex */
static DEFINE_MUTEX(ghes_reg_mutex);
/* /*
* Sync with other, potentially concurrent callers of * Sync with other, potentially concurrent callers of
* ghes_edac_report_mem_error(). We don't know what the * ghes_edac_report_mem_error(). We don't know what the
@ -79,9 +88,8 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
(*num_dimm)++; (*num_dimm)++;
} }
static int get_dimm_smbios_index(u16 handle) static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
{ {
struct mem_ctl_info *mci = ghes_pvt->mci;
int i; int i;
for (i = 0; i < mci->tot_dimms; i++) { for (i = 0; i < mci->tot_dimms; i++) {
@ -198,14 +206,11 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
enum hw_event_mc_err_type type; enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e; struct edac_raw_error_desc *e;
struct mem_ctl_info *mci; struct mem_ctl_info *mci;
struct ghes_edac_pvt *pvt = ghes_pvt; struct ghes_edac_pvt *pvt;
unsigned long flags; unsigned long flags;
char *p; char *p;
u8 grain_bits; u8 grain_bits;
if (!pvt)
return;
/* /*
* We can do the locking below because GHES defers error processing * We can do the locking below because GHES defers error processing
* from NMI to IRQ context. Whenever that changes, we'd at least * from NMI to IRQ context. Whenever that changes, we'd at least
@ -216,6 +221,10 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
spin_lock_irqsave(&ghes_lock, flags); spin_lock_irqsave(&ghes_lock, flags);
pvt = ghes_pvt;
if (!pvt)
goto unlock;
mci = pvt->mci; mci = pvt->mci;
e = &mci->error_desc; e = &mci->error_desc;
@ -348,7 +357,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "DIMM DMI handle: 0x%.4x ", p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
mem_err->mem_dev_handle); mem_err->mem_dev_handle);
index = get_dimm_smbios_index(mem_err->mem_dev_handle); index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
if (index >= 0) { if (index >= 0) {
e->top_layer = index; e->top_layer = index;
e->enable_per_layer_report = true; e->enable_per_layer_report = true;
@ -443,6 +452,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
grain_bits, e->syndrome, pvt->detail_location); grain_bits, e->syndrome, pvt->detail_location);
edac_raw_mc_handle_error(type, mci, e); edac_raw_mc_handle_error(type, mci, e);
unlock:
spin_unlock_irqrestore(&ghes_lock, flags); spin_unlock_irqrestore(&ghes_lock, flags);
} }
@ -457,10 +468,12 @@ static struct acpi_platform_list plat_list[] = {
int ghes_edac_register(struct ghes *ghes, struct device *dev) int ghes_edac_register(struct ghes *ghes, struct device *dev)
{ {
bool fake = false; bool fake = false;
int rc, num_dimm = 0; int rc = 0, num_dimm = 0;
struct mem_ctl_info *mci; struct mem_ctl_info *mci;
struct ghes_edac_pvt *pvt;
struct edac_mc_layer layers[1]; struct edac_mc_layer layers[1];
struct ghes_edac_dimm_fill dimm_fill; struct ghes_edac_dimm_fill dimm_fill;
unsigned long flags;
int idx = -1; int idx = -1;
if (IS_ENABLED(CONFIG_X86)) { if (IS_ENABLED(CONFIG_X86)) {
@ -472,11 +485,14 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
idx = 0; idx = 0;
} }
/* finish another registration/unregistration instance first */
mutex_lock(&ghes_reg_mutex);
/* /*
* We have only one logical memory controller to which all DIMMs belong. * We have only one logical memory controller to which all DIMMs belong.
*/ */
if (atomic_inc_return(&ghes_init) > 1) if (refcount_inc_not_zero(&ghes_refcount))
return 0; goto unlock;
/* Get the number of DIMMs */ /* Get the number of DIMMs */
dmi_walk(ghes_edac_count_dimms, &num_dimm); dmi_walk(ghes_edac_count_dimms, &num_dimm);
@ -494,12 +510,13 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt)); mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
if (!mci) { if (!mci) {
pr_info("Can't allocate memory for EDAC data\n"); pr_info("Can't allocate memory for EDAC data\n");
return -ENOMEM; rc = -ENOMEM;
goto unlock;
} }
ghes_pvt = mci->pvt_info; pvt = mci->pvt_info;
ghes_pvt->ghes = ghes; pvt->ghes = ghes;
ghes_pvt->mci = mci; pvt->mci = mci;
mci->pdev = dev; mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY; mci->mtype_cap = MEM_FLAG_EMPTY;
@ -541,23 +558,48 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (rc < 0) { if (rc < 0) {
pr_info("Can't register at EDAC core\n"); pr_info("Can't register at EDAC core\n");
edac_mc_free(mci); edac_mc_free(mci);
return -ENODEV; rc = -ENODEV;
goto unlock;
} }
return 0;
spin_lock_irqsave(&ghes_lock, flags);
ghes_pvt = pvt;
spin_unlock_irqrestore(&ghes_lock, flags);
/* only increment on success */
refcount_inc(&ghes_refcount);
unlock:
mutex_unlock(&ghes_reg_mutex);
return rc;
} }
void ghes_edac_unregister(struct ghes *ghes) void ghes_edac_unregister(struct ghes *ghes)
{ {
struct mem_ctl_info *mci; struct mem_ctl_info *mci;
unsigned long flags;
if (!ghes_pvt) mutex_lock(&ghes_reg_mutex);
return;
if (atomic_dec_return(&ghes_init)) if (!refcount_dec_and_test(&ghes_refcount))
return; goto unlock;
mci = ghes_pvt->mci; /*
* Wait for the irq handler being finished.
*/
spin_lock_irqsave(&ghes_lock, flags);
mci = ghes_pvt ? ghes_pvt->mci : NULL;
ghes_pvt = NULL; ghes_pvt = NULL;
edac_mc_del_mc(mci->pdev); spin_unlock_irqrestore(&ghes_lock, flags);
edac_mc_free(mci);
if (!mci)
goto unlock;
mci = edac_mc_del_mc(mci->pdev);
if (mci)
edac_mc_free(mci);
unlock:
mutex_unlock(&ghes_reg_mutex);
} }
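
The ghes_edac rework above serializes registration and unregistration behind a mutex, shares the one memory controller through a reference count, and publishes the private pointer only after setup has fully succeeded. A small userspace sketch of that first-caller-creates / last-caller-destroys pattern, using pthreads in place of the kernel primitives (the names and the malloc'd state are illustrative only):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t reg_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int refcount;           /* protected by reg_mutex */
static void *shared_state;              /* created by the first registrant */

static int do_register(void)
{
        int rc = 0;

        pthread_mutex_lock(&reg_mutex);
        if (refcount) {                 /* already set up: just take a reference */
                refcount++;
                goto unlock;
        }

        shared_state = malloc(64);      /* stand-in for the real setup work */
        if (!shared_state) {
                rc = -1;
                goto unlock;
        }
        refcount = 1;                   /* count the reference only on success */
unlock:
        pthread_mutex_unlock(&reg_mutex);
        return rc;
}

static void do_unregister(void)
{
        pthread_mutex_lock(&reg_mutex);
        if (refcount && --refcount == 0) {      /* last user tears everything down */
                free(shared_state);
                shared_state = NULL;
        }
        pthread_mutex_unlock(&reg_mutex);
}

int main(void)
{
        do_register();
        do_register();
        do_unregister();
        do_unregister();
        printf("state after last unregister: %p\n", shared_state);
        return 0;
}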


@ -212,8 +212,14 @@ retry:
drm_for_each_plane(plane, fb->dev) { drm_for_each_plane(plane, fb->dev) {
struct drm_plane_state *plane_state; struct drm_plane_state *plane_state;
if (plane->state->fb != fb) ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
if (ret)
goto out;
if (plane->state->fb != fb) {
drm_modeset_unlock(&plane->mutex);
continue; continue;
}
plane_state = drm_atomic_get_plane_state(state, plane); plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) { if (IS_ERR(plane_state)) {


@ -728,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev,
if (nbox > I810_NR_SAREA_CLIPRECTS) if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS; nbox = I810_NR_SAREA_CLIPRECTS;
if (used > 4 * 1024) if (used < 0 || used > 4 * 1024)
used = 0; used = 0;
if (sarea_priv->dirty) if (sarea_priv->dirty)
@ -1048,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in
if (u != I810_BUF_CLIENT) if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n"); DRM_DEBUG("MC found buffer that isn't mine!\n");
if (used > 4 * 1024) if (used < 0 || used > 4 * 1024)
used = 0; used = 0;
sarea_priv->dirty = 0x7f; sarea_priv->dirty = 0x7f;


@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev)
} }
if (!match) { if (!match) {
dev_err(dev, "no matching components\n"); dev_err(dev, "no matching components\n");
return -ENODEV; ret = -ENODEV;
goto clk_disable;
} }
if (IS_ERR(match)) { if (IS_ERR(match)) {
dev_err(dev, "could not create component match\n"); dev_err(dev, "could not create component match\n");


@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
struct msm_gpu_show_priv *show_priv = m->private; struct msm_gpu_show_priv *show_priv = m->private;
struct msm_drm_private *priv = show_priv->dev->dev_private; struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu; struct msm_gpu *gpu = priv->gpu;
int ret;
ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
if (ret)
return ret;
mutex_lock(&show_priv->dev->struct_mutex);
gpu->funcs->gpu_state_put(show_priv->state); gpu->funcs->gpu_state_put(show_priv->state);
mutex_unlock(&show_priv->dev->struct_mutex); mutex_unlock(&show_priv->dev->struct_mutex);


@ -211,6 +211,18 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
return 0; /* we know nothing about this usage type */ return 0; /* we know nothing about this usage type */
} }
/*
* Concatenate usage which defines 16 bits or less with the
* currently defined usage page to form a 32 bit usage
*/
static void complete_usage(struct hid_parser *parser, unsigned int index)
{
parser->local.usage[index] &= 0xFFFF;
parser->local.usage[index] |=
(parser->global.usage_page & 0xFFFF) << 16;
}
/* /*
* Add a usage to the temporary parser table. * Add a usage to the temporary parser table.
*/ */
@ -222,6 +234,14 @@ static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
return -1; return -1;
} }
parser->local.usage[parser->local.usage_index] = usage; parser->local.usage[parser->local.usage_index] = usage;
/*
* If Usage item only includes usage id, concatenate it with
* currently defined usage page
*/
if (size <= 2)
complete_usage(parser, parser->local.usage_index);
parser->local.usage_size[parser->local.usage_index] = size; parser->local.usage_size[parser->local.usage_index] = size;
parser->local.collection_index[parser->local.usage_index] = parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ? parser->collection_stack_ptr ?
@ -543,13 +563,32 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
* usage value." * usage value."
*/ */
static void hid_concatenate_usage_page(struct hid_parser *parser) static void hid_concatenate_last_usage_page(struct hid_parser *parser)
{ {
int i; int i;
unsigned int usage_page;
unsigned int current_page;
for (i = 0; i < parser->local.usage_index; i++) if (!parser->local.usage_index)
if (parser->local.usage_size[i] <= 2) return;
parser->local.usage[i] += parser->global.usage_page << 16;
usage_page = parser->global.usage_page;
/*
* Concatenate usage page again only if last declared Usage Page
* has not been already used in previous usages concatenation
*/
for (i = parser->local.usage_index - 1; i >= 0; i--) {
if (parser->local.usage_size[i] > 2)
/* Ignore extended usages */
continue;
current_page = parser->local.usage[i] >> 16;
if (current_page == usage_page)
break;
complete_usage(parser, i);
}
} }
/* /*
@ -561,7 +600,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
__u32 data; __u32 data;
int ret; int ret;
hid_concatenate_usage_page(parser); hid_concatenate_last_usage_page(parser);
data = item_udata(item); data = item_udata(item);
@ -772,7 +811,7 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
__u32 data; __u32 data;
int i; int i;
hid_concatenate_usage_page(parser); hid_concatenate_last_usage_page(parser);
data = item_udata(item); data = item_udata(item);
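
The hid-core change builds each 32-bit usage by pairing a short Usage item with the Usage Page in force when that item was parsed, and the end-of-item fix-up re-applies the last Usage Page only to usages that were not already completed. A minimal sketch of just the combining step, with illustrative usage-page and usage-id values:

#include <stdint.h>
#include <stdio.h>

/* Combine a 16-bit usage id with a 16-bit usage page into one 32-bit usage. */
static uint32_t make_usage(uint32_t usage_page, uint32_t usage_id)
{
        return ((usage_page & 0xffff) << 16) | (usage_id & 0xffff);
}

int main(void)
{
        uint32_t generic_desktop = 0x01;        /* Usage Page (Generic Desktop) */
        uint32_t usage_x = 0x30;                /* short Usage item (X axis) */
        uint32_t extended = 0x000c00e9;         /* 32-bit usage: Consumer / Volume Up */

        /* Short usage gets the current page in its upper 16 bits: 0x00010030. */
        printf("0x%08x\n", (unsigned int)make_usage(generic_desktop, usage_x));
        /* An extended usage already carries its page and is left alone. */
        printf("0x%08x\n", (unsigned int)extended);
        return 0;
}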


@ -652,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev,
if (kstrtoul(buf, 16, &val)) if (kstrtoul(buf, 16, &val))
return -EINVAL; return -EINVAL;
/* mask off max threshold before checking min value */
val &= ETM_CYC_THRESHOLD_MASK;
if (val < drvdata->ccitmin) if (val < drvdata->ccitmin)
return -EINVAL; return -EINVAL;
config->ccctlr = val & ETM_CYC_THRESHOLD_MASK; config->ccctlr = val;
return size; return size;
} }
static DEVICE_ATTR_RW(cyc_threshold); static DEVICE_ATTR_RW(cyc_threshold);
@ -686,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev,
return -EINVAL; return -EINVAL;
if (!drvdata->nr_addr_cmp) if (!drvdata->nr_addr_cmp)
return -EINVAL; return -EINVAL;
/* /*
* Bit[7:0] selects which address range comparator is used for * Bit[8] controls include(1) / exclude(0), bits[0-7] select
* branch broadcast control. * individual range comparators. If include then at least 1
* range must be selected.
*/ */
if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
return -EINVAL; return -EINVAL;
config->bb_ctrl = val; config->bb_ctrl = val & GENMASK(8, 0);
return size; return size;
} }
static DEVICE_ATTR_RW(bb_ctrl); static DEVICE_ATTR_RW(bb_ctrl);
@ -1324,8 +1329,8 @@ static ssize_t seq_event_store(struct device *dev,
spin_lock(&drvdata->spinlock); spin_lock(&drvdata->spinlock);
idx = config->seq_idx; idx = config->seq_idx;
/* RST, bits[7:0] */ /* Seq control has two masks B[15:8] F[7:0] */
config->seq_ctrl[idx] = val & 0xFF; config->seq_ctrl[idx] = val & 0xFFFF;
spin_unlock(&drvdata->spinlock); spin_unlock(&drvdata->spinlock);
return size; return size;
} }
@ -1580,7 +1585,7 @@ static ssize_t res_ctrl_store(struct device *dev,
if (idx % 2 != 0) if (idx % 2 != 0)
/* PAIRINV, bit[21] */ /* PAIRINV, bit[21] */
val &= ~BIT(21); val &= ~BIT(21);
config->res_ctrl[idx] = val; config->res_ctrl[idx] = val & GENMASK(21, 0);
spin_unlock(&drvdata->spinlock); spin_unlock(&drvdata->spinlock);
return size; return size;
} }


@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj,
struct qib_pportdata *ppd = struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj); container_of(kobj, struct qib_pportdata, pport_kobj);
if (!pattr->show)
return -EIO;
return pattr->show(ppd, buf); return pattr->show(ppd, buf);
} }
@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
struct qib_pportdata *ppd = struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj); container_of(kobj, struct qib_pportdata, pport_kobj);
if (!pattr->store)
return -EIO;
return pattr->store(ppd, buf, len); return pattr->store(ppd, buf, len);
} }


@ -292,7 +292,7 @@ static int psxpad_spi_probe(struct spi_device *spi)
if (!pad) if (!pad)
return -ENOMEM; return -ENOMEM;
pdev = input_allocate_polled_device(); pdev = devm_input_allocate_polled_device(&spi->dev);
if (!pdev) { if (!pdev) {
dev_err(&spi->dev, "failed to allocate input device\n"); dev_err(&spi->dev, "failed to allocate input device\n");
return -ENOMEM; return -ENOMEM;


@ -172,6 +172,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0071", /* T480 */ "LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */ "LEN0073", /* X1 Carbon G5 (Elantech) */
"LEN0091", /* X1 Carbon 6 */
"LEN0092", /* X1 Carbon 6 */ "LEN0092", /* X1 Carbon 6 */
"LEN0093", /* T480 */ "LEN0093", /* T480 */
"LEN0096", /* X280 */ "LEN0096", /* X280 */


@ -1189,6 +1189,9 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
{ {
int ret; int ret;
f34->fn->rmi_dev->driver->set_irq_bits(f34->fn->rmi_dev,
f34->fn->irq_mask);
rmi_f34v7_read_queries_bl_version(f34); rmi_f34v7_read_queries_bl_version(f34);
f34->v7.image = fw->data; f34->v7.image = fw->data;


@ -163,7 +163,6 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to write next block of bytes */ /* prepare to write next block of bytes */
cur_len -= SMB_MAX_COUNT; cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT; databuff += SMB_MAX_COUNT;
rmiaddr += SMB_MAX_COUNT;
} }
exit: exit:
mutex_unlock(&rmi_smb->page_mutex); mutex_unlock(&rmi_smb->page_mutex);
@ -215,7 +214,6 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to read next block of bytes */ /* prepare to read next block of bytes */
cur_len -= SMB_MAX_COUNT; cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT; databuff += SMB_MAX_COUNT;
rmiaddr += SMB_MAX_COUNT;
} }
retval = 0; retval = 0;


@ -128,6 +128,15 @@ static const unsigned long goodix_irq_flags[] = {
*/ */
static const struct dmi_system_id rotated_screen[] = { static const struct dmi_system_id rotated_screen[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86) #if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
.ident = "Teclast X89",
.matches = {
/* tPAD is too generic, also match on bios date */
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
},
},
{ {
.ident = "WinBook TW100", .ident = "WinBook TW100",
.matches = { .matches = {


@ -657,7 +657,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK; hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK; hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
err = platform_get_irq_byname(pdev, "doorbell"); err = platform_get_irq_byname_optional(pdev, "doorbell");
if (err >= 0) if (err >= 0)
hsp->doorbell_irq = err; hsp->doorbell_irq = err;
@ -677,7 +677,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
if (!name) if (!name)
return -ENOMEM; return -ENOMEM;
err = platform_get_irq_byname(pdev, name); err = platform_get_irq_byname_optional(pdev, name);
if (err >= 0) { if (err >= 0) {
hsp->shared_irqs[i] = err; hsp->shared_irqs[i] = err;
count++; count++;


@ -2700,21 +2700,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
} }
ret = -ENOMEM; ret = -ENOMEM;
cc->io_queue = alloc_workqueue("kcryptd_io/%s", cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1, devname);
if (!cc->io_queue) { if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue"; ti->error = "Couldn't create kcryptd io queue";
goto bad; goto bad;
} }
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
cc->crypt_queue = alloc_workqueue("kcryptd/%s", cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1, devname); 1, devname);
else else
cc->crypt_queue = alloc_workqueue("kcryptd/%s", cc->crypt_queue = alloc_workqueue("kcryptd/%s",
WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus(), devname); num_online_cpus(), devname);
if (!cc->crypt_queue) { if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue"; ti->error = "Couldn't create kcryptd queue";


@ -615,7 +615,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
tmp_dev = map_sector(mddev, zone, sector, &sector); tmp_dev = map_sector(mddev, zone, sector, &sector);
break; break;
default: default:
WARN("md/raid0:%s: Invalid layout\n", mdname(mddev)); WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
bio_io_error(bio); bio_io_error(bio);
return true; return true;
} }


@ -191,7 +191,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
out_free_pages: out_free_pages:
while (--j >= 0) while (--j >= 0)
resync_free_pages(&rps[j * 2]); resync_free_pages(&rps[j]);
j = 0; j = 0;
out_free_bio: out_free_bio:


@ -796,7 +796,11 @@ static int vivid_thread_vid_cap(void *data)
if (kthread_should_stop()) if (kthread_should_stop())
break; break;
mutex_lock(&dev->mutex); if (!mutex_trylock(&dev->mutex)) {
schedule_timeout_uninterruptible(1);
continue;
}
cur_jiffies = jiffies; cur_jiffies = jiffies;
if (dev->cap_seq_resync) { if (dev->cap_seq_resync) {
dev->jiffies_vid_cap = cur_jiffies; dev->jiffies_vid_cap = cur_jiffies;
@ -956,8 +960,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
/* shutdown control thread */ /* shutdown control thread */
vivid_grab_controls(dev, false); vivid_grab_controls(dev, false);
mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_cap); kthread_stop(dev->kthread_vid_cap);
dev->kthread_vid_cap = NULL; dev->kthread_vid_cap = NULL;
mutex_lock(&dev->mutex);
} }


@ -143,7 +143,11 @@ static int vivid_thread_vid_out(void *data)
if (kthread_should_stop()) if (kthread_should_stop())
break; break;
mutex_lock(&dev->mutex); if (!mutex_trylock(&dev->mutex)) {
schedule_timeout_uninterruptible(1);
continue;
}
cur_jiffies = jiffies; cur_jiffies = jiffies;
if (dev->out_seq_resync) { if (dev->out_seq_resync) {
dev->jiffies_vid_out = cur_jiffies; dev->jiffies_vid_out = cur_jiffies;
@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
/* shutdown control thread */ /* shutdown control thread */
vivid_grab_controls(dev, false); vivid_grab_controls(dev, false);
mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_out); kthread_stop(dev->kthread_vid_out);
dev->kthread_vid_out = NULL; dev->kthread_vid_out = NULL;
mutex_lock(&dev->mutex);
} }


@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data)
if (kthread_should_stop()) if (kthread_should_stop())
break; break;
mutex_lock(&dev->mutex); if (!mutex_trylock(&dev->mutex)) {
schedule_timeout_uninterruptible(1);
continue;
}
cur_jiffies = jiffies; cur_jiffies = jiffies;
if (dev->sdr_cap_seq_resync) { if (dev->sdr_cap_seq_resync) {
dev->jiffies_sdr_cap = cur_jiffies; dev->jiffies_sdr_cap = cur_jiffies;
@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
} }
/* shutdown control thread */ /* shutdown control thread */
mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_sdr_cap); kthread_stop(dev->kthread_sdr_cap);
dev->kthread_sdr_cap = NULL; dev->kthread_sdr_cap = NULL;
mutex_lock(&dev->mutex);
} }
static void sdr_cap_buf_request_complete(struct vb2_buffer *vb) static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)


@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_out_q)) if (vb2_is_streaming(&dev->vb_vid_out_q))
dev->can_loop_video = vivid_vid_can_loop(dev); dev->can_loop_video = vivid_vid_can_loop(dev);
if (dev->kthread_vid_cap)
return 0;
dev->vid_cap_seq_count = 0; dev->vid_cap_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__); dprintk(dev, 1, "%s\n", __func__);
for (i = 0; i < VIDEO_MAX_FRAME; i++) for (i = 0; i < VIDEO_MAX_FRAME; i++)


@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_cap_q)) if (vb2_is_streaming(&dev->vb_vid_cap_q))
dev->can_loop_video = vivid_vid_can_loop(dev); dev->can_loop_video = vivid_vid_can_loop(dev);
if (dev->kthread_vid_out)
return 0;
dev->vid_out_seq_count = 0; dev->vid_out_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__); dprintk(dev, 1, "%s\n", __func__);
if (dev->start_streaming_error) { if (dev->start_streaming_error) {


@ -1598,8 +1598,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags); spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */ /* send touchscreen events through input subsystem if touchpad data */
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 && if (ictx->touch && len == 8 && buf[7] == 0x86) {
buf[7] == 0x86) {
imon_touch_event(ictx, buf); imon_touch_event(ictx, buf);
return; return;


@ -564,7 +564,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
datasize = 4; datasize = 4;
break; break;
case MCE_CMD_G_REVISION: case MCE_CMD_G_REVISION:
datasize = 2; datasize = 4;
break; break;
case MCE_RSP_EQWAKESUPPORT: case MCE_RSP_EQWAKESUPPORT:
case MCE_RSP_GETWAKESOURCE: case MCE_RSP_GETWAKESOURCE:
@ -600,14 +600,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
char *inout; char *inout;
u8 cmd, subcmd, *data; u8 cmd, subcmd, *data;
struct device *dev = ir->dev; struct device *dev = ir->dev;
int start, skip = 0;
u32 carrier, period; u32 carrier, period;
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */ if (offset < 0 || offset >= buf_len)
if (ir->flags.microsoft_gen1 && !out && !offset)
skip = 2;
if (len <= skip)
return; return;
dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)", dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
@ -616,11 +611,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
inout = out ? "Request" : "Got"; inout = out ? "Request" : "Got";
start = offset + skip; cmd = buf[offset];
cmd = buf[start] & 0xff; subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
subcmd = buf[start + 1] & 0xff; data = &buf[offset] + 2;
data = buf + start + 2;
/* Trace meaningless 0xb1 0x60 header bytes on original receiver */
if (ir->flags.microsoft_gen1 && !out && !offset) {
dev_dbg(dev, "MCE gen 1 header");
return;
}
/* Trace IR data header or trailer */
if (cmd != MCE_CMD_PORT_IR &&
(cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) {
if (cmd == MCE_IRDATA_TRAILER)
dev_dbg(dev, "End of raw IR data");
else
dev_dbg(dev, "Raw IR data, %d pulse/space samples",
cmd & MCE_PACKET_LENGTH_MASK);
return;
}
/* Unexpected end of buffer? */
if (offset + len > buf_len)
return;
/* Decode MCE command/response */
switch (cmd) { switch (cmd) {
case MCE_CMD_NULL: case MCE_CMD_NULL:
if (subcmd == MCE_CMD_NULL) if (subcmd == MCE_CMD_NULL)
@ -644,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
dev_dbg(dev, "Get hw/sw rev?"); dev_dbg(dev, "Get hw/sw rev?");
else else
dev_dbg(dev, "hw/sw rev %*ph", dev_dbg(dev, "hw/sw rev %*ph",
4, &buf[start + 2]); 4, &buf[offset + 2]);
break; break;
case MCE_CMD_RESUME: case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested"); dev_dbg(dev, "Device resume requested");
@ -746,13 +762,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
default: default:
break; break;
} }
if (cmd == MCE_IRDATA_TRAILER)
dev_dbg(dev, "End of raw IR data");
else if ((cmd != MCE_CMD_PORT_IR) &&
((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
dev_dbg(dev, "Raw IR data, %d pulse/space samples",
cmd & MCE_PACKET_LENGTH_MASK);
#endif #endif
} }
@ -1136,32 +1145,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
} }
/* /*
* Handle PORT_SYS/IR command response received from the MCE device.
*
* Assumes single response with all its data (not truncated)
* in buf_in[]. The response itself determines its total length
* (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
*
* We don't do anything but print debug spew for many of the command bits * We don't do anything but print debug spew for many of the command bits
* we receive from the hardware, but some of them are useful information * we receive from the hardware, but some of them are useful information
* we want to store so that we can use them. * we want to store so that we can use them.
*/ */
static void mceusb_handle_command(struct mceusb_dev *ir, int index) static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
{ {
u8 cmd = buf_in[0];
u8 subcmd = buf_in[1];
u8 *hi = &buf_in[2]; /* read only when required */
u8 *lo = &buf_in[3]; /* read only when required */
struct ir_raw_event rawir = {}; struct ir_raw_event rawir = {};
u8 hi = ir->buf_in[index + 1] & 0xff;
u8 lo = ir->buf_in[index + 2] & 0xff;
u32 carrier_cycles; u32 carrier_cycles;
u32 cycles_fix; u32 cycles_fix;
switch (ir->buf_in[index]) { if (cmd == MCE_CMD_PORT_SYS) {
/* the one and only 5-byte return value command */ switch (subcmd) {
case MCE_RSP_GETPORTSTATUS: /* the one and only 5-byte return value command */
if ((ir->buf_in[index + 4] & 0xff) == 0x00) case MCE_RSP_GETPORTSTATUS:
ir->txports_cabled |= 1 << hi; if (buf_in[5] == 0)
break; ir->txports_cabled |= 1 << *hi;
break;
/* 1-byte return value commands */
case MCE_RSP_EQEMVER:
ir->emver = *hi;
break;
/* No return value commands */
case MCE_RSP_CMD_ILLEGAL:
ir->need_reset = true;
break;
default:
break;
}
return;
}
if (cmd != MCE_CMD_PORT_IR)
return;
switch (subcmd) {
/* 2-byte return value commands */ /* 2-byte return value commands */
case MCE_RSP_EQIRTIMEOUT: case MCE_RSP_EQIRTIMEOUT:
ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT); ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT);
break; break;
case MCE_RSP_EQIRNUMPORTS: case MCE_RSP_EQIRNUMPORTS:
ir->num_txports = hi; ir->num_txports = *hi;
ir->num_rxports = lo; ir->num_rxports = *lo;
break; break;
case MCE_RSP_EQIRRXCFCNT: case MCE_RSP_EQIRRXCFCNT:
/* /*
@ -1174,7 +1213,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
*/ */
if (ir->carrier_report_enabled && ir->learning_active && if (ir->carrier_report_enabled && ir->learning_active &&
ir->pulse_tunit > 0) { ir->pulse_tunit > 0) {
carrier_cycles = (hi << 8 | lo); carrier_cycles = (*hi << 8 | *lo);
/* /*
* Adjust carrier cycle count by adding * Adjust carrier cycle count by adding
* 1 missed count per pulse "on" * 1 missed count per pulse "on"
@ -1192,24 +1231,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
break; break;
/* 1-byte return value commands */ /* 1-byte return value commands */
case MCE_RSP_EQEMVER:
ir->emver = hi;
break;
case MCE_RSP_EQIRTXPORTS: case MCE_RSP_EQIRTXPORTS:
ir->tx_mask = hi; ir->tx_mask = *hi;
break; break;
case MCE_RSP_EQIRRXPORTEN: case MCE_RSP_EQIRRXPORTEN:
ir->learning_active = ((hi & 0x02) == 0x02); ir->learning_active = ((*hi & 0x02) == 0x02);
if (ir->rxports_active != hi) { if (ir->rxports_active != *hi) {
dev_info(ir->dev, "%s-range (0x%x) receiver active", dev_info(ir->dev, "%s-range (0x%x) receiver active",
ir->learning_active ? "short" : "long", hi); ir->learning_active ? "short" : "long", *hi);
ir->rxports_active = hi; ir->rxports_active = *hi;
} }
break; break;
/* No return value commands */
case MCE_RSP_CMD_ILLEGAL: case MCE_RSP_CMD_ILLEGAL:
case MCE_RSP_TX_TIMEOUT: case MCE_RSP_TX_TIMEOUT:
ir->need_reset = true; ir->need_reset = true;
break; break;
default: default:
break; break;
} }
@ -1235,7 +1274,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]); ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1, mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
ir->rem + 2, false); ir->rem + 2, false);
mceusb_handle_command(ir, i); if (i + ir->rem < buf_len)
mceusb_handle_command(ir, &ir->buf_in[i - 1]);
ir->parser_state = CMD_DATA; ir->parser_state = CMD_DATA;
break; break;
case PARSE_IRDATA: case PARSE_IRDATA:
@ -1264,15 +1304,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem--; ir->rem--;
break; break;
case CMD_HEADER: case CMD_HEADER:
/* decode mce packets of the form (84),AA,BB,CC,DD */
/* IR data packets can span USB messages - rem */
ir->cmd = ir->buf_in[i]; ir->cmd = ir->buf_in[i];
if ((ir->cmd == MCE_CMD_PORT_IR) || if ((ir->cmd == MCE_CMD_PORT_IR) ||
((ir->cmd & MCE_PORT_MASK) != ((ir->cmd & MCE_PORT_MASK) !=
MCE_COMMAND_IRDATA)) { MCE_COMMAND_IRDATA)) {
/*
* got PORT_SYS, PORT_IR, or unknown
* command response prefix
*/
ir->parser_state = SUBCMD; ir->parser_state = SUBCMD;
continue; continue;
} }
/*
* got IR data prefix (0x80 + num_bytes)
* decode MCE packets of the form {0x83, AA, BB, CC}
* IR data packets can span USB messages
*/
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK); ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
mceusb_dev_printdata(ir, ir->buf_in, buf_len, mceusb_dev_printdata(ir, ir->buf_in, buf_len,
i, ir->rem + 1, false); i, ir->rem + 1, false);
@ -1296,6 +1343,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->parser_state != CMD_HEADER && !ir->rem) if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER; ir->parser_state = CMD_HEADER;
} }
/*
* Accept IR data spanning multiple rx buffers.
* Reject MCE command response spanning multiple rx buffers.
*/
if (ir->parser_state != PARSE_IRDATA || !ir->rem)
ir->parser_state = CMD_HEADER;
if (event) { if (event) {
dev_dbg(ir->dev, "processed IR data"); dev_dbg(ir->dev, "processed IR data");
ir_raw_event_handle(ir->rc); ir_raw_event_handle(ir->rc);
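
The mceusb changes validate every offset and length against the size of the receive buffer before dereferencing any payload bytes, and refuse to interpret a command response that spans two USB transfers. A toy bounds-checked parser in the same spirit, using a made-up record layout rather than the real MCE protocol:

#include <stdint.h>
#include <stdio.h>

/* Toy layout: each record is [len][type][len payload bytes]. */
static int parse(const uint8_t *buf, size_t buf_len)
{
        size_t off = 0;

        while (off < buf_len) {
                uint8_t len = buf[off];

                /* Header plus payload must fit entirely in this buffer. */
                if (off + 2 > buf_len || off + 2 + len > buf_len) {
                        fprintf(stderr, "truncated record at offset %zu\n", off);
                        return -1;
                }
                printf("type 0x%02x, %u payload bytes\n", buf[off + 1], len);
                off += 2 + len;
        }
        return 0;
}

int main(void)
{
        const uint8_t ok[]  = { 2, 0xaa, 1, 2, 0, 0xbb };
        const uint8_t bad[] = { 5, 0xaa, 1, 2 };        /* claims more than it has */

        parse(ok, sizeof(ok));
        parse(bad, sizeof(bad));
        return 0;
}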


@ -1773,6 +1773,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
set_bit(MSC_SCAN, dev->input_dev->mscbit); set_bit(MSC_SCAN, dev->input_dev->mscbit);
/* Pointer/mouse events */ /* Pointer/mouse events */
set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit);
set_bit(EV_REL, dev->input_dev->evbit); set_bit(EV_REL, dev->input_dev->evbit);
set_bit(REL_X, dev->input_dev->relbit); set_bit(REL_X, dev->input_dev->relbit);
set_bit(REL_Y, dev->input_dev->relbit); set_bit(REL_Y, dev->input_dev->relbit);


@ -538,6 +538,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
struct flexcop_device *fc = NULL; struct flexcop_device *fc = NULL;
int ret; int ret;
if (intf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) { if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
err("out of memory\n"); err("out of memory\n");
return -ENOMEM; return -ENOMEM;


@ -521,7 +521,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
{ {
u8 ircode[4]; u8 ircode[4];
cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4); if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
return 0;
if (ircode[2] || ircode[3]) if (ircode[2] || ircode[3])
rc_keydown(d->rc_dev, RC_PROTO_NEC, rc_keydown(d->rc_dev, RC_PROTO_NEC,


@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock)) if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS; return -ERESTARTSYS;
if (usbvision->remove_pending) {
err_code = -ENODEV;
goto unlock;
}
if (usbvision->user) { if (usbvision->user) {
err_code = -EBUSY; err_code = -EBUSY;
} else { } else {
@ -377,6 +381,7 @@ unlock:
static int usbvision_v4l2_close(struct file *file) static int usbvision_v4l2_close(struct file *file)
{ {
struct usb_usbvision *usbvision = video_drvdata(file); struct usb_usbvision *usbvision = video_drvdata(file);
int r;
PDEBUG(DBG_IO, "close"); PDEBUG(DBG_IO, "close");
@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file)
usbvision_scratch_free(usbvision); usbvision_scratch_free(usbvision);
usbvision->user--; usbvision->user--;
r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock); mutex_unlock(&usbvision->v4l2_lock);
if (usbvision->remove_pending) { if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__); printk(KERN_INFO "%s: Final disconnect\n", __func__);
usbvision_release(usbvision); usbvision_release(usbvision);
return 0; return 0;
@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv,
{ {
struct usb_usbvision *usbvision = video_drvdata(file); struct usb_usbvision *usbvision = video_drvdata(file);
if (!usbvision->dev)
return -ENODEV;
strscpy(vc->driver, "USBVision", sizeof(vc->driver)); strscpy(vc->driver, "USBVision", sizeof(vc->driver));
strscpy(vc->card, strscpy(vc->card,
usbvision_device_data[usbvision->dev_model].model_string, usbvision_device_data[usbvision->dev_model].model_string,
@ -1061,6 +1070,11 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock)) if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS; return -ERESTARTSYS;
if (usbvision->remove_pending) {
err_code = -ENODEV;
goto out;
}
err_code = v4l2_fh_open(file); err_code = v4l2_fh_open(file);
if (err_code) if (err_code)
goto out; goto out;
@ -1093,21 +1107,24 @@ out:
static int usbvision_radio_close(struct file *file) static int usbvision_radio_close(struct file *file)
{ {
struct usb_usbvision *usbvision = video_drvdata(file); struct usb_usbvision *usbvision = video_drvdata(file);
int r;
PDEBUG(DBG_IO, ""); PDEBUG(DBG_IO, "");
mutex_lock(&usbvision->v4l2_lock); mutex_lock(&usbvision->v4l2_lock);
/* Set packet size to 0 */ /* Set packet size to 0 */
usbvision->iface_alt = 0; usbvision->iface_alt = 0;
usb_set_interface(usbvision->dev, usbvision->iface, if (usbvision->dev)
usbvision->iface_alt); usb_set_interface(usbvision->dev, usbvision->iface,
usbvision->iface_alt);
usbvision_audio_off(usbvision); usbvision_audio_off(usbvision);
usbvision->radio = 0; usbvision->radio = 0;
usbvision->user--; usbvision->user--;
r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock); mutex_unlock(&usbvision->v4l2_lock);
if (usbvision->remove_pending) { if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__); printk(KERN_INFO "%s: Final disconnect\n", __func__);
v4l2_fh_release(file); v4l2_fh_release(file);
usbvision_release(usbvision); usbvision_release(usbvision);
@ -1539,6 +1556,7 @@ err_usb:
static void usbvision_disconnect(struct usb_interface *intf) static void usbvision_disconnect(struct usb_interface *intf)
{ {
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf)); struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
int u;
PDEBUG(DBG_PROBE, ""); PDEBUG(DBG_PROBE, "");
@ -1555,13 +1573,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
v4l2_device_disconnect(&usbvision->v4l2_dev); v4l2_device_disconnect(&usbvision->v4l2_dev);
usbvision_i2c_unregister(usbvision); usbvision_i2c_unregister(usbvision);
usbvision->remove_pending = 1; /* Now all ISO data will be ignored */ usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
u = usbvision->user;
usb_put_dev(usbvision->dev); usb_put_dev(usbvision->dev);
usbvision->dev = NULL; /* USB device is no more */ usbvision->dev = NULL; /* USB device is no more */
mutex_unlock(&usbvision->v4l2_lock); mutex_unlock(&usbvision->v4l2_lock);
if (usbvision->user) { if (u) {
printk(KERN_INFO "%s: In use, disconnect pending\n", printk(KERN_INFO "%s: In use, disconnect pending\n",
__func__); __func__);
wake_up_interruptible(&usbvision->wait_frame); wake_up_interruptible(&usbvision->wait_frame);


@@ -2151,6 +2151,20 @@ static int uvc_probe(struct usb_interface *intf,
 			sizeof(dev->name) - len);
 	}
 
+	/* Initialize the media device. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+	dev->mdev.dev = &intf->dev;
+	strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+	if (udev->serial)
+		strscpy(dev->mdev.serial, udev->serial,
+			sizeof(dev->mdev.serial));
+	usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
+	dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+	media_device_init(&dev->mdev);
+
+	dev->vdev.mdev = &dev->mdev;
+#endif
+
 	/* Parse the Video Class control descriptor. */
 	if (uvc_parse_control(dev) < 0) {
 		uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
@@ -2171,19 +2185,7 @@ static int uvc_probe(struct usb_interface *intf,
 			"linux-uvc-devel mailing list.\n");
 	}
 
-	/* Initialize the media device and register the V4L2 device. */
-#ifdef CONFIG_MEDIA_CONTROLLER
-	dev->mdev.dev = &intf->dev;
-	strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
-	if (udev->serial)
-		strscpy(dev->mdev.serial, udev->serial,
-			sizeof(dev->mdev.serial));
-	usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
-	dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
-	media_device_init(&dev->mdev);
-
-	dev->vdev.mdev = &dev->mdev;
-#endif
+	/* Register the V4L2 device. */
 	if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
 		goto error;


@@ -873,15 +873,16 @@ static const struct device_type mei_cl_device_type = {
 /**
  * mei_cl_bus_set_name - set device name for me client device
+ *   <controller>-<client device>
+ *   Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
  *
  * @cldev: me client device
  */
 static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
 {
-	dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
-		     cldev->name,
-		     mei_me_cl_uuid(cldev->me_cl),
-		     mei_me_cl_ver(cldev->me_cl));
+	dev_set_name(&cldev->dev, "%s-%pUl",
+		     dev_name(cldev->bus->dev),
+		     mei_me_cl_uuid(cldev->me_cl));
 }
 
 /**


@@ -81,6 +81,7 @@
 #define MEI_DEV_ID_CMP_LP     0x02e0  /* Comet Point LP */
 #define MEI_DEV_ID_CMP_LP_3   0x02e4  /* Comet Point LP 3 (iTouch) */
+#define MEI_DEV_ID_CMP_V      0xA3BA  /* Comet Point Lake V */
 
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */


@@ -98,6 +98,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},


@@ -617,6 +617,7 @@ err_free_chan:
 	sl->tty = NULL;
 	tty->disc_data = NULL;
 	clear_bit(SLF_INUSE, &sl->flags);
+	slc_free_netdev(sl->dev);
 	free_netdev(sl->dev);
 
 err_exit:


@@ -792,7 +792,7 @@ resubmit:
 			  up);
 
 	usb_anchor_urb(urb, &up->rx_urbs);
-	ret = usb_submit_urb(urb, GFP_KERNEL);
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
 
 	if (ret < 0) {
 		netdev_err(up->netdev,


@@ -594,15 +594,15 @@ static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
 	int i;
 
 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
-		if (ports->role == XMII_MAC)
+		if (ports[i].role == XMII_MAC)
 			continue;
 
-		if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
-		    ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
 			priv->rgmii_rx_delay[i] = true;
 
-		if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
-		    ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
 			priv->rgmii_tx_delay[i] = true;
 
 		if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&


@@ -4392,6 +4392,7 @@ static int macb_remove(struct platform_device *pdev)
 		mdiobus_free(bp->mii_bus);
 
 		unregister_netdev(dev);
+		tasklet_kill(&bp->hresp_err_tasklet);
 		pm_runtime_disable(&pdev->dev);
 		pm_runtime_dont_use_autosuspend(&pdev->dev);
 		if (!pm_runtime_suspended(&pdev->dev)) {


@@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
 	}
 
 	qpl->id = id;
-	qpl->num_entries = pages;
+	qpl->num_entries = 0;
 	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
 	/* caller handles clean up */
 	if (!qpl->pages)
@@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
 		/* caller handles clean up */
 		if (err)
 			return -ENOMEM;
+		qpl->num_entries++;
 	}
 	priv->num_registered_pages += pages;


@@ -1516,6 +1516,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
 	rtl_lock_config_regs(tp);
 
 	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+	tp->dev->wol_enabled = wolopts ? 1 : 0;
 }
 
 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -4118,7 +4119,7 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
 	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
 		r8168dp_hw_jumbo_enable(tp);
 		break;
-	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
+	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
 		r8168e_hw_jumbo_enable(tp);
 		break;
 	default:


@@ -359,10 +359,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
 	}
 	spin_unlock(&port->bc_queue.lock);
 
+	schedule_work(&port->bc_work);
+
 	if (err)
 		goto free_nskb;
 
-	schedule_work(&port->bc_work);
 	return;
 
 free_nskb:


@@ -62,8 +62,8 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
 	struct reset_control *reset = NULL;
 
 	if (mdiodev->dev.of_node)
-		reset = devm_reset_control_get_exclusive(&mdiodev->dev,
-							 "phy");
+		reset = of_reset_control_get_exclusive(mdiodev->dev.of_node,
+						       "phy");
 	if (IS_ERR(reset)) {
 		if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
 			reset = NULL;
@@ -107,6 +107,8 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
 	if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
 		return -EINVAL;
 
+	reset_control_put(mdiodev->reset_ctrl);
+
 	mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
 
 	return 0;


@@ -855,6 +855,7 @@ err_free_chan:
 	sl->tty = NULL;
 	tty->disc_data = NULL;
 	clear_bit(SLF_INUSE, &sl->flags);
+	sl_free_netdev(sl->dev);
 	free_netdev(sl->dev);
 
 err_exit:


@@ -3490,7 +3490,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	struct ath10k_pci *ar_pci;
 	enum ath10k_hw_rev hw_rev;
 	struct ath10k_bus_params bus_params = {};
-	bool pci_ps;
+	bool pci_ps, is_qca988x = false;
 	int (*pci_soft_reset)(struct ath10k *ar);
 	int (*pci_hard_reset)(struct ath10k *ar);
 	u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
@@ -3500,6 +3500,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	case QCA988X_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA988X;
 		pci_ps = false;
+		is_qca988x = true;
 		pci_soft_reset = ath10k_pci_warm_reset;
 		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
 		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
@@ -3619,25 +3620,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 		goto err_deinit_irq;
 	}
 
+	bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+	bus_params.link_can_suspend = true;
+	/* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
+	 * fall off the bus during chip_reset. These chips have the same pci
+	 * device id as the QCA9880 BR4A or 2R4E. So that's why the check.
+	 */
+	if (is_qca988x) {
+		bus_params.chip_id =
+			ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+		if (bus_params.chip_id != 0xffffffff) {
+			if (!ath10k_pci_chip_is_supported(pdev->device,
+							  bus_params.chip_id))
+				goto err_unsupported;
+		}
+	}
+
 	ret = ath10k_pci_chip_reset(ar);
 	if (ret) {
 		ath10k_err(ar, "failed to reset chip: %d\n", ret);
 		goto err_free_irq;
 	}
 
-	bus_params.dev_type = ATH10K_DEV_TYPE_LL;
-	bus_params.link_can_suspend = true;
 	bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
-	if (bus_params.chip_id == 0xffffffff) {
-		ath10k_err(ar, "failed to get chip id\n");
-		goto err_free_irq;
-	}
+	if (bus_params.chip_id == 0xffffffff)
+		goto err_unsupported;
 
-	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
-		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
-			   pdev->device, bus_params.chip_id);
+	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
 		goto err_free_irq;
-	}
 
 	ret = ath10k_core_register(ar, &bus_params);
 	if (ret) {
@@ -3647,6 +3657,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
 	return 0;
 
+err_unsupported:
+	ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+		   pdev->device, bus_params.chip_id);
+
 err_free_irq:
 	ath10k_pci_free_irq(ar);
 	ath10k_pci_rx_retry_sync(ar);


@@ -581,22 +581,29 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
 {
 	struct wlfw_host_cap_resp_msg_v01 resp = {};
 	struct wlfw_host_cap_req_msg_v01 req = {};
+	struct qmi_elem_info *req_ei;
 	struct ath10k *ar = qmi->ar;
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
 	struct qmi_txn txn;
 	int ret;
 
 	req.daemon_support_valid = 1;
 	req.daemon_support = 0;
 
-	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
-			   wlfw_host_cap_resp_msg_v01_ei, &resp);
+	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
+			   &resp);
 	if (ret < 0)
 		goto out;
 
+	if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
+		req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
+	else
+		req_ei = wlfw_host_cap_req_msg_v01_ei;
+
 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
 			       QMI_WLFW_HOST_CAP_REQ_V01,
 			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
-			       wlfw_host_cap_req_msg_v01_ei, &req);
+			       req_ei, &req);
 	if (ret < 0) {
 		qmi_txn_cancel(&txn);
 		ath10k_err(ar, "failed to send host capability request: %d\n", ret);


@@ -1988,6 +1988,28 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
 	{}
 };
 
+struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support),
+	},
+	{}
+};
+
 struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
 	{
 		.data_type      = QMI_STRUCT,


@@ -575,6 +575,7 @@ struct wlfw_host_cap_req_msg_v01 {
 
 #define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
 extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
 
 struct wlfw_host_cap_resp_msg_v01 {
 	struct qmi_response_type_v01 resp;


@@ -1261,6 +1261,15 @@ out:
 	return ret;
 }
 
+static void ath10k_snoc_quirks_init(struct ath10k *ar)
+{
+	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+	struct device *dev = &ar_snoc->dev->dev;
+
+	if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
+		set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
+}
+
 int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
 {
 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1678,6 +1687,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
 	ar->ce_priv = &ar_snoc->ce;
 	msa_size = drv_data->msa_size;
 
+	ath10k_snoc_quirks_init(ar);
+
 	ret = ath10k_snoc_resource_init(ar);
 	if (ret) {
 		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);

Some files were not shown because too many files have changed in this diff.