This is the 5.4.81 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl/HR0oACgkQONu9yGCS
 aT5jnQ//ZvbqP065egdcWW43TiTudZwFVS996V/thRT0KrNEIL7S8KfuCtCJ1m5r
 mPkM1yNs/hNj0tUY2U/GLbDaBk98qbSd7LVghdf/xI+CVlkjp7uhTIjnl0y9X/+S
 tQ4a+kboQJOUi1Q48cjB7L/J6ihtLcEOFUTZovd/CmPEMNFxULO9rwHcsCJYWrg+
 jyniyk0NApZ2tNK3wtpDgbA6+LfFMyxPXJh4aPHG0CrjGQxcIYh6udxqQZ17L236
 NQVBrwZiaeSlh7l1ISlnagVU7YQSQcIuHIngmMc5zlH9HGEbLKCgHUFpVFBCiTgu
 8CJfULWdD7sDRUoeIT4S126sZVQJZj5xDLB+pxa8YD6E3bNDqKD8tq1kmXHRM5vk
 tr42Ve7QhkBl2I94iyAa+yFSyDXyr6NWYuapgmYNGurqQKm1gtD7ndRqmDyaKTcQ
 yH60V5eRyvRvffjcNXcFjeJNtO86AFPCNIQ6NpyQIlci6OVWSuECKRlovgozdESd
 NSl3rA30jk0IacaP4USx3ZJ6u1OCMtfaCbBD27yATovARayUmHizi1+PgLZeRyIN
 P2SBkLOm8pMc0XxH+ZJGU8n1gQ7IOGlZWQ7xFH4GJJ0LqEZeMfjFDJcHmm7GHrlD
 TzCX+BYg2InR0dtcGivGT7OisdhE/kmPGlPdUh4fse9ypfnS7nw=
 =CUC6
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.81' into 5.4-2.2.x-imx

This is the 5.4.81 stable release

Conflicts (manual resolve):
- drivers/tee/optee/call.c:
Drop commit e0238fcd9f ("MLK-21698: tee:optee: fix shared memory
page attribute checks") from NXP in favor of 0e467f6af99f ("optee:
add writeback to valid memory type") from upstream, because
including WT-marked (write-through) memory blocks is not compatible
with the OP-TEE design (see the note ahead of the file diffs).
Link: https://lore.kernel.org/lkml/AM6PR06MB4691D4988AC57DD24424D40CA6F30@AM6PR06MB4691.eurprd06.prod.outlook.com/

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
5.4-rM2-2.2.x-imx-squashed
Andrey Zhizhikin 2020-12-02 19:09:30 +00:00
commit 4797de450c
104 changed files with 1013 additions and 457 deletions
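
Note: the upstream check retained by this resolution accepts only
normal cached memory when registering shared memory. A minimal
sketch of that check, assuming the 0e467f6af99f shape of
drivers/tee/optee/call.c:

    static bool is_normal_memory(pgprot_t p)
    {
    #if defined(CONFIG_ARM)
    	/* Write-alloc and write-back are valid; WT stays rejected. */
    	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
    		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
    #elif defined(CONFIG_ARM64)
    	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
    #else
    	return false;	/* assumption: other architectures not handled here */
    #endif
    }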

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 80
SUBLEVEL = 81
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@ -135,8 +135,10 @@
#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#define MAX_POSSIBLE_PHYSMEM_BITS 40
#else
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/**************************************************************************
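
Note: this MAX_POSSIBLE_PHYSMEM_BITS definition, like the matching
ARM, MIPS, powerpc and RISC-V hunks below, exists for zsmalloc,
which packs a PFN plus an object index into one unsigned long
handle. A condensed sketch of the consumer, assuming the
mm/zsmalloc.c logic of this kernel generation:

    #ifndef MAX_POSSIBLE_PHYSMEM_BITS
    #define MAX_POSSIBLE_PHYSMEM_BITS	BITS_PER_LONG
    #endif
    /* bits a handle must reserve to address any possible PFN */
    #define _PFN_BITS	(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
    #define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)

Without an accurate per-architecture value the fallback is
BITS_PER_LONG, and OBJ_INDEX_BITS can underflow on 32-bit
configurations whose physical address space exceeds 32 bits.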

@ -32,8 +32,8 @@
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "int0", "int1";
clocks = <&mcan_clk>, <&l3_iclk_div>;
clock-names = "cclk", "hclk";
clocks = <&l3_iclk_div>, <&mcan_clk>;
clock-names = "hclk", "cclk";
bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
};
};

@ -75,6 +75,8 @@
#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map

@ -25,6 +25,8 @@
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
#define MAX_POSSIBLE_PHYSMEM_BITS 40
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
*/

@ -175,8 +175,11 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (mpuss_can_lose_context) {
error = cpu_cluster_pm_enter();
if (error) {
omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
goto cpu_cluster_pm_out;
index = 0;
cx = state_ptr + index;
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
mpuss_can_lose_context = 0;
}
}
}
@ -184,7 +187,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu_done[dev->cpu] = true;
cpu_cluster_pm_out:
/* Wakeup CPU1 only if it is not offlined */
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

@ -692,7 +692,7 @@
hsp_aon: hsp@c150000 {
compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
reg = <0x0c150000 0xa0000>;
reg = <0x0c150000 0x90000>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,

@ -98,8 +98,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
@ -107,9 +105,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
* remapped as PROT_NONE but are yet to be flushed from the TLB.
* Note that we can't make any assumptions based on the state of the access
* flag, since ptep_clear_flush_young() elides a DSB when invalidating the
* TLB.
*/
#define pte_accessible(mm, pte) \
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
* p??_access_permitted() is true for valid user mappings (subject to the
@ -135,13 +136,6 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
@ -167,6 +161,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
/*
* If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
* clear), set the PTE_DIRTY bit.
*/
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(PTE_AF));
@ -787,12 +795,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
pte = READ_ONCE(*ptep);
do {
old_pte = pte;
/*
* If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
* clear), set the PTE_DIRTY bit.
*/
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
pte = pte_wrprotect(pte);
pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));

@ -155,6 +155,7 @@ static inline void pmd_clear(pmd_t *pmdp)
#if defined(CONFIG_XPA)
#define MAX_POSSIBLE_PHYSMEM_BITS 40
#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
@ -170,6 +171,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
@ -184,6 +186,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))

@ -37,8 +37,10 @@ static inline bool pte_user(pte_t pte)
*/
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/*

@ -54,6 +54,8 @@
#else /* !__ASSEMBLY__ */
#include <linux/jump_label.h>
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
#ifdef CONFIG_PPC_KUAP

@ -148,8 +148,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/*

@ -252,6 +252,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
}
state = &sb->irq_state[src];
/* Some sanity checking */
if (!state->valid) {
pr_devel("%s: source %lx invalid !\n", __func__, irq);
return VM_FAULT_SIGBUS;
}
kvmppc_xive_select_irq(state, &hw_num, &xd);
arch_spin_lock(&sb->lock);

@ -14,4 +14,6 @@
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 34
#endif /* _ASM_RISCV_PGTABLE_32_H */

@ -107,14 +107,14 @@
MODULE_LICENSE("GPL");
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
static ssize_t __cstate_##_var##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
static ssize_t __cstate_##_var##_show(struct device *dev, \
struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
static struct kobj_attribute format_attr_##_var = \
static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
static ssize_t cstate_get_attr_cpumask(struct device *dev,
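
Note: the kobj_attribute -> device_attribute conversions in this and
the following perf hunks are needed because these sysfs files are
created on the PMU's struct device, so the show() callbacks must
match the device_attribute prototype. A minimal illustrative sketch
(not taken from this diff):

    static ssize_t event_show(struct device *dev,
    			  struct device_attribute *attr, char *buf)
    {
    	return sprintf(buf, "config:0-7\n");
    }
    static DEVICE_ATTR_RO(event);	/* dev_attr_event, mode 0444 */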

@ -92,8 +92,8 @@ end:
return map;
}
ssize_t uncore_event_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
ssize_t uncore_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uncore_event_desc *event =
container_of(attr, struct uncore_event_desc, attr);

@ -144,7 +144,7 @@ struct intel_uncore_box {
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2
struct uncore_event_desc {
struct kobj_attribute attr;
struct device_attribute attr;
const char *config;
};
@ -165,8 +165,8 @@ struct pci2phy_map {
struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_physid(struct pci_bus *bus);
ssize_t uncore_event_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf);
ssize_t uncore_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
{ \
@ -175,14 +175,14 @@ ssize_t uncore_event_show(struct kobject *kobj,
}
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
static ssize_t __uncore_##_var##_show(struct device *dev, \
struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
static struct kobj_attribute format_attr_##_var = \
static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
static inline bool uncore_pmc_fixed(int idx)

@ -93,18 +93,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
* any other bit is reserved
*/
#define RAPL_EVENT_MASK 0xFFULL
#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
static struct kobj_attribute format_attr_##_var = \
__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
#define RAPL_CNTR_WIDTH 32
#define RAPL_EVENT_ATTR_STR(_name, v, str) \
@ -433,7 +421,7 @@ static struct attribute_group rapl_pmu_events_group = {
.attrs = attrs_empty,
};
DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
&format_attr_event.attr,
NULL,

@ -1560,6 +1560,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);

@ -733,11 +733,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
spectre_v2_user_ibpb = mode;
switch (cmd) {
case SPECTRE_V2_USER_CMD_FORCE:
case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
static_branch_enable(&switch_mm_always_ibpb);
spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
break;
case SPECTRE_V2_USER_CMD_PRCTL:
case SPECTRE_V2_USER_CMD_AUTO:
@ -751,8 +753,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");
spectre_v2_user_ibpb = mode;
}
/*

@ -1361,8 +1361,10 @@ void do_machine_check(struct pt_regs *regs, long error_code)
* When there's any problem use only local no_way_out state.
*/
if (!lmce) {
if (mce_end(order) < 0)
no_way_out = worst >= MCE_PANIC_SEVERITY;
if (mce_end(order) < 0) {
if (!no_way_out)
no_way_out = worst >= MCE_PANIC_SEVERITY;
}
} else {
/*
* If there was a fatal machine check we should have

@ -507,6 +507,24 @@ unlock:
return ret ?: nbytes;
}
/**
* rdtgroup_remove - the helper to remove resource group safely
* @rdtgrp: resource group to remove
*
* On resource group creation via a mkdir, an extra kernfs_node reference is
* taken to ensure that the rdtgroup structure remains accessible for the
* rdtgroup_kn_unlock() calls where it is removed.
*
* Drop the extra reference here, then free the rdtgroup structure.
*
* Return: void
*/
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
}
struct task_move_callback {
struct callback_head work;
struct rdtgroup *rdtgrp;
@ -529,7 +547,7 @@ static void move_myself(struct callback_head *head)
(rdtgrp->flags & RDT_DELETED)) {
current->closid = 0;
current->rmid = 0;
kfree(rdtgrp);
rdtgroup_remove(rdtgrp);
}
preempt_disable();
@ -1618,7 +1636,6 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
if (IS_ERR(kn_subdir))
return PTR_ERR(kn_subdir);
kernfs_get(kn_subdir);
ret = rdtgroup_kn_set_ugid(kn_subdir);
if (ret)
return ret;
@ -1641,7 +1658,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
if (IS_ERR(kn_info))
return PTR_ERR(kn_info);
kernfs_get(kn_info);
ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
if (ret)
@ -1662,12 +1678,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
goto out_destroy;
}
/*
* This extra ref will be put in kernfs_remove() and guarantees
* that @rdtgrp->kn is always accessible.
*/
kernfs_get(kn_info);
ret = rdtgroup_kn_set_ugid(kn_info);
if (ret)
goto out_destroy;
@ -1696,12 +1706,6 @@ mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
if (dest_kn)
*dest_kn = kn;
/*
* This extra ref will be put in kernfs_remove() and guarantees
* that @rdtgrp->kn is always accessible.
*/
kernfs_get(kn);
ret = rdtgroup_kn_set_ugid(kn);
if (ret)
goto out_destroy;
@ -1928,8 +1932,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
rdtgroup_pseudo_lock_remove(rdtgrp);
kernfs_unbreak_active_protection(kn);
kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
rdtgroup_remove(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);
}
@ -1988,13 +1991,11 @@ static int rdt_get_tree(struct fs_context *fc)
&kn_mongrp);
if (ret < 0)
goto out_info;
kernfs_get(kn_mongrp);
ret = mkdir_mondata_all(rdtgroup_default.kn,
&rdtgroup_default, &kn_mondata);
if (ret < 0)
goto out_mongrp;
kernfs_get(kn_mondata);
rdtgroup_default.mon.mon_data_kn = kn_mondata;
}
@ -2223,7 +2224,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
if (atomic_read(&sentry->waitcount) != 0)
sentry->flags = RDT_DELETED;
else
kfree(sentry);
rdtgroup_remove(sentry);
}
}
@ -2265,7 +2266,7 @@ static void rmdir_all_sub(void)
if (atomic_read(&rdtgrp->waitcount) != 0)
rdtgrp->flags = RDT_DELETED;
else
kfree(rdtgrp);
rdtgroup_remove(rdtgrp);
}
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
update_closid_rmid(cpu_online_mask, &rdtgroup_default);
@ -2365,11 +2366,6 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
if (IS_ERR(kn))
return PTR_ERR(kn);
/*
* This extra ref will be put in kernfs_remove() and guarantees
* that kn is always accessible.
*/
kernfs_get(kn);
ret = rdtgroup_kn_set_ugid(kn);
if (ret)
goto out_destroy;
@ -2705,8 +2701,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
/*
* kernfs_remove() will drop the reference count on "kn" which
* will free it. But we still need it to stick around for the
* rdtgroup_kn_unlock(kn} call below. Take one extra reference
* here, which will be dropped inside rdtgroup_kn_unlock().
* rdtgroup_kn_unlock(kn) call. Take one extra reference here,
* which will be dropped by kernfs_put() in rdtgroup_remove().
*/
kernfs_get(kn);
@ -2747,6 +2743,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
out_idfree:
free_rmid(rdtgrp->mon.rmid);
out_destroy:
kernfs_put(rdtgrp->kn);
kernfs_remove(rdtgrp->kn);
out_free_rgrp:
kfree(rdtgrp);
@ -2759,7 +2756,7 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
kernfs_remove(rgrp->kn);
free_rmid(rgrp->mon.rmid);
kfree(rgrp);
rdtgroup_remove(rgrp);
}
/*
@ -2921,11 +2918,6 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
list_del(&rdtgrp->mon.crdtgrp_list);
/*
* one extra hold on this, will drop when we kfree(rdtgrp)
* in rdtgroup_kn_unlock()
*/
kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
return 0;
@ -2937,11 +2929,6 @@ static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
rdtgrp->flags = RDT_DELETED;
list_del(&rdtgrp->rdtgroup_list);
/*
* one extra hold on this, will drop when we kfree(rdtgrp)
* in rdtgroup_kn_unlock()
*/
kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
return 0;
}
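
Note: after this change every kernfs reference taken by resctrl has
exactly one owner. A sketch of the intended pairing, reconstructed
from the hunks above (not authoritative):

    kn = kernfs_create_dir(...);	/* creation ref, dropped by
    					 * kernfs_remove() */
    kernfs_get(kn);			/* extra ref so rdtgroup_kn_unlock()
    					 * can still reach rdtgrp->kn */
    ...
    kernfs_remove(rdtgrp->kn);		/* drops the creation ref */
    rdtgroup_remove(rdtgrp);		/* kernfs_put() drops the extra ref,
    					 * then kfree(rdtgrp) */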

@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
* check if there is pending interrupt from
* non-APIC source without intack.
*/
static int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
u8 accept = kvm_apic_accept_pic_intr(v);
if (accept) {
if (irqchip_split(v->kvm))
return pending_userspace_extint(v);
else
return v->kvm->arch.vpic->output;
} else
return 0;
}
/*
* check if there is injectable interrupt:
* when virtual interrupt delivery enabled,
* interrupt from apic will handled by hardware,
* we don't need to check it here.
*/
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
/*
* FIXME: interrupt.injected represents an interrupt that it's
* FIXME: interrupt.injected represents an interrupt whose
* side-effects have already been applied (e.g. bit from IRR
* already moved to ISR). Therefore, it is incorrect to rely
* on interrupt.injected to know if there is a pending
@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
if (!lapic_in_kernel(v))
return v->arch.interrupt.injected;
if (!kvm_apic_accept_pic_intr(v))
return 0;
if (irqchip_split(v->kvm))
return pending_userspace_extint(v);
else
return v->kvm->arch.vpic->output;
}
/*
* check if there is injectable interrupt:
* when virtual interrupt delivery enabled,
* interrupt from apic will handled by hardware,
* we don't need to check it here.
*/
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
{
if (kvm_cpu_has_extint(v))
return 1;
@ -90,20 +88,6 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
*/
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
/*
* FIXME: interrupt.injected represents an interrupt that it's
* side-effects have already been applied (e.g. bit from IRR
* already moved to ISR). Therefore, it is incorrect to rely
* on interrupt.injected to know if there is a pending
* interrupt in the user-mode LAPIC.
* This leads to nVMX/nSVM not be able to distinguish
* if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
* pending interrupt or should re-inject an injected
* interrupt.
*/
if (!lapic_in_kernel(v))
return v->arch.interrupt.injected;
if (kvm_cpu_has_extint(v))
return 1;
@ -117,16 +101,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
*/
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
if (kvm_cpu_has_extint(v)) {
if (irqchip_split(v->kvm)) {
int vector = v->arch.pending_external_vector;
v->arch.pending_external_vector = -1;
return vector;
} else
return kvm_pic_read_irq(v->kvm); /* PIC */
} else
if (!kvm_cpu_has_extint(v)) {
WARN_ON(!lapic_in_kernel(v));
return -1;
}
if (!lapic_in_kernel(v))
return v->arch.interrupt.nr;
if (irqchip_split(v->kvm)) {
int vector = v->arch.pending_external_vector;
v->arch.pending_external_vector = -1;
return vector;
} else
return kvm_pic_read_irq(v->kvm); /* PIC */
}
/*
@ -134,13 +123,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
*/
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
int vector;
if (!lapic_in_kernel(v))
return v->arch.interrupt.nr;
vector = kvm_cpu_get_extint(v);
int vector = kvm_cpu_get_extint(v);
if (vector != -1)
return vector; /* PIC */

@ -2330,7 +2330,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic;
u32 ppr;
if (!kvm_apic_hw_enabled(apic))
if (!kvm_apic_present(vcpu))
return -1;
__apic_update_ppr(apic, &ppr);

@ -3624,21 +3624,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
/*
* We can accept userspace's request for interrupt injection
* as long as we have a place to store the interrupt number.
* The actual injection will happen when the CPU is able to
* deliver the interrupt.
*/
if (kvm_cpu_has_extint(vcpu))
return false;
/* Acknowledging ExtINT does not happen if LINT0 is masked. */
return (!lapic_in_kernel(vcpu) ||
kvm_apic_accept_pic_intr(vcpu));
}
/*
* if userspace requested an interrupt window, check that the
* interrupt window is open.
*
* No need to exit to userspace if we already have an interrupt queued.
*/
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
return kvm_arch_interrupt_allowed(vcpu) &&
!kvm_cpu_has_interrupt(vcpu) &&
!kvm_event_needs_reinjection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);
}

@ -93,10 +93,20 @@ void xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu)
{
int irq;
if (!xen_pvspin)
return;
unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
/*
* When booting the kernel with 'mitigations=auto,nosmt', the secondary
* CPUs are not activated, and lock_kicker_irq is not initialized.
*/
irq = per_cpu(lock_kicker_irq, cpu);
if (irq == -1)
return;
unbind_from_irqhandler(irq, NULL);
per_cpu(lock_kicker_irq, cpu) = -1;
kfree(per_cpu(irq_name, cpu));
per_cpu(irq_name, cpu) = NULL;

@ -300,7 +300,7 @@ strncpy_from_user(char *dst, const char *src, long count)
return -EFAULT;
}
#else
long strncpy_from_user(char *dst, const char *src, long count);
long strncpy_from_user(char *dst, const char __user *src, long count);
#endif
/*

@ -192,6 +192,9 @@ static int sysc_wait_softreset(struct sysc *ddata)
u32 sysc_mask, syss_done, rstval;
int syss_offset, error = 0;
if (ddata->cap->regbits->srst_shift < 0)
return 0;
syss_offset = ddata->offsets[SYSC_SYSSTATUS];
sysc_mask = BIT(ddata->cap->regbits->srst_shift);

@ -2793,7 +2793,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
* If burst size is smaller than bus width then make sure we only
* transfer one at a time to avoid a burst stradling an MFIFO entry.
*/
if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
if (burst * 8 < pl330->pcfg.data_bus_width)
desc->rqcfg.brst_len = 1;
desc->bytes_requested = len;

@ -454,8 +454,8 @@ struct xilinx_dma_device {
#define to_dma_tx_descriptor(tx) \
container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
cond, delay_us, timeout_us)
readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
val, cond, delay_us, timeout_us)
/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)

@ -216,7 +216,7 @@ config EFI_DEV_PATH_PARSER
config EFI_EARLYCON
def_bool y
depends on SERIAL_EARLYCON && !ARM && !IA64
depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
select FONT_SUPPORT
select ARCH_USE_MEMREMAP_PROT

@ -23,19 +23,17 @@
#define CP_2WHEEL_MOUSE_HACK 0x02
#define CP_2WHEEL_MOUSE_HACK_ON 0x04
#define VA_INVAL_LOGICAL_BOUNDARY 0x08
/*
* Some USB barcode readers from cypress have usage min and usage max in
* the wrong order
*/
static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
unsigned int i;
if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
return rdesc;
if (*rsize < 4)
return rdesc;
@ -48,6 +46,40 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
/*
* Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly
* reports Logical Minimum of its Consumer Control device as 572
* (0x02 0x3c). Fix this by setting its Logical Minimum to zero.
*/
if (*rsize == 25 &&
rdesc[0] == 0x05 && rdesc[1] == 0x0c &&
rdesc[2] == 0x09 && rdesc[3] == 0x01 &&
rdesc[6] == 0x19 && rdesc[7] == 0x00 &&
rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) {
hid_info(hdev,
"fixing up varmilo VA104M consumer control report descriptor\n");
rdesc[12] = 0x00;
rdesc[13] = 0x00;
}
return rdesc;
}
static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
if (quirks & CP_RDESC_SWAPPED_MIN_MAX)
rdesc = cp_rdesc_fixup(hdev, rdesc, rsize);
if (quirks & VA_INVAL_LOGICAL_BOUNDARY)
rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize);
return rdesc;
}
static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@ -128,6 +160,8 @@ static const struct hid_device_id cp_devices[] = {
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
.driver_data = CP_2WHEEL_MOUSE_HACK },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1),
.driver_data = VA_INVAL_LOGICAL_BOUNDARY },
{ }
};
MODULE_DEVICE_TABLE(hid, cp_devices);

@ -337,6 +337,8 @@
#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
#define USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1 0X07b1
#define USB_VENDOR_ID_DATA_MODUL 0x7374
#define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH 0x1201
@ -449,6 +451,10 @@
#define USB_VENDOR_ID_FRUCTEL 0x25B6
#define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002
#define USB_VENDOR_ID_GAMEVICE 0x27F8
#define USB_DEVICE_ID_GAMEVICE_GV186 0x0BBE
#define USB_DEVICE_ID_GAMEVICE_KISHI 0x0BBF
#define USB_VENDOR_ID_GAMERON 0x0810
#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
@ -487,6 +493,7 @@
#define USB_DEVICE_ID_PENPOWER 0x00f4
#define USB_VENDOR_ID_GREENASIA 0x0e8f
#define USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR 0x3010
#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
@ -743,6 +750,7 @@
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309
#define USB_DEVICE_ID_LOGITECH_C007 0xc007
#define USB_DEVICE_ID_LOGITECH_C077 0xc077
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
@ -1292,6 +1300,7 @@
#define USB_VENDOR_ID_UGTIZER 0x2179
#define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
#define USB_VENDOR_ID_VIEWSONIC 0x0543
#define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621

@ -319,6 +319,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD),
HID_BATTERY_QUIRK_IGNORE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
{}
};

@ -11,6 +11,48 @@
#include "hid-ids.h"
#define QUIRK_TOUCHPAD_ON_OFF_REPORT BIT(0)
static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
hid_info(hdev, "Fixing up ITE keyboard report descriptor\n");
rdesc[163] = HID_MAIN_ITEM_RELATIVE;
}
}
return rdesc;
}
static int ite_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit,
int *max)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
if ((quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) &&
(usage->hid & HID_USAGE_PAGE) == 0x00880000) {
if (usage->hid == 0x00880078) {
/* Touchpad on, userspace expects F22 for this */
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F22);
return 1;
}
if (usage->hid == 0x00880079) {
/* Touchpad off, userspace expects F23 for this */
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F23);
return 1;
}
return -1;
}
return 0;
}
static int ite_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@ -37,13 +79,27 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
static int ite_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
hid_set_drvdata(hdev, (void *)id->driver_data);
ret = hid_open_report(hdev);
if (ret)
return ret;
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012),
.driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
@ -55,6 +111,9 @@ MODULE_DEVICE_TABLE(hid, ite_devices);
static struct hid_driver ite_driver = {
.name = "itetech",
.id_table = ite_devices,
.probe = ite_probe,
.report_fixup = ite_report_fixup,
.input_mapping = ite_input_mapping,
.event = ite_event,
};
module_hid_driver(ite_driver);

@ -3789,6 +3789,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb305),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ /* Dinovo Edge (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb309),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
@ -3831,6 +3834,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5000 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ /* Dinovo Edge keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb309),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },

@ -83,7 +83,12 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_GV186),
HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_KISHI),
HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },

@ -483,7 +483,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
return 1;
ptr = raw_data;
ptr++; /* Skip report id */
if (report->id)
ptr++; /* Skip report id */
spin_lock_irqsave(&pdata->lock, flags);

@ -385,6 +385,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
USB_DEVICE_ID_UGTIZER_TABLET_GT5040) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_TABLET_G5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,

@ -997,6 +997,8 @@ int uclogic_params_init(struct uclogic_params *params,
break;
case VID_PID(USB_VENDOR_ID_UGTIZER,
USB_DEVICE_ID_UGTIZER_TABLET_GP0610):
case VID_PID(USB_VENDOR_ID_UGTIZER,
USB_DEVICE_ID_UGTIZER_TABLET_GT5040):
case VID_PID(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540):
case VID_PID(USB_VENDOR_ID_UGEE,

@ -2423,6 +2423,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
@ -4614,11 +4615,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S);
qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S);
V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
V2_QPC_BYTE_244_RNR_CNT_M,
V2_QPC_BYTE_244_RNR_CNT_S);
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
V2_QPC_BYTE_244_RNR_NUM_INIT_S);
done:
qp_attr->cur_qp_state = qp_attr->qp_state;

@ -803,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_out_arm;
}
cq_context = mailbox->buf;
@ -846,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
}
spin_lock_irq(&dev->cq_table.lock);
if (mthca_array_set(&dev->cq_table.cq,
cq->cqn & (dev->limits.num_cqs - 1),
cq)) {
err = mthca_array_set(&dev->cq_table.cq,
cq->cqn & (dev->limits.num_cqs - 1), cq);
if (err) {
spin_unlock_irq(&dev->cq_table.lock);
goto err_out_free_mr;
}

@ -121,6 +121,7 @@ module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive data) of normally sanitize-filtered kbd data traffic debug log [pre-condition: i8042.debug=1 enabled]");
#endif
static bool i8042_present;
static bool i8042_bypass_aux_irq_test;
static char i8042_kbd_firmware_id[128];
static char i8042_aux_firmware_id[128];
@ -341,6 +342,9 @@ int i8042_command(unsigned char *param, int command)
unsigned long flags;
int retval;
if (!i8042_present)
return -1;
spin_lock_irqsave(&i8042_lock, flags);
retval = __i8042_command(param, command);
spin_unlock_irqrestore(&i8042_lock, flags);
@ -1609,12 +1613,15 @@ static int __init i8042_init(void)
err = i8042_platform_init();
if (err)
return err;
return (err == -ENODEV) ? 0 : err;
err = i8042_controller_check();
if (err)
goto err_platform_exit;
/* Set this before creating the dev to allow i8042_command to work right away */
i8042_present = true;
pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
@ -1633,6 +1640,9 @@ static int __init i8042_init(void)
static void __exit i8042_exit(void)
{
if (!i8042_present)
return;
platform_device_unregister(i8042_platform_device);
platform_driver_unregister(&i8042_driver);
i8042_platform_exit();

@ -136,7 +136,7 @@ static int exiu_domain_translate(struct irq_domain *domain,
if (fwspec->param_count != 2)
return -EINVAL;
*hwirq = fwspec->param[0];
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
}
return 0;
}

@ -990,7 +990,7 @@ static const struct can_bittiming_const m_can_bittiming_const_31X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 256,
.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
.tseg2_min = 2, /* Time segment 2 = phase_seg2 */
.tseg2_max = 128,
.sjw_max = 128,
.brp_min = 1,
@ -1605,7 +1605,7 @@ static int m_can_open(struct net_device *dev)
INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
IRQF_ONESHOT,
dev->name, dev);
} else {
err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,

@ -63,21 +63,27 @@ enum gs_can_identify_mode {
};
/* data types passed between host and device */
struct gs_host_config {
u32 byte_order;
} __packed;
/* All data exchanged between host and device is exchanged in host byte order,
* thanks to the struct gs_host_config byte_order member, which is sent first
* to indicate the desired byte order.
/* The firmware on the original USB2CAN by Geschwister Schneider
* Technologie Entwicklungs- und Vertriebs UG exchanges all data
* between the host and the device in host byte order. This is done
* with the struct gs_host_config::byte_order member, which is sent
* first to indicate the desired byte order.
*
* The widely used open source firmware candleLight doesn't support
* this feature and exchanges the data in little endian byte order.
*/
struct gs_host_config {
__le32 byte_order;
} __packed;
struct gs_device_config {
u8 reserved1;
u8 reserved2;
u8 reserved3;
u8 icount;
u32 sw_version;
u32 hw_version;
__le32 sw_version;
__le32 hw_version;
} __packed;
#define GS_CAN_MODE_NORMAL 0
@ -87,26 +93,26 @@ struct gs_device_config {
#define GS_CAN_MODE_ONE_SHOT BIT(3)
struct gs_device_mode {
u32 mode;
u32 flags;
__le32 mode;
__le32 flags;
} __packed;
struct gs_device_state {
u32 state;
u32 rxerr;
u32 txerr;
__le32 state;
__le32 rxerr;
__le32 txerr;
} __packed;
struct gs_device_bittiming {
u32 prop_seg;
u32 phase_seg1;
u32 phase_seg2;
u32 sjw;
u32 brp;
__le32 prop_seg;
__le32 phase_seg1;
__le32 phase_seg2;
__le32 sjw;
__le32 brp;
} __packed;
struct gs_identify_mode {
u32 mode;
__le32 mode;
} __packed;
#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
@ -117,23 +123,23 @@ struct gs_identify_mode {
#define GS_CAN_FEATURE_IDENTIFY BIT(5)
struct gs_device_bt_const {
u32 feature;
u32 fclk_can;
u32 tseg1_min;
u32 tseg1_max;
u32 tseg2_min;
u32 tseg2_max;
u32 sjw_max;
u32 brp_min;
u32 brp_max;
u32 brp_inc;
__le32 feature;
__le32 fclk_can;
__le32 tseg1_min;
__le32 tseg1_max;
__le32 tseg2_min;
__le32 tseg2_max;
__le32 sjw_max;
__le32 brp_min;
__le32 brp_max;
__le32 brp_inc;
} __packed;
#define GS_CAN_FLAG_OVERFLOW 1
struct gs_host_frame {
u32 echo_id;
u32 can_id;
__le32 can_id;
u8 can_dlc;
u8 channel;
@ -329,13 +335,13 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!skb)
return;
cf->can_id = hf->can_id;
cf->can_id = le32_to_cpu(hf->can_id);
cf->can_dlc = get_can_dlc(hf->can_dlc);
memcpy(cf->data, hf->data, 8);
/* ERROR frames tell us information about the controller */
if (hf->can_id & CAN_ERR_FLAG)
if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
gs_update_state(dev, cf);
netdev->stats.rx_packets++;
@ -418,11 +424,11 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
if (!dbt)
return -ENOMEM;
dbt->prop_seg = bt->prop_seg;
dbt->phase_seg1 = bt->phase_seg1;
dbt->phase_seg2 = bt->phase_seg2;
dbt->sjw = bt->sjw;
dbt->brp = bt->brp;
dbt->prop_seg = cpu_to_le32(bt->prop_seg);
dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
dbt->sjw = cpu_to_le32(bt->sjw);
dbt->brp = cpu_to_le32(bt->brp);
/* request bit timings */
rc = usb_control_msg(interface_to_usbdev(intf),
@ -503,7 +509,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
cf = (struct can_frame *)skb->data;
hf->can_id = cf->can_id;
hf->can_id = cpu_to_le32(cf->can_id);
hf->can_dlc = cf->can_dlc;
memcpy(hf->data, cf->data, cf->can_dlc);
@ -573,6 +579,7 @@ static int gs_can_open(struct net_device *netdev)
int rc, i;
struct gs_device_mode *dm;
u32 ctrlmode;
u32 flags = 0;
rc = open_candev(netdev);
if (rc)
@ -640,24 +647,24 @@ static int gs_can_open(struct net_device *netdev)
/* flags */
ctrlmode = dev->can.ctrlmode;
dm->flags = 0;
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
dm->flags |= GS_CAN_MODE_LOOP_BACK;
flags |= GS_CAN_MODE_LOOP_BACK;
else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
flags |= GS_CAN_MODE_LISTEN_ONLY;
/* Controller is not allowed to retry TX
* this mode is unavailable on atmels uc3c hardware
*/
if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
dm->flags |= GS_CAN_MODE_ONE_SHOT;
flags |= GS_CAN_MODE_ONE_SHOT;
if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */
dm->mode = GS_CAN_MODE_START;
dm->mode = cpu_to_le32(GS_CAN_MODE_START);
dm->flags = cpu_to_le32(flags);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_MODE,
@ -737,9 +744,9 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
return -ENOMEM;
if (do_identify)
imode->mode = GS_CAN_IDENTIFY_ON;
imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
imode->mode = GS_CAN_IDENTIFY_OFF;
imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@ -790,6 +797,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct net_device *netdev;
int rc;
struct gs_device_bt_const *bt_const;
u32 feature;
bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
if (!bt_const)
@ -830,14 +838,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* dev settup */
strcpy(dev->bt_const.name, "gs_usb");
dev->bt_const.tseg1_min = bt_const->tseg1_min;
dev->bt_const.tseg1_max = bt_const->tseg1_max;
dev->bt_const.tseg2_min = bt_const->tseg2_min;
dev->bt_const.tseg2_max = bt_const->tseg2_max;
dev->bt_const.sjw_max = bt_const->sjw_max;
dev->bt_const.brp_min = bt_const->brp_min;
dev->bt_const.brp_max = bt_const->brp_max;
dev->bt_const.brp_inc = bt_const->brp_inc;
dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
dev->udev = interface_to_usbdev(intf);
dev->iface = intf;
@ -854,28 +862,29 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can settup */
dev->can.state = CAN_STATE_STOPPED;
dev->can.clock.freq = bt_const->fclk_can;
dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = 0;
if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
feature = le32_to_cpu(bt_const->feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
if (feature & GS_CAN_FEATURE_LOOP_BACK)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
if (feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
SET_NETDEV_DEV(netdev, &intf->dev);
if (dconf->sw_version > 1)
if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
if (le32_to_cpu(dconf->sw_version) > 1)
if (feature & GS_CAN_FEATURE_IDENTIFY)
netdev->ethtool_ops = &gs_usb_ethtool_ops;
kfree(bt_const);
@ -910,7 +919,7 @@ static int gs_usb_probe(struct usb_interface *intf,
if (!hconf)
return -ENOMEM;
hconf->byte_order = 0x0000beef;
hconf->byte_order = cpu_to_le32(0x0000beef);
/* send host config */
rc = usb_control_msg(interface_to_usbdev(intf),
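
Note: __le32 is a sparse-checked wire type, so after this conversion
every host-side access has to go through the byte-order helpers. The
convention, in a minimal illustrative sketch:

    __le32 wire = cpu_to_le32(host_val);	/* host -> wire order */
    u32 host = le32_to_cpu(wire);		/* wire -> host order */

This keeps the driver correct on big-endian hosts talking to
candleLight firmware, which always uses little-endian data.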

@ -2143,6 +2143,8 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
mv88e6xxx_g1_wait_eeprom_done(chip);
}
}

@ -75,6 +75,37 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
{
const unsigned long timeout = jiffies + 1 * HZ;
u16 val;
int err;
/* Wait up to 1 second for the switch to finish reading the
* EEPROM.
*/
while (time_before(jiffies, timeout)) {
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
if (err) {
dev_err(chip->dev, "Error reading status");
return;
}
/* If the switch is still resetting, it may not
* respond on the bus, and so MDIO read returns
* 0xffff. Differentiate between that, and waiting for
* the EEPROM to be done by bit 0 being set.
*/
if (val != 0xffff &&
val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
return;
usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout waiting for EEPROM done");
}
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5

@ -277,6 +277,7 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);

@ -2622,16 +2622,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
goto err_mmio_read_less;
}
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
if (rc) {
dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
goto err_mmio_read_less;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
if (rc) {
dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
rc);
dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
goto err_mmio_read_less;
}
@ -3450,6 +3443,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
if (rc) {
dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
goto err_disable_device;
}
pci_set_master(pdev);
ena_dev = vzalloc(sizeof(*ena_dev));

@ -10826,7 +10826,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
dev_err(&pdev->dev, "System does not support DMA, aborting\n");
goto init_err_disable;
rc = -EIO;
goto init_err_release;
}
pci_set_master(pdev);
@ -11892,6 +11893,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
create_singlethread_workqueue("bnxt_pf_wq");
if (!bnxt_pf_wq) {
dev_err(&pdev->dev, "Unable to create workqueue.\n");
rc = -ENOMEM;
goto init_err_pci_clean;
}
}

@ -630,7 +630,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
fwr->smac_sel = f->smt->idx;
if (f->fs.newsmac)
fwr->smac_sel = f->smt->idx;
fwr->rx_chan_rx_rpl_iq =
htons(FW_FILTER_WR_RX_CHAN_V(0) |
FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));

@ -1994,8 +1994,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
if (adapter->reset_reason != VNIC_RESET_FAILOVER)
if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
adapter->reset_reason == VNIC_RESET_MOBILITY) {
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
}
rc = 0;
@ -2065,6 +2068,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
if (rc)
return IBMVNIC_OPEN_FAILED;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
return 0;
}
@ -2761,6 +2767,9 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
int i, rc;
if (!adapter->tx_scrq || !adapter->rx_scrq)
return -EINVAL;
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
@ -4768,6 +4777,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
/* Clean out the queue */
if (!crq->msgs)
return -EINVAL;
memset(crq->msgs, 0, PAGE_SIZE);
crq->cur = 0;
crq->active = false;

@ -150,6 +150,7 @@ enum i40e_state_t {
__I40E_CLIENT_RESET,
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};

@ -3988,8 +3988,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
}
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
/* disable any further VFLR event notifications */
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
} else {
ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
}
}
if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
@ -15345,6 +15353,11 @@ static void i40e_remove(struct pci_dev *pdev)
while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
}
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
@ -15371,11 +15384,6 @@ static void i40e_remove(struct pci_dev *pdev)
*/
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
}
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.

@ -1335,7 +1335,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
* Returns true if the VF is reset, false otherwise.
* Returns true if the VF is in reset, resets successfully, or resets
* are disabled and false otherwise.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@ -1345,11 +1346,14 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
u32 reg;
int i;
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
return true;
/* If the VFs have been disabled, this means something else is
* resetting the VF, so we shouldn't continue.
*/
if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
return false;
return true;
i40e_trigger_vf_reset(vf, flr);
@ -1513,6 +1517,15 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_notify_client_of_vf_enable(pf, 0);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
*/
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
/* Amortize wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
@ -1528,15 +1541,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
}
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
*/
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
/* free up VF resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;

@ -3069,6 +3069,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
goto out_unlock;
}
if (vif->type == NL80211_IFTYPE_STATION)
vif->bss_conf.he_support = sta->he_cap.has_he;
if (sta->tdls &&
(vif->p2p ||
iwl_mvm_tdls_sta_count(mvm, NULL) ==

@ -26,8 +26,8 @@ struct s3fwrn5_i2c_phy {
struct i2c_client *i2c_dev;
struct nci_dev *ndev;
unsigned int gpio_en;
unsigned int gpio_fw_wake;
int gpio_en;
int gpio_fw_wake;
struct mutex mutex;

@ -271,9 +271,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev,
nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}
static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{
if (!nvmeq->qid)
return;
nvmeq->dbbuf_sq_db = NULL;
nvmeq->dbbuf_cq_db = NULL;
nvmeq->dbbuf_sq_ei = NULL;
nvmeq->dbbuf_cq_ei = NULL;
}
static void nvme_dbbuf_set(struct nvme_dev *dev)
{
struct nvme_command c;
unsigned int i;
if (!dev->dbbuf_dbs)
return;
@ -287,6 +299,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
for (i = 1; i <= dev->online_queues; i++)
nvme_dbbuf_free(&dev->queues[i]);
}
}

@ -949,6 +949,7 @@ power_down:
reset:
reset_control_assert(padctl->rst);
remove:
platform_set_drvdata(pdev, NULL);
soc->ops->remove(padctl);
return err;
}

@ -4238,6 +4238,7 @@ static void hotkey_resume(void)
pr_err("error while attempting to reset the event firmware interface\n");
tpacpi_send_radiosw_update();
tpacpi_input_send_tabletsw();
hotkey_tablet_mode_notify_change();
hotkey_wakeup_reason_notify_change();
hotkey_wakeup_hotunplug_complete_notify_change();

@ -1485,7 +1485,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char *buffer;
char *cmd;
int lcd_out, crt_out, tv_out;
int lcd_out = -1, crt_out = -1, tv_out = -1;
int remain = count;
int value;
int ret;
@ -1517,7 +1517,6 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
kfree(cmd);
lcd_out = crt_out = tv_out = -1;
ret = get_video_status(dev, &video_out);
if (!ret) {
unsigned int new_video_out = video_out;

@ -436,10 +436,13 @@ enum qeth_qdio_out_buffer_state {
QETH_QDIO_BUF_EMPTY,
/* Filled by driver; owned by hardware in order to be sent. */
QETH_QDIO_BUF_PRIMED,
/* Identified to be pending in TPQ. */
/* Discovered by the TX completion code: */
QETH_QDIO_BUF_PENDING,
/* Found in completion queue. */
QETH_QDIO_BUF_IN_CQ,
/* Finished by the TX completion code: */
QETH_QDIO_BUF_NEED_QAOB,
/* Received QAOB notification on CQ: */
QETH_QDIO_BUF_QAOB_OK,
QETH_QDIO_BUF_QAOB_ERROR,
/* Handled via transfer pending / completion queue. */
QETH_QDIO_BUF_HANDLED_DELAYED,
};
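
Note: read together with the qeth_core_main.c hunk below, these
states form a hand-off protocol between the TX completion path and
the QAOB completion-queue path; roughly, as reconstructed from this
diff (not authoritative):

    PRIMED  -> PENDING              TX completion ran first
    PENDING -> NEED_QAOB            TX completion done, QAOB outstanding
    PRIMED  -> QAOB_OK / QAOB_ERROR QAOB arrived first
    any     -> HANDLED_DELAYED      whichever side runs second finalizes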

@ -31,6 +31,7 @@
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/chpid.h>
@ -425,18 +426,13 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
}
}
if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
QETH_QDIO_BUF_HANDLED_DELAYED)) {
/* for recovery situations */
qeth_init_qdio_out_buf(q, bidx);
QETH_CARD_TEXT(q->card, 2, "clprecov");
}
}
static void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr)
{
enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
struct qaob *aob;
struct qeth_qdio_out_buffer *buffer;
enum iucv_tx_notify notification;
@ -448,22 +444,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
buffer = (struct qeth_qdio_out_buffer *) aob->user1;
QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
notification = TX_NOTIFY_OK;
} else {
WARN_ON_ONCE(atomic_read(&buffer->state) !=
QETH_QDIO_BUF_PENDING);
atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
notification = TX_NOTIFY_DELAYED_OK;
}
if (aob->aorc != 0) {
QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
notification = qeth_compute_cq_notification(aob->aorc, 1);
}
qeth_notify_skbs(buffer->q, buffer, notification);
/* Free dangling allocations. The attached skbs are handled by
* qeth_cleanup_handled_pending().
*/
@ -474,7 +454,33 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
kmem_cache_free(qeth_core_header_cache,
(void *) aob->sba[i]);
}
atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
if (aob->aorc) {
QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
new_state = QETH_QDIO_BUF_QAOB_ERROR;
}
switch (atomic_xchg(&buffer->state, new_state)) {
case QETH_QDIO_BUF_PRIMED:
/* Faster than TX completion code. */
notification = qeth_compute_cq_notification(aob->aorc, 0);
qeth_notify_skbs(buffer->q, buffer, notification);
atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
break;
case QETH_QDIO_BUF_PENDING:
/* TX completion code is active and will handle the async
* completion for us.
*/
break;
case QETH_QDIO_BUF_NEED_QAOB:
/* TX completion code is already finished. */
notification = qeth_compute_cq_notification(aob->aorc, 1);
qeth_notify_skbs(buffer->q, buffer, notification);
atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
break;
default:
WARN_ON_ONCE(1);
}
qdio_release_aob(aob);
}
@ -1083,7 +1089,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
skb_queue_walk(&buf->skb_list, skb) {
QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
if (skb->sk && skb->sk->sk_family == PF_IUCV)
iucv_sk(skb->sk)->sk_txnotify(skb, notification);
}
}
@ -1094,9 +1100,6 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
struct qeth_qdio_out_q *queue = buf->q;
struct sk_buff *skb;
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
@ -5223,9 +5226,32 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
QETH_QDIO_BUF_PENDING) ==
QETH_QDIO_BUF_PRIMED)
QETH_QDIO_BUF_PRIMED) {
qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
/* Handle race with qeth_qdio_handle_aob(): */
switch (atomic_xchg(&buffer->state,
QETH_QDIO_BUF_NEED_QAOB)) {
case QETH_QDIO_BUF_PENDING:
/* No concurrent QAOB notification. */
break;
case QETH_QDIO_BUF_QAOB_OK:
qeth_notify_skbs(queue, buffer,
TX_NOTIFY_DELAYED_OK);
atomic_set(&buffer->state,
QETH_QDIO_BUF_HANDLED_DELAYED);
break;
case QETH_QDIO_BUF_QAOB_ERROR:
qeth_notify_skbs(queue, buffer,
TX_NOTIFY_DELAYED_GENERALERROR);
atomic_set(&buffer->state,
QETH_QDIO_BUF_HANDLED_DELAYED);
break;
default:
WARN_ON_ONCE(1);
}
}
QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
/* prepare the queue slot for re-use: */
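
The rework in the two qeth hunks above replaces the old PRIMED/PENDING handshake with an explicit state machine so the QAOB handler and the TX completion path may race in either order: each side performs one atomic exchange, and whichever side observes that the other has already run finishes the notification. A stand-alone C11 analogue of the protocol (single buffer, error state omitted, names illustrative):

#include <stdatomic.h>
#include <stdio.h>

enum buf_state { PRIMED, PENDING, NEED_QAOB, QAOB_OK, HANDLED };

static _Atomic enum buf_state state = PRIMED;

static void tx_complete(void)			/* qeth_iqd_tx_complete() side */
{
	enum buf_state old = PRIMED;

	if (!atomic_compare_exchange_strong(&state, &old, PENDING))
		return;				/* buffer completed normally */
	/* notify "pending" here, then hand over to the async completion: */
	switch (atomic_exchange(&state, NEED_QAOB)) {
	case PENDING:				/* QAOB not seen yet */
		break;
	case QAOB_OK:				/* QAOB won the race: finish */
		atomic_store(&state, HANDLED);
		break;
	default:
		break;
	}
}

static void qaob_notify(void)			/* qeth_qdio_handle_aob() side */
{
	switch (atomic_exchange(&state, QAOB_OK)) {
	case PRIMED:				/* faster than TX completion */
	case NEED_QAOB:				/* TX completion already done */
		atomic_store(&state, HANDLED);
		break;
	case PENDING:				/* TX completion will finish */
		break;
	default:
		break;
	}
}

int main(void)
{
	tx_complete();
	qaob_notify();
	printf("final state %d (HANDLED == %d)\n",
	       (int)atomic_load(&state), (int)HANDLED);
	return 0;
}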

View File

@ -533,8 +533,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
if (conn->task == task)
conn->task = NULL;
if (conn->ping_task == task)
conn->ping_task = NULL;
if (READ_ONCE(conn->ping_task) == task)
WRITE_ONCE(conn->ping_task, NULL);
/* release get from queueing */
__iscsi_put_task(task);
@ -738,6 +738,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
WRITE_ONCE(conn->ping_task, task);
if (!ihost->workq) {
if (iscsi_prep_mgmt_task(conn, task))
goto free_task;
@ -941,8 +944,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
struct iscsi_nopout hdr;
struct iscsi_task *task;
if (!rhdr && conn->ping_task)
return -EINVAL;
if (!rhdr) {
if (READ_ONCE(conn->ping_task))
return -EINVAL;
WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
}
memset(&hdr, 0, sizeof(struct iscsi_nopout));
hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@ -957,11 +963,12 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
if (!task) {
if (!rhdr)
WRITE_ONCE(conn->ping_task, NULL);
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return -EIO;
} else if (!rhdr) {
/* only track our nops */
conn->ping_task = task;
conn->last_ping = jiffies;
}
@ -984,7 +991,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task,
struct iscsi_conn *conn = task->conn;
int rc = 0;
if (conn->ping_task != task) {
if (READ_ONCE(conn->ping_task) != task) {
/*
* If this is not in response to one of our
* nops then it must be from userspace.
@ -1923,7 +1930,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
*/
static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
{
if (conn->ping_task &&
if (READ_ONCE(conn->ping_task) &&
time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
return 1;
@ -2058,7 +2065,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
* Checking the transport already or nop from a cmd timeout still
* running
*/
if (conn->ping_task) {
if (READ_ONCE(conn->ping_task)) {
task->have_checked_conn = true;
rc = BLK_EH_RESET_TIMER;
goto done;
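
conn->ping_task is now read and written with READ_ONCE()/WRITE_ONCE(), and the slot is reserved with the INVALID_SCSI_TASK sentinel before the nopout task actually exists, closing the window in which a second nopout could slip in between the NULL check and the assignment. A user-space analogue of the reservation protocol (relaxed atomics stand in for READ_ONCE/WRITE_ONCE; names are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

struct task { int id; };

#define INVALID_TASK ((struct task *)-1l)	/* mirrors INVALID_SCSI_TASK */

static struct task *_Atomic ping_task;

static int send_nopout(struct task *t)
{
	/* driver-initiated pings only: bail out if one is in flight */
	if (atomic_load_explicit(&ping_task, memory_order_relaxed))
		return -1;
	/* reserve the slot before the task exists */
	atomic_store_explicit(&ping_task, INVALID_TASK, memory_order_relaxed);
	/* ... allocate and queue the real task; on failure store NULL ... */
	atomic_store_explicit(&ping_task, t, memory_order_relaxed);
	return 0;
}

int main(void)
{
	struct task t = { .id = 1 };

	printf("first nopout:  %d\n", send_nopout(&t));	/* 0  */
	printf("second nopout: %d\n", send_nopout(&t));	/* -1, slot busy */
	return 0;
}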

View File

@ -8160,11 +8160,7 @@ int ufshcd_shutdown(struct ufs_hba *hba)
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
goto out;
if (pm_runtime_suspended(hba->dev)) {
ret = ufshcd_runtime_resume(hba);
if (ret)
goto out;
}
pm_runtime_get_sync(hba->dev);
ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:

View File

@ -1213,7 +1213,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
if (!of_match_node(bcm_qspi_of_match, dev->of_node))
return -ENODEV;
master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
if (!master) {
dev_err(dev, "error allocating spi_master\n");
return -ENOMEM;
@ -1252,21 +1252,17 @@ int bcm_qspi_probe(struct platform_device *pdev,
if (res) {
qspi->base[MSPI] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[MSPI])) {
ret = PTR_ERR(qspi->base[MSPI]);
goto qspi_resource_err;
}
if (IS_ERR(qspi->base[MSPI]))
return PTR_ERR(qspi->base[MSPI]);
} else {
goto qspi_resource_err;
return 0;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
if (res) {
qspi->base[BSPI] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[BSPI])) {
ret = PTR_ERR(qspi->base[BSPI]);
goto qspi_resource_err;
}
if (IS_ERR(qspi->base[BSPI]))
return PTR_ERR(qspi->base[BSPI]);
qspi->bspi_mode = true;
} else {
qspi->bspi_mode = false;
@ -1277,18 +1273,14 @@ int bcm_qspi_probe(struct platform_device *pdev,
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
if (res) {
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[CHIP_SELECT])) {
ret = PTR_ERR(qspi->base[CHIP_SELECT]);
goto qspi_resource_err;
}
if (IS_ERR(qspi->base[CHIP_SELECT]))
return PTR_ERR(qspi->base[CHIP_SELECT]);
}
qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
GFP_KERNEL);
if (!qspi->dev_ids) {
ret = -ENOMEM;
goto qspi_resource_err;
}
if (!qspi->dev_ids)
return -ENOMEM;
for (val = 0; val < num_irqs; val++) {
irq = -1;
@ -1357,7 +1349,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->xfer_mode.addrlen = -1;
qspi->xfer_mode.hp = -1;
ret = devm_spi_register_master(&pdev->dev, master);
ret = spi_register_master(master);
if (ret < 0) {
dev_err(dev, "can't register master\n");
goto qspi_reg_err;
@ -1370,8 +1362,6 @@ qspi_reg_err:
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
kfree(qspi->dev_ids);
qspi_resource_err:
spi_master_put(master);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
@ -1381,10 +1371,10 @@ int bcm_qspi_remove(struct platform_device *pdev)
{
struct bcm_qspi *qspi = platform_get_drvdata(pdev);
spi_unregister_master(qspi->master);
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
kfree(qspi->dev_ids);
spi_unregister_master(qspi->master);
return 0;
}
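
The conversion to devm_spi_alloc_master() ties the controller allocation to the device's lifetime, which is why every probe error path can now simply return, and why spi_unregister_master() moves to the top of remove(): the memory stays valid until after the devm release phase runs. A skeleton of the resulting probe/remove shape, for a hypothetical driver (a sketch of the pattern, not the bcm_qspi code itself):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int demo_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	void __iomem *regs;

	ctlr = devm_spi_alloc_master(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;		/* no spi_controller_put() needed */

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* devm unwinds the allocation */

	platform_set_drvdata(pdev, ctlr);
	/* registration stays non-devm so remove() can unregister first */
	return spi_register_master(ctlr);
}

static int demo_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);

	spi_unregister_master(ctlr);	/* stop traffic first... */
	/* ...then tear down clocks/irqs; ctlr memory outlives this and is
	 * freed by devm only after remove() returns */
	return 0;
}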

View File

@ -1264,7 +1264,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
struct bcm2835_spi *bs;
int err;
ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
dma_get_cache_alignment()));
if (!ctlr)
return -ENOMEM;
@ -1284,23 +1284,19 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs = spi_controller_get_devdata(ctlr);
bs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bs->regs)) {
err = PTR_ERR(bs->regs);
goto out_controller_put;
}
if (IS_ERR(bs->regs))
return PTR_ERR(bs->regs);
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_controller_put;
return err;
}
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
err = bs->irq ? bs->irq : -ENODEV;
goto out_controller_put;
}
if (bs->irq <= 0)
return bs->irq ? bs->irq : -ENODEV;
clk_prepare_enable(bs->clk);
@ -1330,8 +1326,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
out_clk_disable:
clk_disable_unprepare(bs->clk);
out_controller_put:
spi_controller_put(ctlr);
return err;
}

View File

@ -529,8 +529,9 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
return PTR_ERR(bs->clk);
return err;
}
bs->irq = platform_get_irq(pdev, 0);

View File

@ -2,6 +2,7 @@
config DMA_RALINK
tristate "RALINK DMA support"
depends on RALINK && !SOC_RT288X
depends on DMADEVICES
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS

View File

@ -483,8 +483,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node) &&
!(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
if (!list_empty(&cmd->i_conn_node))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
@ -4082,12 +4081,22 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
spin_lock_bh(&conn->cmd_lock);
list_splice_init(&conn->conn_cmd_list, &tmp_list);
list_for_each_entry(cmd, &tmp_list, i_conn_node) {
list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
struct se_cmd *se_cmd = &cmd->se_cmd;
if (se_cmd->se_tfo != NULL) {
spin_lock_irq(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
if (se_cmd->transport_state & CMD_T_ABORTED) {
/*
* LIO's abort path owns the cleanup for this,
* so put it back on the list and let
* aborted_task handle it.
*/
list_move_tail(&cmd->i_conn_node,
&conn->conn_cmd_list);
} else {
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
}
spin_unlock_irq(&se_cmd->t_state_lock);
}
}

View File

@ -558,10 +558,8 @@ void optee_free_pages_list(void *list, size_t num_entries)
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
u32 attr = pgprot_val(p) & L_PTE_MT_MASK;
return (attr == L_PTE_MT_WRITEALLOC) || (attr == L_PTE_MT_WRITEBACK) ||
(attr == L_PTE_MT_WRITETHROUGH);
return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else

View File

@ -482,11 +482,11 @@ static void snoop_urb(struct usb_device *udev,
if (userurb) { /* Async */
if (when == SUBMIT)
dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
"length %u\n",
userurb, ep, t, d, length);
else
dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
"actual_length %u status %d\n",
userurb, ep, t, d, length,
timeout_or_status);
@ -1992,7 +1992,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
if (as) {
int retval;
snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl(as, (void __user * __user *)arg);
free_async(as);
return retval;
@ -2009,7 +2009,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
as = async_getcompleted(ps);
if (as) {
snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl(as, (void __user * __user *)arg);
free_async(as);
} else {
@ -2139,7 +2139,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
if (as) {
int retval;
snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl_compat(as, (void __user * __user *)arg);
free_async(as);
return retval;
@ -2156,7 +2156,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
as = async_getcompleted(ps);
if (as) {
snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl_compat(as, (void __user * __user *)arg);
free_async(as);
} else {
@ -2621,7 +2621,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
#endif
case USBDEVFS_DISCARDURB:
snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p);
ret = proc_unlinkurb(ps, p);
break;

View File

@ -348,6 +348,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Guillemot Webcam Hercules Dualpix Exchange*/
{ USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
/* Guillemot Hercules DJ Console audio card (BZ 208357) */
{ USB_DEVICE(0x06f8, 0xb000), .driver_info =
USB_QUIRK_ENDPOINT_BLACKLIST },
/* Midiman M-Audio Keystation 88es */
{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
@ -421,6 +425,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
/* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
/* BUILDWIN Photo Frame */
{ USB_DEVICE(0x1908, 0x1315), .driver_info =
USB_QUIRK_HONOR_BNUMINTERFACES },
@ -521,6 +529,8 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
* Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
*/
static const struct usb_device_id usb_endpoint_blacklist[] = {
{ USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 },
{ USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 },
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
{ }

View File

@ -1315,7 +1315,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
midi->id = kstrdup(opts->id, GFP_KERNEL);
if (opts->id && !midi->id) {
status = -ENOMEM;
goto setup_fail;
goto midi_free;
}
midi->in_ports = opts->in_ports;
midi->out_ports = opts->out_ports;
@ -1327,7 +1327,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL);
if (status)
goto setup_fail;
goto midi_free;
spin_lock_init(&midi->transmit_lock);
@ -1343,9 +1343,13 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
return &midi->func;
midi_free:
if (midi)
kfree(midi->id);
kfree(midi);
setup_fail:
mutex_unlock(&opts->lock);
kfree(midi);
return ERR_PTR(status);
}
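
This is the classic goto-unwind ordering fix: the error label must free exactly what has been allocated so far, and the mutex must be dropped on every path. The old code jumped to setup_fail after kfifo_alloc() failed and leaked midi->id. A stand-alone C skeleton of the corrected shape (POSIX mutex and heap calls stand in for the kernel primitives; names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

struct midi { char *id; };

static pthread_mutex_t opts_lock = PTHREAD_MUTEX_INITIALIZER;

static struct midi *midi_alloc(const char *opt_id)
{
	struct midi *midi;

	pthread_mutex_lock(&opts_lock);
	midi = calloc(1, sizeof(*midi));
	if (!midi)
		goto setup_fail;	/* nothing else allocated yet */

	midi->id = strdup(opt_id);
	if (!midi->id)
		goto midi_free;		/* midi exists, its id does not */

	pthread_mutex_unlock(&opts_lock);
	return midi;

midi_free:
	free(midi->id);			/* free(NULL) is a no-op */
	free(midi);
setup_fail:
	pthread_mutex_unlock(&opts_lock);
	return NULL;
}

int main(void)
{
	struct midi *m = midi_alloc("demo");

	printf("alloc %s\n", m ? "ok" : "failed");
	if (m) {
		free(m->id);
		free(m);
	}
	return 0;
}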

View File

@ -2040,6 +2040,9 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
return 0;
Enomem:
kfree(CHIP);
CHIP = NULL;
return -ENOMEM;
}

View File

@ -320,7 +320,7 @@ static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
@ -340,6 +340,16 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
target_free_tag(se_sess, se_cmd);
}
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
struct vhost_scsi *vs = cmd->tvc_vhost;
llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
return 0;
@ -362,28 +372,15 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
return 0;
}
static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
{
struct vhost_scsi *vs = cmd->tvc_vhost;
llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
vhost_scsi_complete_cmd(cmd);
transport_generic_free_cmd(se_cmd, 0);
return 0;
}
static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
vhost_scsi_complete_cmd(cmd);
transport_generic_free_cmd(se_cmd, 0);
return 0;
}
@ -429,15 +426,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs,
return evt;
}
static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
/* TODO locking against target/backend threads? */
transport_generic_free_cmd(se_cmd, 0);
}
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd);
@ -556,7 +544,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
vhost_scsi_free_cmd(cmd);
vhost_scsi_release_cmd_res(se_cmd);
}
vq = -1;
@ -1088,7 +1076,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
&prot_iter, exp_data_len,
&data_iter))) {
vq_err(vq, "Failed to map iov to sgl\n");
vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
goto err;
}
}

View File

@ -703,7 +703,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto err1;
}
fb_virt = ioremap(par->mem->start, screen_fb_size);
/*
* Map the VRAM cacheable for performance. This is also required for
* VM Connect to display properly for ARM64 Linux VM, as the host also
* maps the VRAM cacheable.
*/
fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
if (!fb_virt)
goto err2;

View File

@ -488,13 +488,13 @@ next2:
break;
}
out:
btrfs_free_path(path);
fs_info->qgroup_flags |= flags;
if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
ret >= 0)
ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
btrfs_free_path(path);
if (ret < 0) {
ulist_free(fs_info->qgroup_ulist);

View File

@ -913,6 +913,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
"invalid root item size, have %u expect %zu or %u",
btrfs_item_size_nr(leaf, slot), sizeof(ri),
btrfs_legacy_root_item_size());
return -EUCLEAN;
}
/*
@ -1268,6 +1269,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
"invalid item size, have %u expect aligned to %zu for key type %u",
btrfs_item_size_nr(leaf, slot),
sizeof(*dref), key->type);
return -EUCLEAN;
}
if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
generic_err(leaf, slot,
@ -1296,6 +1298,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
extent_err(leaf, slot,
"invalid extent data backref offset, have %llu expect aligned to %u",
offset, leaf->fs_info->sectorsize);
return -EUCLEAN;
}
}
return 0;

View File

@ -1122,7 +1122,13 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (device->bdev != path_bdev) {
bdput(path_bdev);
mutex_unlock(&fs_devices->device_list_mutex);
btrfs_warn_in_rcu(device->fs_info,
/*
* device->fs_info may not be reliable here, so
* pass in a NULL instead. This avoids a
* possible use-after-free when the fs_info and
* fs_info->sb are already torn down.
*/
btrfs_warn_in_rcu(NULL,
"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
path, devid, found_transid,
current->comm,

View File

@ -1198,6 +1198,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
} else if (mode_from_special_sid) {
rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true);
kfree(pntsd);
} else {
/* get approximated mode from ACL */
rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false);

View File

@ -259,7 +259,7 @@ smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
}
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
{
struct mid_q_entry *mid;
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
@ -276,6 +276,10 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
(mid->command == shdr->Command)) {
kref_get(&mid->refcount);
if (dequeue) {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
return mid;
}
@ -284,6 +288,18 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
return NULL;
}
static struct mid_q_entry *
smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
return __smb2_find_mid(server, buf, false);
}
static struct mid_q_entry *
smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
{
return __smb2_find_mid(server, buf, true);
}
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
@ -3979,7 +3995,8 @@ init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
char *buf, unsigned int buf_len, struct page **pages,
unsigned int npages, unsigned int page_data_size)
unsigned int npages, unsigned int page_data_size,
bool is_offloaded)
{
unsigned int data_offset;
unsigned int data_len;
@ -4001,7 +4018,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
if (server->ops->is_session_expired &&
server->ops->is_session_expired(buf)) {
cifs_reconnect(server);
if (!is_offloaded)
cifs_reconnect(server);
wake_up(&server->response_q);
return -1;
}
@ -4026,7 +4044,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
cifs_dbg(FYI, "%s: server returned error %d\n",
__func__, rdata->result);
/* normal error on read response */
dequeue_mid(mid, false);
if (is_offloaded)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
dequeue_mid(mid, false);
return 0;
}
@ -4050,7 +4071,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
__func__, data_offset);
rdata->result = -EIO;
dequeue_mid(mid, rdata->result);
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
else
dequeue_mid(mid, rdata->result);
return 0;
}
@ -4066,21 +4090,30 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
__func__, data_offset);
rdata->result = -EIO;
dequeue_mid(mid, rdata->result);
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
else
dequeue_mid(mid, rdata->result);
return 0;
}
if (data_len > page_data_size - pad_len) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
dequeue_mid(mid, rdata->result);
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
else
dequeue_mid(mid, rdata->result);
return 0;
}
rdata->result = init_read_bvec(pages, npages, page_data_size,
cur_off, &bvec);
if (rdata->result != 0) {
dequeue_mid(mid, rdata->result);
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
else
dequeue_mid(mid, rdata->result);
return 0;
}
@ -4095,7 +4128,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
/* read response payload cannot be in both buf and pages */
WARN_ONCE(1, "buf can not contain only a part of read data");
rdata->result = -EIO;
dequeue_mid(mid, rdata->result);
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
else
dequeue_mid(mid, rdata->result);
return 0;
}
@ -4106,7 +4142,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
if (length < 0)
return length;
dequeue_mid(mid, false);
if (is_offloaded)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
dequeue_mid(mid, false);
return length;
}
@ -4135,15 +4174,34 @@ static void smb2_decrypt_offload(struct work_struct *work)
}
dw->server->lstrp = jiffies;
mid = smb2_find_mid(dw->server, dw->buf);
mid = smb2_find_dequeue_mid(dw->server, dw->buf);
if (mid == NULL)
cifs_dbg(FYI, "mid not found\n");
else {
mid->decrypted = true;
rc = handle_read_data(dw->server, mid, dw->buf,
dw->server->vals->read_rsp_size,
dw->ppages, dw->npages, dw->len);
mid->callback(mid);
dw->ppages, dw->npages, dw->len,
true);
if (rc >= 0) {
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
mid->callback(mid);
} else {
spin_lock(&GlobalMid_Lock);
if (dw->server->tcpStatus == CifsNeedReconnect) {
mid->mid_state = MID_RETRY_NEEDED;
spin_unlock(&GlobalMid_Lock);
mid->callback(mid);
} else {
mid->mid_state = MID_REQUEST_SUBMITTED;
mid->mid_flags &= ~(MID_DELETED);
list_add_tail(&mid->qhead,
&dw->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
}
}
cifs_mid_q_entry_release(mid);
}
@ -4246,7 +4304,7 @@ non_offloaded_decrypt:
(*mid)->decrypted = true;
rc = handle_read_data(server, *mid, buf,
server->vals->read_rsp_size,
pages, npages, len);
pages, npages, len, false);
}
free_pages:
@ -4391,7 +4449,7 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
return handle_read_data(server, mid, buf, server->pdu_size,
NULL, 0, 0);
NULL, 0, 0, false);
}
static int

View File

@ -7,6 +7,7 @@
#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/kmemleak.h>
#include <linux/slab.h>
#include <linux/uuid.h>
@ -103,6 +104,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
var->var.VariableName[i] = '\0';
inode->i_private = var;
kmemleak_ignore(var);
err = efivar_entry_add(var, &efivarfs_list);
if (err)

View File

@ -21,7 +21,6 @@ LIST_HEAD(efivarfs_list);
static void efivarfs_evict_inode(struct inode *inode)
{
clear_inode(inode);
kfree(inode->i_private);
}
static const struct super_operations efivarfs_ops = {

View File

@ -16,6 +16,13 @@ static const char *proc_self_get_link(struct dentry *dentry,
pid_t tgid = task_tgid_nr_ns(current, ns);
char *name;
/*
* Not currently supported. Once we can inherit all of struct pid,
* we can allow this.
*/
if (current->flags & PF_KTHREAD)
return ERR_PTR(-EOPNOTSUPP);
if (!tgid)
return ERR_PTR(-ENOENT);
/* max length of unsigned int in decimal + NULL term */

View File

@ -1159,6 +1159,19 @@ static inline bool arch_has_pfn_modify_check(void)
#endif /* !__ASSEMBLY__ */
#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
#ifdef CONFIG_PHYS_ADDR_T_64BIT
/*
* ZSMALLOC needs to know the highest PFN on 32-bit architectures
* with physical address space extension, but falls back to
* BITS_PER_LONG otherwise.
*/
#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#endif
#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
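
This generic fallback sits behind the per-arch MAX_POSSIBLE_PHYSMEM_BITS definitions added near the top of this diff (ARC PAE40, ARM with and without LPAE): zsmalloc packs a PFN and an object index into a single unsigned long handle, so a 32-bit architecture with extended physical addressing must declare how many PFN bits it really needs or the build now fails. Rough arithmetic, for illustration only (the real split in mm/zsmalloc.c also reserves tag bits):

#include <stdio.h>

#define PAGE_SHIFT 12

static void split(int physmem_bits, int bits_per_long)
{
	int pfn_bits = physmem_bits - PAGE_SHIFT;

	printf("phys bits %2d -> pfn bits %2d, handle bits left %2d\n",
	       physmem_bits, pfn_bits, bits_per_long - pfn_bits);
}

int main(void)
{
	split(32, 32);	/* plain 32-bit: 20 pfn bits, 12 left for the index */
	split(40, 32);	/* ARM LPAE / ARC PAE40: 28 pfn bits, only 4 left */
	return 0;
}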

View File

@ -316,7 +316,7 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
list_del(&skb->list);
skb_list_del_init(skb);
if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
list_add_tail(&skb->list, &sublist);
}
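
skb_list_del_init() re-initializes skb->list instead of leaving the poison values that plain list_del() writes, which matters because the skb then travels on to okfn() and free paths that may look at the list head again. The difference, shown with a simplified doubly linked list (not the kernel's implementation):

#include <stdio.h>

struct node { struct node *prev, *next; };

#define POISON1 ((struct node *)0x100)
#define POISON2 ((struct node *)0x122)

static void list_del(struct node *n)
{
	n->next->prev = n->prev;
	n->prev->next = n->next;
	n->next = POISON1;	/* any later traversal hits poison */
	n->prev = POISON2;
}

static void list_del_init(struct node *n)
{
	n->next->prev = n->prev;
	n->prev->next = n->next;
	n->next = n->prev = n;	/* node is now a valid empty list */
}

int main(void)
{
	struct node head = { &head, &head };
	struct node a = { &head, &head };

	head.next = head.prev = &a;
	list_del(&a);
	printf("after list_del: a.next = %p (poison)\n", (void *)a.next);

	head.next = head.prev = &a;
	a.prev = a.next = &head;
	list_del_init(&a);
	printf("after list_del_init: a empty = %d\n", a.next == &a);
	return 0;
}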

View File

@ -132,6 +132,9 @@ struct iscsi_task {
void *dd_data; /* driver/transport data */
};
/* invalid scsi_task pointer */
#define INVALID_SCSI_TASK (struct iscsi_task *)-1l
static inline int iscsi_task_has_unsol_data(struct iscsi_task *task)
{
return task->unsol_r2t.data_length > task->unsol_r2t.sent;

View File

@ -192,7 +192,7 @@ TRACE_EVENT(inode_foreign_history,
),
TP_fast_assign(
strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
__entry->history = history;
@ -221,7 +221,7 @@ TRACE_EVENT(inode_switch_wbs,
),
TP_fast_assign(
strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
__entry->ino = inode->i_ino;
__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
@ -254,7 +254,7 @@ TRACE_EVENT(track_foreign_dirty,
struct address_space *mapping = page_mapping(page);
struct inode *inode = mapping ? mapping->host : NULL;
strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->bdi_id = wb->bdi->id;
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
@ -287,7 +287,7 @@ TRACE_EVENT(flush_foreign,
),
TP_fast_assign(
strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
__entry->frn_bdi_id = frn_bdi_id;
__entry->frn_memcg_id = frn_memcg_id;
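
strscpy_pad() fixes what strncpy() gets wrong in these fixed 32-byte trace fields: the copy is NUL-terminated even when the device name fills the field, and the tail is still zero-filled so no stale bytes reach the trace buffer. A minimal userspace re-implementation to show the semantics (the real one lives in lib/string.c and returns -E2BIG on truncation):

#include <stdio.h>
#include <string.h>

/* Illustrative mini version: truncate, terminate, zero-fill the rest. */
static long strscpy_pad(char *dst, const char *src, size_t count)
{
	size_t len = strnlen(src, count - 1);

	memcpy(dst, src, len);
	memset(dst + len, 0, count - len);	/* always terminated, padded */
	return src[len] ? -1 /* truncated; -E2BIG in the kernel */ : (long)len;
}

int main(void)
{
	char a[8], b[8];

	strncpy(a, "0123456789", sizeof(a));	 /* a[7]='7': NOT terminated */
	strscpy_pad(b, "0123456789", sizeof(b)); /* b = "0123456\0" */
	printf("last byte: strncpy=%d strscpy_pad=%d\n", a[7], b[7]);
	return 0;
}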

View File

@ -74,7 +74,11 @@
#include <linux/socket.h> /* for "struct sockaddr" et al */
#include <linux/if.h> /* for IFNAMSIZ and co... */
#include <stddef.h> /* for offsetof */
#ifdef __KERNEL__
# include <linux/stddef.h> /* for offsetof */
#else
# include <stddef.h> /* for offsetof */
#endif
/***************************** VERSION *****************************/
/*

View File

@ -18,6 +18,8 @@
*/
#define SKL_CONTROL_TYPE_BYTE_TLV 0x100
#define SKL_CONTROL_TYPE_MIC_SELECT 0x102
#define SKL_CONTROL_TYPE_MULTI_IO_SELECT 0x103
#define SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC 0x104
#define HDA_SST_CFG_MAX 900 /* size of copier cfg*/
#define MAX_IN_QUEUE 8

View File

@ -180,6 +180,7 @@ static const struct file_operations batadv_log_fops = {
.read = batadv_log_read,
.poll = batadv_log_poll,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
/**

View File

@ -706,7 +706,7 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
cfg->fc_gw4 = *((__be32 *)via->rtvia_addr);
break;
case AF_INET6:
#ifdef CONFIG_IPV6
#if IS_ENABLED(CONFIG_IPV6)
if (alen != sizeof(struct in6_addr)) {
NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA");
return -EINVAL;
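
#ifdef CONFIG_IPV6 is only true when IPv6 is built in; with CONFIG_IPV6=m the preprocessor sees CONFIG_IPV6_MODULE instead, so this RTA_VIA length check silently compiled out. IS_ENABLED() covers both =y and =m. A self-contained demo of the idiom, with the include/linux/kconfig.h helpers inlined in simplified form:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option) __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

#define CONFIG_IPV6_MODULE 1	/* simulate CONFIG_IPV6=m */

int main(void)
{
#ifdef CONFIG_IPV6
	printf("#ifdef sees IPv6\n");
#else
	printf("#ifdef misses IPv6=m\n");	/* this branch runs */
#endif
	printf("IS_ENABLED(CONFIG_IPV6) = %d\n", IS_ENABLED(CONFIG_IPV6));
	return 0;
}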

View File

@ -17,7 +17,6 @@
#include "skl.h"
#define BXT_BASEFW_TIMEOUT 3000
#define BXT_INIT_TIMEOUT 300
#define BXT_ROM_INIT_TIMEOUT 70
#define BXT_IPC_PURGE_FW 0x01004000
@ -38,8 +37,6 @@
/* Delay before scheduling D0i3 entry */
#define BXT_D0I3_DELAY 5000
#define BXT_FW_ROM_INIT_RETRY 3
static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);

View File

@ -57,18 +57,34 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize)
ctx->dsp_ops.stream_tag = stream_tag;
memcpy(ctx->dmab.area, fwdata, fwsize);
ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "dsp core0 power up failed\n");
ret = -EIO;
goto base_fw_load_failed;
}
/* purge FW request */
sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR,
CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE |
((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID)));
ret = cnl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "dsp boot core failed ret: %d\n", ret);
dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret);
ret = -EIO;
goto base_fw_load_failed;
}
ret = sst_dsp_register_poll(ctx, CNL_ADSP_REG_HIPCIDA,
CNL_ADSP_REG_HIPCIDA_DONE,
CNL_ADSP_REG_HIPCIDA_DONE,
BXT_INIT_TIMEOUT, "HIPCIDA Done");
if (ret < 0) {
dev_err(ctx->dev, "timeout for purge request: %d\n", ret);
goto base_fw_load_failed;
}
/* enable interrupt */
cnl_ipc_int_enable(ctx);
cnl_ipc_op_int_enable(ctx);
@ -109,7 +125,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
{
struct firmware stripped_fw;
struct skl_dev *cnl = ctx->thread_context;
int ret;
int ret, i;
if (!ctx->fw) {
ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
@ -131,12 +147,16 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
stripped_fw.size = ctx->fw->size;
skl_dsp_strip_extended_manifest(&stripped_fw);
ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
if (ret < 0) {
dev_err(ctx->dev, "prepare firmware failed: %d\n", ret);
goto cnl_load_base_firmware_failed;
for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
if (!ret)
break;
dev_dbg(ctx->dev, "prepare firmware failed: %d\n", ret);
}
if (ret < 0)
goto cnl_load_base_firmware_failed;
ret = sst_transfer_fw_host_dma(ctx);
if (ret < 0) {
dev_err(ctx->dev, "transfer firmware failed: %d\n", ret);
@ -158,6 +178,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
return 0;
cnl_load_base_firmware_failed:
dev_err(ctx->dev, "firmware load failed: %d\n", ret);
release_firmware(ctx->fw);
ctx->fw = NULL;

View File

@ -182,7 +182,8 @@ void skl_nhlt_remove_sysfs(struct skl_dev *skl)
{
struct device *dev = &skl->pci->dev;
sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
if (skl->nhlt)
sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
}
/*

View File

@ -67,6 +67,8 @@ struct skl_dev;
#define SKL_FW_INIT 0x1
#define SKL_FW_RFW_START 0xf
#define BXT_FW_ROM_INIT_RETRY 3
#define BXT_INIT_TIMEOUT 300
#define SKL_ADSPIC_IPC 1
#define SKL_ADSPIS_IPC 1

View File

@ -579,6 +579,38 @@ static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
return ret;
}
static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe)
{
struct skl_pipe_fmt *cur_fmt;
struct skl_pipe_fmt *next_fmt;
int i;
if (pipe->nr_cfgs <= 1)
return false;
if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
return true;
for (i = 0; i < pipe->nr_cfgs - 1; i++) {
if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) {
cur_fmt = &pipe->configs[i].out_fmt;
next_fmt = &pipe->configs[i + 1].out_fmt;
} else {
cur_fmt = &pipe->configs[i].in_fmt;
next_fmt = &pipe->configs[i + 1].in_fmt;
}
if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq,
cur_fmt->bps,
next_fmt->channels,
next_fmt->freq,
next_fmt->bps))
return true;
}
return false;
}
/*
* Here, we select pipe format based on the pipe type and pipe
* direction to determine the current config index for the pipeline.
@ -601,6 +633,14 @@ skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
return 0;
}
if (skl_tplg_is_multi_fmt(skl, pipe)) {
pipe->cur_config_idx = pipe->pipe_config_idx;
pipe->memory_pages = pconfig->mem_pages;
dev_dbg(skl->dev, "found pipe config idx:%d\n",
pipe->cur_config_idx);
return 0;
}
if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
dev_dbg(skl->dev, "No conn_type detected, take 0th config\n");
pipe->cur_config_idx = 0;
@ -1315,6 +1355,68 @@ static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
return 0;
}
static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol,
bool is_set)
{
struct snd_soc_component *component =
snd_soc_kcontrol_component(kcontrol);
struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
struct skl_dev *skl = bus_to_skl(bus);
struct skl_pipeline *ppl;
struct skl_pipe *pipe = NULL;
struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
u32 *pipe_id;
if (!ec)
return -EINVAL;
if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
return -EINVAL;
pipe_id = ec->dobj.private;
list_for_each_entry(ppl, &skl->ppl_list, node) {
if (ppl->pipe->ppl_id == *pipe_id) {
pipe = ppl->pipe;
break;
}
}
if (!pipe)
return -EIO;
if (is_set)
pipe->pipe_config_idx = ucontrol->value.enumerated.item[0];
else
ucontrol->value.enumerated.item[0] = pipe->pipe_config_idx;
return 0;
}
static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
}
static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
}
static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
}
static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
}
static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
unsigned int __user *data, unsigned int size)
{
@ -1854,6 +1956,16 @@ static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
.get = skl_tplg_mic_control_get,
.put = skl_tplg_mic_control_set,
},
{
.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
.get = skl_tplg_multi_config_get,
.put = skl_tplg_multi_config_set,
},
{
.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
.get = skl_tplg_multi_config_get_dmic,
.put = skl_tplg_multi_config_set_dmic,
}
};
static int skl_tplg_fill_pipe_cfg(struct device *dev,
@ -3014,12 +3126,21 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
case SND_SOC_TPLG_CTL_ENUM:
tplg_ec = container_of(hdr,
struct snd_soc_tplg_enum_control, hdr);
if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) {
se = (struct soc_enum *)kctl->private_value;
if (tplg_ec->priv.size)
return skl_init_enum_data(bus->dev, se,
tplg_ec);
skl_init_enum_data(bus->dev, se, tplg_ec);
}
/*
* now that the control initializations are done, remove
* write permission for the DMIC configuration enums to
* avoid conflicts between NHLT settings and user interaction
*/
if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC)
kctl->access = SNDRV_CTL_ELEM_ACCESS_READ;
break;
default:
@ -3489,6 +3610,37 @@ static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
return 0;
}
static void skl_tplg_complete(struct snd_soc_component *component)
{
struct snd_soc_dobj *dobj;
struct snd_soc_acpi_mach *mach =
dev_get_platdata(component->card->dev);
int i;
list_for_each_entry(dobj, &component->dobj_list, list) {
struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
struct soc_enum *se =
(struct soc_enum *)kcontrol->private_value;
char **texts = dobj->control.dtexts;
char chan_text[4];
if (dobj->type != SND_SOC_DOBJ_ENUM ||
dobj->control.kcontrol->put !=
skl_tplg_multi_config_set_dmic)
continue;
sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
for (i = 0; i < se->items; i++) {
struct snd_ctl_elem_value val;
if (strstr(texts[i], chan_text)) {
val.value.enumerated.item[0] = i;
kcontrol->put(kcontrol, &val);
}
}
}
}
static struct snd_soc_tplg_ops skl_tplg_ops = {
.widget_load = skl_tplg_widget_load,
.control_load = skl_tplg_control_load,
@ -3498,6 +3650,7 @@ static struct snd_soc_tplg_ops skl_tplg_ops = {
.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
.manifest = skl_manifest_load,
.dai_load = skl_dai_load,
.complete = skl_tplg_complete,
};
/*

View File

@ -306,6 +306,7 @@ struct skl_pipe {
struct skl_path_config configs[SKL_MAX_PATH_CONFIGS];
struct list_head w_list;
bool passthru;
u32 pipe_config_idx;
};
enum skl_module_state {

Some files were not shown because too many files have changed in this diff.