
Linux 4.1.22

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJXFh3gAAoJEN6mb/eXdyzctEAP/0gu5Hy3gdpTCSE55iHc6kN/
 b8c9THQRQzvoGexksxYwVmt/YNM4j7by5ndSGv+JJ08Qiyj96Czmame1KF/9uTcv
 IDrBy10hTKcDRHjk4Oi1tIbPnQifEnDXokKQaOwNZnQ6+bPjtv4FIX7ycEi/RIDU
 eXxszRclkT6yECcYZekVj5aA0zvw02e0SXMvaFg+zgj0EG6eeOJxzgZ1BRiJC6Qe
 Y5LcWqliuiNmwOstAOG6qxiua/JupKwHA6XKhHUhs8kch/Zd4FaMX+MqsKqpjsuz
 /HjgPpPALq5waMmeVjR/5l8K4YlPpphn4CPs+Rs4lJ4TvUBQuUPZ870sGqNqqX/f
 uLyZlxZC79IhwCn5jipIqfxgvibs3I4H/dFkuf5LE8THzN3rZ/ao2d95dbqqOs3E
 i4hc0iYOcfc/Os4U46AoZmth7xKS4wiaDJyQD13a09N1SqbWz7NJXzUdiil5ioFq
 oGOVvm0XNJUqcuNPo67SiLG/pQJjhOpsEBF6rXk9s6jGGLEMf2v9MXo5e94cEpjH
 5TqTYMd2M4TA26U7615RDoQ8fd+U7JvwC8+RSKuQAJmGOO55ZIcYRveX8MO4b6q7
 8Jq8JTmiv477JLoSDABtRl8X4M1hNtj7UqzGi8d9rt8yvnAuUmX+v7HqUy/jeCvs
 Sk7tQIWZrMoD6iId14Kx
 =QvQh
 -----END PGP SIGNATURE-----

Merge tag 'v4.1.22' into 4.1-1.0.x-imx

Linux 4.1.22

* tag 'v4.1.22': (253 commits)
  Linux 4.1.22
  ALSA: hda - Fix regression of monitor_present flag in eld proc file
  crypto: atmel - fix checks of error code returned by devm_ioremap_resource()
  arm64: errata: Add -mpc-relative-literal-loads to build flags
  mm/page_alloc: prevent merging between isolated and other pageblocks
  mm: use 'unsigned int' for page order
  mm: page_alloc: pass PFN to __free_pages_bootmem
  ocfs2/dlm: fix BUG in dlm_move_lockres_to_recovery_list
  ocfs2/dlm: fix race between convert and recovery
  Input: ati_remote2 - fix crashes on detecting device with invalid descriptor
  ideapad-laptop: Add ideapad Y700 (15) to the no_hw_rfkill DMI list
  staging: comedi: ni_mio_common: fix the ni_write[blw]() functions
  rapidio/rionet: fix deadlock on SMP
  fs/coredump: prevent fsuid=0 dumps into user-controlled directories
  coredump: Use 64bit time for unix time of coredump
  tracing: Fix trace_printk() to print when not using bprintk()
  KVM: fix spin_lock_init order on x86
  KVM: VMX: avoid guest hang on invalid invept instruction
  target: Fix target_release_cmd_kref shutdown comp leak
  bitops: Do not default to __clear_bit() for __clear_bit_unlock()
  ...
wifi-calibration
Otavio Salvador 2016-04-20 11:02:35 -03:00
commit eb0ed03687
265 changed files with 3187 additions and 1540 deletions

View File

@ -23,6 +23,7 @@ Optional properties:
during suspend.
- ti,no-reset-on-init: When present, the module should not be reset at init
- ti,no-idle-on-init: When present, the module should not be idled at init
- ti,no-idle: When present, the module is never allowed to idle.
Example:
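The binding's own example falls outside this hunk. As an illustrative sketch only (node, addresses, and names are taken loosely from the DRA7 change later in this commit, not from the binding document), a consumer would set the new property like this:

mac: ethernet@48484000 {
	compatible = "ti,dra7-cpsw", "ti,cpsw";
	ti,hwmods = "gmac";
	ti,no-idle;	/* never allow this module to idle */
};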

View File

@ -14,3 +14,10 @@ filesystem.
efivarfs is typically mounted like this,
mount -t efivarfs none /sys/firmware/efi/efivars
Due to the presence of numerous firmware bugs where removing non-standard
UEFI variables causes the system firmware to fail to POST, efivarfs
files that are not well-known standardized variables are created
as immutable files. This doesn't prevent removal - "chattr -i" will work -
but it does prevent this kind of failure from being accomplished
accidentally.
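The "chattr -i" escape hatch mentioned above corresponds to clearing FS_IMMUTABLE_FL through the FS_IOC_SETFLAGS ioctl. A minimal user-space sketch (the path is the caller's choice, CAP_LINUX_IMMUTABLE is required, and error handling is trimmed):

#include <fcntl.h>
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Make one efivarfs file mutable again, as "chattr -i" would. */
int make_mutable(const char *path)
{
	long flags;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		flags &= ~FS_IMMUTABLE_FL;
		ioctl(fd, FS_IOC_SETFLAGS, &flags);
	}
	close(fd);
	return 0;
}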

View File

@ -352,7 +352,8 @@ In the first case there are two additional complications:
- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
the kernel may now execute it. We handle this by also setting spte.nx.
If we get a user fetch or read fault, we'll change spte.u=1 and
spte.nx=gpte.nx back.
spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when
shadow paging is in use.
- if CR4.SMAP is disabled: since the page has been changed to a kernel
page, it can not be reused when CR4.SMAP is enabled. We set
CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,

View File

@ -9533,9 +9533,11 @@ S: Maintained
F: drivers/net/ethernet/dlink/sundance.c
SUPERH
M: Yoshinori Sato <ysato@users.sourceforge.jp>
M: Rich Felker <dalias@libc.org>
L: linux-sh@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-sh/list/
S: Orphan
S: Maintained
F: Documentation/sh/
F: arch/sh/
F: drivers/sh/

View File

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 1
SUBLEVEL = 20
SUBLEVEL = 22
EXTRAVERSION =
NAME = Series 4800

View File

@ -508,7 +508,7 @@
};
sata@a0000 {
compatible = "marvell,orion-sata";
compatible = "marvell,armada-370-sata";
reg = <0xa0000 0x5000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 14>, <&gateclk 20>;

View File

@ -1411,6 +1411,16 @@
0x48485200 0x2E00>;
#address-cells = <1>;
#size-cells = <1>;
/*
* Do not allow gating of cpsw clock as workaround
* for errata i877. Keeping internal clock disabled
* causes the device switching characteristics
* to degrade over time and eventually fail to meet
* the data manual delay time/skew specs.
*/
ti,no-idle;
/*
* rx_thresh_pend
* rx_pend

View File

@ -37,7 +37,7 @@ struct psci_operations {
extern struct psci_operations psci_ops;
extern struct smp_operations psci_smp_ops;
#ifdef CONFIG_ARM_PSCI
#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
int psci_init(void);
bool psci_smp_available(void);
#else

View File

@ -876,6 +876,36 @@ static int _init_opt_clks(struct omap_hwmod *oh)
return ret;
}
static void _enable_optional_clocks(struct omap_hwmod *oh)
{
struct omap_hwmod_opt_clk *oc;
int i;
pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
if (oc->_clk) {
pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
__clk_get_name(oc->_clk));
clk_enable(oc->_clk);
}
}
static void _disable_optional_clocks(struct omap_hwmod *oh)
{
struct omap_hwmod_opt_clk *oc;
int i;
pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
if (oc->_clk) {
pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
__clk_get_name(oc->_clk));
clk_disable(oc->_clk);
}
}
/**
* _enable_clocks - enable hwmod main clock and interface clocks
* @oh: struct omap_hwmod *
@ -903,6 +933,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
clk_enable(os->_clk);
}
if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
_enable_optional_clocks(oh);
/* The opt clocks are controlled by the device driver. */
return 0;
@ -934,41 +967,14 @@ static int _disable_clocks(struct omap_hwmod *oh)
clk_disable(os->_clk);
}
if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
_disable_optional_clocks(oh);
/* The opt clocks are controlled by the device driver. */
return 0;
}
static void _enable_optional_clocks(struct omap_hwmod *oh)
{
struct omap_hwmod_opt_clk *oc;
int i;
pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
if (oc->_clk) {
pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
__clk_get_name(oc->_clk));
clk_enable(oc->_clk);
}
}
static void _disable_optional_clocks(struct omap_hwmod *oh)
{
struct omap_hwmod_opt_clk *oc;
int i;
pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
if (oc->_clk) {
pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
__clk_get_name(oc->_clk));
clk_disable(oc->_clk);
}
}
/**
* _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
* @oh: struct omap_hwmod *
@ -2180,6 +2186,11 @@ static int _enable(struct omap_hwmod *oh)
*/
static int _idle(struct omap_hwmod *oh)
{
if (oh->flags & HWMOD_NO_IDLE) {
oh->_int_flags |= _HWMOD_SKIP_ENABLE;
return 0;
}
pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->_state != _HWMOD_STATE_ENABLED) {
@ -2484,6 +2495,8 @@ static int __init _init(struct omap_hwmod *oh, void *data)
oh->flags |= HWMOD_INIT_NO_RESET;
if (of_find_property(np, "ti,no-idle-on-init", NULL))
oh->flags |= HWMOD_INIT_NO_IDLE;
if (of_find_property(np, "ti,no-idle", NULL))
oh->flags |= HWMOD_NO_IDLE;
}
oh->_state = _HWMOD_STATE_INITIALIZED;
@ -2610,7 +2623,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh)
* XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
* it should be set by the core code as a runtime flag during startup
*/
if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
(postsetup_state == _HWMOD_STATE_IDLE)) {
oh->_int_flags |= _HWMOD_SKIP_ENABLE;
postsetup_state = _HWMOD_STATE_ENABLED;

View File

@ -517,6 +517,10 @@ struct omap_hwmod_omap4_prcm {
* HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up
* events by calling _reconfigure_io_chain() when a device is enabled
* or idled.
* HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
* operate and they need to be handled at the same time as the main_clk.
* HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
* IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@ -532,6 +536,8 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_FORCE_MSTANDBY (1 << 11)
#define HWMOD_SWSUP_SIDLE_ACT (1 << 12)
#define HWMOD_RECONFIG_IO_CHAIN (1 << 13)
#define HWMOD_OPT_CLKS_NEEDED (1 << 14)
#define HWMOD_NO_IDLE (1 << 15)
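For reference, a hwmod record opts in through its .flags field; a trimmed, illustrative entry (not a hunk from this patch; field names follow struct omap_hwmod):

static struct omap_hwmod dra7xx_gmac_hwmod = {
	.name	= "gmac",
	.flags	= HWMOD_OPT_CLKS_NEEDED | HWMOD_SWSUP_SIDLE,
	/* remaining class/clock/PRCM fields omitted for brevity */
};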
/*
* omap_hwmod._int_flags definitions

View File

@ -436,12 +436,14 @@ skipl2dis:
and r1, #0x700
cmp r1, #0x300
beq l2_inv_gp
adr r0, l2_inv_api_params_offset
ldr r3, [r0]
add r3, r3, r0 @ r3 points to dummy parameters
mov r0, #40 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
adr r3, l2_inv_api_params @ r3 points to dummy parameters
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
@ -475,8 +477,8 @@ skipl2dis:
b logic_l1_restore
.align
l2_inv_api_params:
.word 0x1, 0x00
l2_inv_api_params_offset:
.long l2_inv_api_params - .
l2_inv_gp:
/* Execute smi to invalidate L2 cache */
mov r12, #0x1 @ set up to invalidate L2
@ -531,6 +533,10 @@ l2dis_3630_offset:
l2dis_3630:
.word 0
.data
l2_inv_api_params:
.word 0x1, 0x00
/*
* Internal functions
*/

View File

@ -18,6 +18,8 @@ GZFLAGS :=-9
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
AS += -EB

View File

@ -33,7 +33,7 @@
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*
* VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
* VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
* (rounded up to PUD_SIZE).
* VMALLOC_START: beginning of the kernel VA space
* VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
@ -43,7 +43,9 @@
#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
#define VMEMMAP_START (VMALLOC_END + SZ_64K)
#define vmemmap ((struct page *)VMEMMAP_START - \
SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
#define FIRST_USER_ADDRESS 0UL

View File

@ -184,20 +184,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
static DEFINE_RWLOCK(step_hook_lock);
static DEFINE_SPINLOCK(step_hook_lock);
void register_step_hook(struct step_hook *hook)
{
write_lock(&step_hook_lock);
list_add(&hook->node, &step_hook);
write_unlock(&step_hook_lock);
spin_lock(&step_hook_lock);
list_add_rcu(&hook->node, &step_hook);
spin_unlock(&step_hook_lock);
}
void unregister_step_hook(struct step_hook *hook)
{
write_lock(&step_hook_lock);
list_del(&hook->node);
write_unlock(&step_hook_lock);
spin_lock(&step_hook_lock);
list_del_rcu(&hook->node);
spin_unlock(&step_hook_lock);
synchronize_rcu();
}
/*
@ -211,15 +212,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
struct step_hook *hook;
int retval = DBG_HOOK_ERROR;
read_lock(&step_hook_lock);
rcu_read_lock();
list_for_each_entry(hook, &step_hook, node) {
list_for_each_entry_rcu(hook, &step_hook, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
}
read_unlock(&step_hook_lock);
rcu_read_unlock();
return retval;
}
@ -271,20 +272,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(break_hook);
static DEFINE_RWLOCK(break_hook_lock);
static DEFINE_SPINLOCK(break_hook_lock);
void register_break_hook(struct break_hook *hook)
{
write_lock(&break_hook_lock);
list_add(&hook->node, &break_hook);
write_unlock(&break_hook_lock);
spin_lock(&break_hook_lock);
list_add_rcu(&hook->node, &break_hook);
spin_unlock(&break_hook_lock);
}
void unregister_break_hook(struct break_hook *hook)
{
write_lock(&break_hook_lock);
list_del(&hook->node);
write_unlock(&break_hook_lock);
spin_lock(&break_hook_lock);
list_del_rcu(&hook->node);
spin_unlock(&break_hook_lock);
synchronize_rcu();
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@ -292,11 +294,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
struct break_hook *hook;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
read_lock(&break_hook_lock);
list_for_each_entry(hook, &break_hook, node)
rcu_read_lock();
list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val)
fn = hook->fn;
read_unlock(&break_hook_lock);
rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
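The hunks above trade reader/writer locks for RCU so the hook lists can be walked from atomic debug context without taking a lock. A self-contained sketch of the same pattern (names are illustrative, not from the patch):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct hook {
	struct list_head node;
	int (*fn)(void);
};

static LIST_HEAD(hooks);
static DEFINE_SPINLOCK(hooks_lock);	/* serializes writers only */

void hook_register(struct hook *h)
{
	spin_lock(&hooks_lock);
	list_add_rcu(&h->node, &hooks);
	spin_unlock(&hooks_lock);
}

void hook_unregister(struct hook *h)
{
	spin_lock(&hooks_lock);
	list_del_rcu(&h->node);
	spin_unlock(&hooks_lock);
	synchronize_rcu();	/* wait for readers before the caller frees h */
}

int hook_call_all(void)
{
	struct hook *h;
	int ret = 0;

	rcu_read_lock();	/* readers never block writers */
	list_for_each_entry_rcu(h, &hooks, node)
		ret |= h->fn();
	rcu_read_unlock();
	return ret;
}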

View File

@ -312,8 +312,8 @@ void __init mem_init(void)
" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLG(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
MLG((unsigned long)vmemmap,
(unsigned long)vmemmap + VMEMMAP_SIZE),
MLG(VMEMMAP_START,
VMEMMAP_START + VMEMMAP_SIZE),
MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)),
#endif

View File

@ -1328,6 +1328,21 @@ static struct clk atmel_mci0_pclk = {
.index = 9,
};
static bool at32_mci_dma_filter(struct dma_chan *chan, void *pdata)
{
struct mci_dma_data *sl = pdata;
if (!sl)
return false;
if (find_slave_dev(sl) == chan->device->dev) {
chan->private = slave_data_ptr(sl);
return true;
}
return false;
}
struct platform_device *__init
at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
{
@ -1362,6 +1377,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
slave->sdata.dst_master = 0;
data->dma_slave = slave;
data->dma_filter = at32_mci_dma_filter;
if (platform_device_add_data(pdev, data,
sizeof(struct mci_platform_data)))

View File

@ -2103,11 +2103,11 @@ config CPU_R4K_CACHE_TLB
config MIPS_MT_SMP
bool "MIPS MT SMP support (1 TC on each available VPE)"
depends on SYS_SUPPORTS_MULTITHREADING
depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select SYNC_R4K
select MIPS_GIC_IPI
select MIPS_GIC_IPI if MIPS_GIC
select MIPS_MT
select SMP
select SMP_UP
@ -2204,8 +2204,8 @@ config MIPS_VPE_APSP_API_MT
config MIPS_CMP
bool "MIPS CMP framework support (DEPRECATED)"
depends on SYS_SUPPORTS_MIPS_CMP
select MIPS_GIC_IPI
depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
select MIPS_GIC_IPI if MIPS_GIC
select SMP
select SYNC_R4K
select SYS_SUPPORTS_SMP
@ -2221,11 +2221,11 @@ config MIPS_CMP
config MIPS_CPS
bool "MIPS Coherent Processing System support"
depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6
select MIPS_CM
select MIPS_CPC
select MIPS_CPS_PM if HOTPLUG_CPU
select MIPS_GIC_IPI
select MIPS_GIC_IPI if MIPS_GIC
select SMP
select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
select SYS_SUPPORTS_HOTPLUG_CPU
@ -2244,6 +2244,7 @@ config MIPS_CPS_PM
bool
config MIPS_GIC_IPI
depends on MIPS_GIC
bool
config MIPS_CM

View File

@ -120,6 +120,7 @@ static inline void calculate_cpu_foreign_map(void)
cpumask_t temp_foreign_map;
/* Re-calculate the mask */
cpumask_clear(&temp_foreign_map);
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)

View File

@ -335,7 +335,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
if (syms[i].st_shndx == SHN_UNDEF) {
char *name = strtab + syms[i].st_name;
if (name[0] == '.')
memmove(name, name+1, strlen(name));
syms[i].st_name++;
}
}
}

View File

@ -1273,6 +1273,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r6, VCPU_ACOP(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
/*
* Restore various registers to 0, where non-zero values
* set by the guest could disrupt the host.
*/
li r0, 0
mtspr SPRN_IAMR, r0
mtspr SPRN_CIABR, r0
mtspr SPRN_DAWRX, r0
mtspr SPRN_TCSCR, r0
mtspr SPRN_WORT, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
sldi r0, r0, 31
mtspr SPRN_MMCRS, r0
8:
/* Save and reset AMR and UAMOR before turning on the MMU */

View File

@ -15,17 +15,25 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste;
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
#endif
mm->context.asce_limit = STACK_TOP_MAX;
if (mm->context.asce_limit == 0) {
/* context created by exec, set asce limit to 4TB */
mm->context.asce_bits = _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
mm->context.asce_limit = STACK_TOP_MAX;
} else if (mm->context.asce_limit == (1UL << 31)) {
mm_inc_nr_pmds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
}
@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev,
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
if (oldmm->context.asce_limit < mm->context.asce_limit)
crst_table_downgrade(mm, oldmm->context.asce_limit);
}
static inline void arch_exit_mmap(struct mm_struct *mm)

View File

@ -45,7 +45,7 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
} __packed __aligned(16);
} __packed __aligned(64);
enum zpci_state {
ZPCI_FN_STATE_RESERVED,

View File

@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
return (pgd_t *) crst_table_alloc(mm);
unsigned long *table = crst_table_alloc(mm);
if (!table)
return NULL;
if (mm->context.asce_limit == (1UL << 31)) {
/* Forking a compat process with 2 page table levels */
if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
crst_table_free(mm, table);
return NULL;
}
}
return (pgd_t *) table;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
if (mm->context.asce_limit == (1UL << 31))
pgtable_pmd_page_dtor(virt_to_page(pgd));
crst_table_free(mm, (unsigned long *) pgd);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
static inline void pmd_populate(struct mm_struct *mm,
pmd_t *pmd, pgtable_t pte)

View File

@ -871,8 +871,11 @@ static inline int barsize(u8 size)
static int zpci_mem_init(void)
{
BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
__alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
16, 0, NULL);
__alignof__(struct zpci_fmb), 0, NULL);
if (!zdev_fmb_cache)
goto error_zdev;

View File

@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
ptr += strlen("proc");
ptr = skip_spaces(ptr);
file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));

View File

@ -511,6 +511,7 @@ ENTRY(ia32_syscall)
* it is too small to ever cause noticeable irq latency.
*/
PARAVIRT_ADJUST_EXCEPTION_FRAME
ASM_CLAC /* Do this early to minimize exposure */
SWAPGS
ENABLE_INTERRUPTS(CLBR_NONE)

View File

@ -640,8 +640,8 @@ static inline void entering_irq(void)
static inline void entering_ack_irq(void)
{
ack_APIC_irq();
entering_irq();
ack_APIC_irq();
}
static inline void exiting_irq(void)

View File

@ -159,6 +159,14 @@ struct x86_pmu_capability {
*/
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)
#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
* IBS cpuid feature detection
*/

View File

@ -57,4 +57,6 @@ static inline bool xen_x2apic_para_available(void)
}
#endif
extern void xen_set_iopl_mask(unsigned mask);
#endif /* _ASM_X86_XEN_HYPERVISOR_H */

View File

@ -72,6 +72,12 @@
#define MSR_LBR_CORE_FROM 0x00000040
#define MSR_LBR_CORE_TO 0x00000060
#define MSR_LBR_INFO_0 0x00000dc0 /* ... 0xddf for _31 */
#define LBR_INFO_MISPRED BIT_ULL(63)
#define LBR_INFO_IN_TX BIT_ULL(62)
#define LBR_INFO_ABORT BIT_ULL(61)
#define LBR_INFO_CYCLES 0xffff
#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345

View File

@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
SYSCALL_DEFINE1(iopl, unsigned int, level)
{
struct pt_regs *regs = current_pt_regs();
unsigned int old = (regs->flags >> 12) & 3;
struct thread_struct *t = &current->thread;
/*
* Careful: the IOPL bits in regs->flags are undefined under Xen PV
* and changing them has no effect.
*/
unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
if (level > 3)
return -EINVAL;
/* Trying to gain more privileges? */
@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
}
regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
t->iopl = level << 12;
regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
(level << X86_EFLAGS_IOPL_BIT);
t->iopl = level << X86_EFLAGS_IOPL_BIT;
set_iopl_mask(t->iopl);
return 0;
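For context, this syscall is reachable from user space through the glibc wrapper; a minimal x86 usage sketch (requires CAP_SYS_RAWIO; port 0x80 is the conventional POST debug port):

#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	if (iopl(3)) {	/* raise I/O privilege level to 3 */
		perror("iopl");
		return 1;
	}
	outb(0x00, 0x80);	/* direct port I/O now permitted */
	return 0;
}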

View File

@ -49,6 +49,7 @@
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
asmlinkage extern void ret_from_fork(void);
@ -419,6 +420,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
#ifdef CONFIG_XEN
/*
* On Xen PV, IOPL bits in pt_regs->flags have no effect, and
* current_pt_regs()->flags may not match the current task's
* intended IOPL. We need to switch it manually.
*/
if (unlikely(xen_pv_domain() &&
prev->iopl != next->iopl))
xen_set_iopl_mask(next->iopl);
#endif
if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
/*
* AMD CPUs have a misfeature: SYSRET sets the SS selector but

View File

@ -244,7 +244,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
* PIC is being reset. Handle it gracefully here
*/
atomic_inc(&ps->pending);
else if (value > 0)
else if (value > 0 && ps->reinject)
/* in this case, we had multiple outstanding pit interrupts
* that we needed to inject. Reinject
*/
@ -287,7 +287,9 @@ static void pit_do_work(struct kthread_work *work)
* last one has been acked.
*/
spin_lock(&ps->inject_lock);
if (ps->irq_ack) {
if (!ps->reinject)
inject = 1;
else if (ps->irq_ack) {
ps->irq_ack = 0;
inject = 1;
}
@ -316,10 +318,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
struct kvm_pit *pt = ps->kvm->arch.vpit;
if (ps->reinject || !atomic_read(&ps->pending)) {
if (ps->reinject)
atomic_inc(&ps->pending);
queue_kthread_work(&pt->worker, &pt->expired);
}
queue_kthread_work(&pt->worker, &pt->expired);
if (ps->is_periodic) {
hrtimer_add_expires_ns(&ps->timer, ps->period);

View File

@ -1674,6 +1674,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
return;
}
break;
case MSR_IA32_PEBS_ENABLE:
/* PEBS needs a quiescent period after being disabled (to write
* a record). Disabling PEBS through VMX MSR swapping doesn't
* provide that period, so a CPU could write host's record into
* guest's memory.
*/
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
for (i = 0; i < m->nr; ++i)
@ -1711,26 +1718,31 @@ static void reload_tss(void)
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
u64 guest_efer;
u64 ignore_bits;
u64 guest_efer = vmx->vcpu.arch.efer;
u64 ignore_bits = 0;
guest_efer = vmx->vcpu.arch.efer;
if (!enable_ept) {
/*
* NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
* host CPUID is more efficient than testing guest CPUID
* or CR4. Host SMEP is anyway a requirement for guest SMEP.
*/
if (boot_cpu_has(X86_FEATURE_SMEP))
guest_efer |= EFER_NX;
else if (!(guest_efer & EFER_NX))
ignore_bits |= EFER_NX;
}
/*
* NX is emulated; LMA and LME handled by hardware; SCE meaningless
* outside long mode
* LMA and LME handled by hardware; SCE meaningless outside long mode.
*/
ignore_bits = EFER_NX | EFER_SCE;
ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
ignore_bits |= EFER_LMA | EFER_LME;
/* SCE is meaningful only in long mode on Intel */
if (guest_efer & EFER_LMA)
ignore_bits &= ~(u64)EFER_SCE;
#endif
guest_efer &= ~ignore_bits;
guest_efer |= host_efer & ignore_bits;
vmx->guest_msrs[efer_offset].data = guest_efer;
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
clear_atomic_switch_msr(vmx, MSR_EFER);
@ -1741,16 +1753,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
*/
if (cpu_has_load_ia32_efer ||
(enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
guest_efer = vmx->vcpu.arch.efer;
if (!(guest_efer & EFER_LMA))
guest_efer &= ~EFER_LME;
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
guest_efer, host_efer);
return false;
}
} else {
guest_efer &= ~ignore_bits;
guest_efer |= host_efer & ignore_bits;
return true;
vmx->guest_msrs[efer_offset].data = guest_efer;
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
return true;
}
}
static unsigned long segment_base(u16 selector)
@ -7193,6 +7210,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
skip_emulated_instruction(vcpu);
return 1;
}

View File

@ -3732,13 +3732,13 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int r = 0;
int i;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
for (i = 0; i < 3; i++)
kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
return 0;
}
static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
@ -3757,6 +3757,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int r = 0, start = 0;
int i;
u32 prev_legacy, cur_legacy;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@ -3766,7 +3767,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
sizeof(kvm->arch.vpit->pit_state.channels));
kvm->arch.vpit->pit_state.flags = ps->flags;
kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
for (i = 0; i < 3; i++)
kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}

View File

@ -553,3 +553,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
static void pci_bdwep_bar(struct pci_dev *dev)
{
dev->non_compliant_bars = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);

View File

@ -959,7 +959,7 @@ static void xen_load_sp0(struct tss_struct *tss,
tss->x86_tss.sp0 = thread->sp0;
}
static void xen_set_iopl_mask(unsigned mask)
void xen_set_iopl_mask(unsigned mask)
{
struct physdev_set_iopl set_iopl;

View File

@ -128,7 +128,7 @@ ENTRY(_startup)
wsr a0, icountlevel
.set _index, 0
.rept XCHAL_NUM_DBREAK - 1
.rept XCHAL_NUM_DBREAK
wsr a0, SREG_DBREAKC + _index
.set _index, _index + 1
.endr

View File

@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
unsigned long paddr;
void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
pagefault_disable();
preempt_disable();
kmap_invalidate_coherent(page, vaddr);
set_bit(PG_arch_1, &page->flags);
clear_page_alias(kvaddr, paddr);
pagefault_enable();
preempt_enable();
}
void copy_user_highpage(struct page *dst, struct page *src,
@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
&src_paddr);
pagefault_disable();
preempt_disable();
kmap_invalidate_coherent(dst, vaddr);
set_bit(PG_arch_1, &dst->flags);
copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
pagefault_enable();
preempt_enable();
}
#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

View File

@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
{
struct tty_port *port = (struct tty_port *)priv;
int i = 0;
int rd = 1;
unsigned char c;
spin_lock(&timer_lock);
while (simc_poll(0)) {
simc_read(0, &c, 1);
rd = simc_read(0, &c, 1);
if (rd <= 0)
break;
tty_insert_flip_char(port, c, TTY_NORMAL);
i++;
}
if (i)
tty_flip_buffer_push(port);
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
if (rd)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
spin_unlock(&timer_lock);
}

View File

@ -2067,7 +2067,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
if (q->mq_ops) {
if (blk_queue_io_stat(q))
blk_account_io_start(rq, true);
blk_mq_insert_request(rq, false, true, true);
blk_mq_insert_request(rq, false, true, false);
return 0;
}

View File

@ -15,15 +15,21 @@ obj-$(CONFIG_PUBLIC_KEY_ALGO_RSA) += rsa.o
obj-$(CONFIG_X509_CERTIFICATE_PARSER) += x509_key_parser.o
x509_key_parser-y := \
x509-asn1.o \
x509_akid-asn1.o \
x509_rsakey-asn1.o \
x509_cert_parser.o \
x509_public_key.o
$(obj)/x509_cert_parser.o: $(obj)/x509-asn1.h $(obj)/x509_rsakey-asn1.h
$(obj)/x509_cert_parser.o: \
$(obj)/x509-asn1.h \
$(obj)/x509_akid-asn1.h \
$(obj)/x509_rsakey-asn1.h
$(obj)/x509-asn1.o: $(obj)/x509-asn1.c $(obj)/x509-asn1.h
$(obj)/x509_akid-asn1.o: $(obj)/x509_akid-asn1.c $(obj)/x509_akid-asn1.h
$(obj)/x509_rsakey-asn1.o: $(obj)/x509_rsakey-asn1.c $(obj)/x509_rsakey-asn1.h
clean-files += x509-asn1.c x509-asn1.h
clean-files += x509_akid-asn1.c x509_akid-asn1.h
clean-files += x509_rsakey-asn1.c x509_rsakey-asn1.h
#

View File

@ -85,8 +85,8 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
/* No match - see if the root certificate has a signer amongst the
* trusted keys.
*/
if (last && last->authority) {
key = x509_request_asymmetric_key(trust_keyring, last->authority,
if (last && last->akid_skid) {
key = x509_request_asymmetric_key(trust_keyring, last->akid_skid,
false);
if (!IS_ERR(key)) {
x509 = last;

View File

@ -187,11 +187,11 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
goto maybe_missing_crypto_in_x509;
pr_debug("- issuer %s\n", x509->issuer);
if (x509->authority)
if (x509->akid_skid)
pr_debug("- authkeyid %*phN\n",
x509->authority->len, x509->authority->data);
x509->akid_skid->len, x509->akid_skid->data);
if (!x509->authority ||
if (!x509->akid_skid ||
strcmp(x509->subject, x509->issuer) == 0) {
/* If there's no authority certificate specified, then
* the certificate must be self-signed and is the root
@ -216,13 +216,13 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
* list to see if the next one is there.
*/
pr_debug("- want %*phN\n",
x509->authority->len, x509->authority->data);
x509->akid_skid->len, x509->akid_skid->data);
for (p = pkcs7->certs; p; p = p->next) {
if (!p->skid)
continue;
pr_debug("- cmp [%u] %*phN\n",
p->index, p->skid->len, p->skid->data);
if (asymmetric_key_id_same(p->skid, x509->authority))
if (asymmetric_key_id_same(p->skid, x509->akid_skid))
goto found_issuer;
}
@ -338,8 +338,6 @@ int pkcs7_verify(struct pkcs7_message *pkcs7)
ret = x509_get_sig_params(x509);
if (ret < 0)
return ret;
pr_debug("X.509[%u] %*phN\n",
n, x509->authority->len, x509->authority->data);
}
for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {

View File

@ -0,0 +1,35 @@
-- X.509 AuthorityKeyIdentifier
-- rfc5280 section 4.2.1.1
AuthorityKeyIdentifier ::= SEQUENCE {
keyIdentifier [0] IMPLICIT KeyIdentifier OPTIONAL,
authorityCertIssuer [1] IMPLICIT GeneralNames OPTIONAL,
authorityCertSerialNumber [2] IMPLICIT CertificateSerialNumber OPTIONAL
}
KeyIdentifier ::= OCTET STRING ({ x509_akid_note_kid })
CertificateSerialNumber ::= INTEGER ({ x509_akid_note_serial })
GeneralNames ::= SEQUENCE OF GeneralName
GeneralName ::= CHOICE {
otherName [0] ANY,
rfc822Name [1] IA5String,
dNSName [2] IA5String,
x400Address [3] ANY,
directoryName [4] Name ({ x509_akid_note_name }),
ediPartyName [5] ANY,
uniformResourceIdentifier [6] IA5String,
iPAddress [7] OCTET STRING,
registeredID [8] OBJECT IDENTIFIER
}
Name ::= SEQUENCE OF RelativeDistinguishedName
RelativeDistinguishedName ::= SET OF AttributeValueAssertion
AttributeValueAssertion ::= SEQUENCE {
attributeType OBJECT IDENTIFIER ({ x509_note_OID }),
attributeValue ANY ({ x509_extract_name_segment })
}

View File

@ -18,6 +18,7 @@
#include "public_key.h"
#include "x509_parser.h"
#include "x509-asn1.h"
#include "x509_akid-asn1.h"
#include "x509_rsakey-asn1.h"
struct x509_parse_context {
@ -35,6 +36,10 @@ struct x509_parse_context {
u16 o_offset; /* Offset of organizationName (O) */
u16 cn_offset; /* Offset of commonName (CN) */
u16 email_offset; /* Offset of emailAddress */
unsigned raw_akid_size;
const void *raw_akid; /* Raw authorityKeyId in ASN.1 */
const void *akid_raw_issuer; /* Raw directoryName in authorityKeyId */
unsigned akid_raw_issuer_size;
};
/*
@ -48,7 +53,8 @@ void x509_free_certificate(struct x509_certificate *cert)
kfree(cert->subject);
kfree(cert->id);
kfree(cert->skid);
kfree(cert->authority);
kfree(cert->akid_id);
kfree(cert->akid_skid);
kfree(cert->sig.digest);
mpi_free(cert->sig.rsa.s);
kfree(cert);
@ -85,6 +91,18 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
if (ret < 0)
goto error_decode;
/* Decode the AuthorityKeyIdentifier */
if (ctx->raw_akid) {
pr_devel("AKID: %u %*phN\n",
ctx->raw_akid_size, ctx->raw_akid_size, ctx->raw_akid);
ret = asn1_ber_decoder(&x509_akid_decoder, ctx,
ctx->raw_akid, ctx->raw_akid_size);
if (ret < 0) {
pr_warn("Couldn't decode AuthKeyIdentifier\n");
goto error_decode;
}
}
/* Decode the public key */
ret = asn1_ber_decoder(&x509_rsakey_decoder, ctx,
ctx->key, ctx->key_size);
@ -422,7 +440,6 @@ int x509_process_extension(void *context, size_t hdrlen,
struct x509_parse_context *ctx = context;
struct asymmetric_key_id *kid;
const unsigned char *v = value;
int i;
pr_debug("Extension: %u\n", ctx->last_oid);
@ -449,117 +466,113 @@ int x509_process_extension(void *context, size_t hdrlen,
if (ctx->last_oid == OID_authorityKeyIdentifier) {
/* Get hold of the CA key fingerprint */
if (ctx->cert->authority || vlen < 5)
return -EBADMSG;
/* Authority Key Identifier must be a Constructed SEQUENCE */
if (v[0] != (ASN1_SEQ | (ASN1_CONS << 5)))
return -EBADMSG;
/* Authority Key Identifier is not indefinite length */
if (unlikely(vlen == ASN1_INDEFINITE_LENGTH))
return -EBADMSG;
if (vlen < ASN1_INDEFINITE_LENGTH) {
/* Short Form length */
if (v[1] != vlen - 2 ||
v[2] != SEQ_TAG_KEYID ||
v[3] > vlen - 4)
return -EBADMSG;
vlen = v[3];
v += 4;
} else {
/* Long Form length */
size_t seq_len = 0;
size_t sub = v[1] - ASN1_INDEFINITE_LENGTH;
if (sub > 2)
return -EBADMSG;
/* calculate the length from subsequent octets */
v += 2;
for (i = 0; i < sub; i++) {
seq_len <<= 8;
seq_len |= v[i];
}
if (seq_len != vlen - 2 - sub ||
v[sub] != SEQ_TAG_KEYID ||
v[sub + 1] > vlen - 4 - sub)
return -EBADMSG;
vlen = v[sub + 1];
v += (sub + 2);
}
kid = asymmetric_key_generate_id(ctx->cert->raw_issuer,
ctx->cert->raw_issuer_size,
v, vlen);
if (IS_ERR(kid))
return PTR_ERR(kid);
pr_debug("authkeyid %*phN\n", kid->len, kid->data);
ctx->cert->authority = kid;
ctx->raw_akid = v;
ctx->raw_akid_size = vlen;
return 0;
}
return 0;
}
/*
* Record a certificate time.
/**
* x509_decode_time - Decode an X.509 time ASN.1 object
* @_t: The time to fill in
* @hdrlen: The length of the object header
* @tag: The object tag
* @value: The object value
* @vlen: The size of the object value
*
* Decode an ASN.1 universal time or generalised time field into a struct the
* kernel can handle and check it for validity. The time is decoded thus:
*
* [RFC5280 §4.1.2.5]
* CAs conforming to this profile MUST always encode certificate validity
* dates through the year 2049 as UTCTime; certificate validity dates in
* 2050 or later MUST be encoded as GeneralizedTime. Conforming
* applications MUST be able to process validity dates that are encoded in
* either UTCTime or GeneralizedTime.
*/
static int x509_note_time(struct tm *tm, size_t hdrlen,
unsigned char tag,
const unsigned char *value, size_t vlen)
int x509_decode_time(time64_t *_t, size_t hdrlen,
unsigned char tag,
const unsigned char *value, size_t vlen)
{
static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31 };
const unsigned char *p = value;
unsigned year, mon, day, hour, min, sec, mon_len;
#define dec2bin(X) ((X) - '0')
#define dec2bin(X) ({ unsigned char x = (X) - '0'; if (x > 9) goto invalid_time; x; })
#define DD2bin(P) ({ unsigned x = dec2bin(P[0]) * 10 + dec2bin(P[1]); P += 2; x; })
if (tag == ASN1_UNITIM) {
/* UTCTime: YYMMDDHHMMSSZ */
if (vlen != 13)
goto unsupported_time;
tm->tm_year = DD2bin(p);
if (tm->tm_year >= 50)
tm->tm_year += 1900;
year = DD2bin(p);
if (year >= 50)
year += 1900;
else
tm->tm_year += 2000;
year += 2000;
} else if (tag == ASN1_GENTIM) {
/* GenTime: YYYYMMDDHHMMSSZ */
if (vlen != 15)
goto unsupported_time;
tm->tm_year = DD2bin(p) * 100 + DD2bin(p);
year = DD2bin(p) * 100 + DD2bin(p);
if (year >= 1950 && year <= 2049)
goto invalid_time;
} else {
goto unsupported_time;
}
tm->tm_year -= 1900;
tm->tm_mon = DD2bin(p) - 1;
tm->tm_mday = DD2bin(p);
tm->tm_hour = DD2bin(p);
tm->tm_min = DD2bin(p);
tm->tm_sec = DD2bin(p);
mon = DD2bin(p);
day = DD2bin(p);
hour = DD2bin(p);
min = DD2bin(p);
sec = DD2bin(p);
if (*p != 'Z')
goto unsupported_time;
mon_len = month_lengths[mon];
if (mon == 2) {
if (year % 4 == 0) {
mon_len = 29;
if (year % 100 == 0) {
mon_len = 28;
if (year % 400 == 0)
mon_len = 29;
}
}
}
if (year < 1970 ||
mon < 1 || mon > 12 ||
day < 1 || day > mon_len ||
hour < 0 || hour > 23 ||
min < 0 || min > 59 ||
sec < 0 || sec > 59)
goto invalid_time;
*_t = mktime64(year, mon, day, hour, min, sec);
return 0;
unsupported_time:
pr_debug("Got unsupported time [tag %02x]: '%*.*s'\n",
tag, (int)vlen, (int)vlen, value);
pr_debug("Got unsupported time [tag %02x]: '%*phN'\n",
tag, (int)vlen, value);
return -EBADMSG;
invalid_time:
pr_debug("Got invalid time [tag %02x]: '%*phN'\n",
tag, (int)vlen, value);
return -EBADMSG;
}
EXPORT_SYMBOL_GPL(x509_decode_time);
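A hedged illustration of calling the newly exported helper (ASN1_UNITIM comes from <linux/asn1.h>; the timestamp is arbitrary, and hdrlen is unused by the helper):

#include <linux/asn1.h>

static int demo_decode_time(void)
{
	/* UTCTime "160420110235Z" == 2016-04-20 11:02:35 UTC */
	static const unsigned char utc[] = "160420110235Z";
	time64_t t;

	return x509_decode_time(&t, 2, ASN1_UNITIM, utc, sizeof(utc) - 1);
}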
int x509_note_not_before(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
return x509_note_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen);
return x509_decode_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen);
}
int x509_note_not_after(void *context, size_t hdrlen,
@ -567,5 +580,73 @@ int x509_note_not_after(void *context, size_t hdrlen,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
return x509_note_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen);
return x509_decode_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen);
}
/*
* Note a key identifier-based AuthorityKeyIdentifier
*/
int x509_akid_note_kid(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
struct asymmetric_key_id *kid;
pr_debug("AKID: keyid: %*phN\n", (int)vlen, value);
if (ctx->cert->akid_skid)
return 0;
kid = asymmetric_key_generate_id(ctx->cert->raw_issuer,
ctx->cert->raw_issuer_size,
value, vlen);
if (IS_ERR(kid))
return PTR_ERR(kid);
pr_debug("authkeyid %*phN\n", kid->len, kid->data);
ctx->cert->akid_skid = kid;
return 0;
}
/*
* Note a directoryName in an AuthorityKeyIdentifier
*/
int x509_akid_note_name(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
pr_debug("AKID: name: %*phN\n", (int)vlen, value);
ctx->akid_raw_issuer = value;
ctx->akid_raw_issuer_size = vlen;
return 0;
}
/*
* Note a serial number in an AuthorityKeyIdentifier
*/
int x509_akid_note_serial(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
struct asymmetric_key_id *kid;
pr_debug("AKID: serial: %*phN\n", (int)vlen, value);
if (!ctx->akid_raw_issuer || ctx->cert->akid_id)
return 0;
kid = asymmetric_key_generate_id(value,
vlen,
ctx->akid_raw_issuer,
ctx->akid_raw_issuer_size);
if (IS_ERR(kid))
return PTR_ERR(kid);
pr_debug("authkeyid %*phN\n", kid->len, kid->data);
ctx->cert->akid_id = kid;
return 0;
}

View File

@ -19,11 +19,12 @@ struct x509_certificate {
struct public_key_signature sig; /* Signature parameters */
char *issuer; /* Name of certificate issuer */
char *subject; /* Name of certificate subject */
struct asymmetric_key_id *id; /* Serial number + issuer */
struct asymmetric_key_id *id; /* Issuer + Serial number */
struct asymmetric_key_id *skid; /* Subject + subjectKeyId (optional) */
struct asymmetric_key_id *authority; /* Authority key identifier (optional) */
struct tm valid_from;
struct tm valid_to;
struct asymmetric_key_id *akid_id; /* CA AuthKeyId matching ->id (optional) */
struct asymmetric_key_id *akid_skid; /* CA AuthKeyId matching ->skid (optional) */
time64_t valid_from;
time64_t valid_to;
const void *tbs; /* Signed data */
unsigned tbs_size; /* Size of signed data */
unsigned raw_sig_size; /* Size of signature */
@ -48,6 +49,9 @@ struct x509_certificate {
*/
extern void x509_free_certificate(struct x509_certificate *cert);
extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
extern int x509_decode_time(time64_t *_t, size_t hdrlen,
unsigned char tag,
const unsigned char *value, size_t vlen);
/*
* x509_public_key.c

View File

@ -227,10 +227,10 @@ static int x509_validate_trust(struct x509_certificate *cert,
if (!trust_keyring)
return -EOPNOTSUPP;
if (ca_keyid && !asymmetric_key_id_partial(cert->authority, ca_keyid))
if (ca_keyid && !asymmetric_key_id_partial(cert->akid_skid, ca_keyid))
return -EPERM;
key = x509_request_asymmetric_key(trust_keyring, cert->authority,
key = x509_request_asymmetric_key(trust_keyring, cert->akid_skid,
false);
if (!IS_ERR(key)) {
if (!use_builtin_keys
@ -271,14 +271,7 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
}
pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);
pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n",
cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1,
cert->valid_from.tm_mday, cert->valid_from.tm_hour,
cert->valid_from.tm_min, cert->valid_from.tm_sec);
pr_devel("Cert Valid To: %04ld-%02d-%02d %02d:%02d:%02d\n",
cert->valid_to.tm_year + 1900, cert->valid_to.tm_mon + 1,
cert->valid_to.tm_mday, cert->valid_to.tm_hour,
cert->valid_to.tm_min, cert->valid_to.tm_sec);
pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to);
pr_devel("Cert Signature: %s + %s\n",
pkey_algo_name[cert->sig.pkey_algo],
hash_algo_name[cert->sig.pkey_hash_algo]);
@ -287,8 +280,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
cert->pub->id_type = PKEY_ID_X509;
/* Check the signature on the key if it appears to be self-signed */
if (!cert->authority ||
asymmetric_key_id_same(cert->skid, cert->authority)) {
if (!cert->akid_skid ||
asymmetric_key_id_same(cert->skid, cert->akid_skid)) {
ret = x509_check_signature(cert->pub, cert); /* self-signed */
if (ret < 0)
goto error_free_cert;

View File

@ -166,14 +166,6 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
{
.callback = video_detect_force_vendor,
.ident = "Dell Inspiron 5737",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
},
},
{ },
};

View File

@ -705,7 +705,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
@ -896,6 +896,10 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
/* Acknowledge the interrupt status on the port.*/
port_stat = readl(port->mmio + PORT_IRQ_STAT);
if (unlikely(port_stat == 0xFFFFFFFF)) {
mtip_check_surprise_removal(dd->pdev);
return IRQ_HANDLED;
}
writel(port_stat, port->mmio + PORT_IRQ_STAT);
/* Demux port status */
@ -991,15 +995,11 @@ static bool mtip_pause_ncq(struct mtip_port *port,
reply = port->rxfis + RX_FIS_D2H_REG;
task_file_data = readl(port->mmio+PORT_TFDATA);
if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
if ((task_file_data & 1))
return false;
if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
port->ic_pause_timer = jiffies;
return true;
} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
@ -1011,6 +1011,8 @@ static bool mtip_pause_ncq(struct mtip_port *port,
((fis->command == 0xFC) &&
(fis->features == 0x27 || fis->features == 0x72 ||
fis->features == 0x62 || fis->features == 0x26))) {
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
/* Com reset after secure erase or lowlevel format */
mtip_restart_port(port);
return false;
@ -1102,6 +1104,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
int rv = 0;
unsigned long start;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@ -1164,6 +1167,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Populate the command header */
int_cmd->command_header->byte_count = 0;
start = jiffies;
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
@ -1172,10 +1177,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if ((rv = wait_for_completion_interruptible_timeout(
&wait,
msecs_to_jiffies(timeout))) <= 0) {
if (rv == -ERESTARTSYS) { /* interrupted */
dev_err(&dd->pdev->dev,
"Internal command [%02X] was interrupted after %lu ms\n",
fis->command, timeout);
"Internal command [%02X] was interrupted after %u ms\n",
fis->command,
jiffies_to_msecs(jiffies - start));
rv = -EINTR;
goto exec_ic_exit;
} else if (rv == 0) /* timeout */
@ -2780,48 +2787,6 @@ static void mtip_hw_debugfs_exit(struct driver_data *dd)
debugfs_remove_recursive(dd->dfs_node);
}
static int mtip_free_orphan(struct driver_data *dd)
{
struct kobject *kobj;
if (dd->bdev) {
if (dd->bdev->bd_holders >= 1)
return -2;
bdput(dd->bdev);
dd->bdev = NULL;
}
mtip_hw_debugfs_exit(dd);
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) &&
test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
put_disk(dd->disk);
} else {
if (dd->disk) {
kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
if (kobj) {
mtip_hw_sysfs_exit(dd, kobj);
kobject_put(kobj);
}
del_gendisk(dd->disk);
dd->disk = NULL;
}
if (dd->queue) {
dd->queue->queuedata = NULL;
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
dd->queue = NULL;
}
}
kfree(dd);
return 0;
}
/*
* Perform any init/resume time hardware setup
*
@ -2969,7 +2934,6 @@ static int mtip_service_thread(void *data)
unsigned long slot, slot_start, slot_wrap;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
int ret;
while (1) {
if (kthread_should_stop() ||
@ -3055,18 +3019,6 @@ restart_eh:
if (kthread_should_stop())
goto st_out;
}
while (1) {
ret = mtip_free_orphan(dd);
if (!ret) {
/* NOTE: All data structures are invalid, do not
* access any here */
return 0;
}
msleep_interruptible(1000);
if (kthread_should_stop())
goto st_out;
}
st_out:
return 0;
}
@ -3178,7 +3130,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed.\n");
/* TODO */
set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
}
}
@ -3352,20 +3304,25 @@ out1:
return rv;
}
static void mtip_standby_drive(struct driver_data *dd)
static int mtip_standby_drive(struct driver_data *dd)
{
if (dd->sr)
return;
int rv = 0;
if (dd->sr || !dd->port)
return -ENODEV;
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
!test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
if (mtip_standby_immediate(dd->port))
!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
!test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
rv = mtip_standby_immediate(dd->port);
if (rv)
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
}
return rv;
}
/*
@ -3394,6 +3351,7 @@ static int mtip_hw_exit(struct driver_data *dd)
/* Release the IRQ. */
irq_set_affinity_hint(dd->pdev->irq, NULL);
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
msleep(1000);
/* Free dma regions */
mtip_dma_free(dd);
@ -3422,8 +3380,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!dd->sr && dd->port)
mtip_standby_immediate(dd->port);
mtip_standby_drive(dd);
return 0;
}
@ -3446,7 +3403,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
* Send standby immediate (E0h) to the drive
* so that it saves its state.
*/
if (mtip_standby_immediate(dd->port) != 0) {
if (mtip_standby_drive(dd) != 0) {
dev_err(&dd->pdev->dev,
"Failed standby-immediate command\n");
return -EFAULT;
@ -3684,6 +3641,28 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
static int mtip_block_open(struct block_device *dev, fmode_t mode)
{
struct driver_data *dd;
if (dev && dev->bd_disk) {
dd = (struct driver_data *) dev->bd_disk->private_data;
if (dd) {
if (test_bit(MTIP_DDF_REMOVAL_BIT,
&dd->dd_flag)) {
return -ENODEV;
}
return 0;
}
}
return -ENODEV;
}
void mtip_block_release(struct gendisk *disk, fmode_t mode)
{
}
/*
* Block device operation function.
*
@ -3691,6 +3670,8 @@ static int mtip_block_getgeo(struct block_device *dev,
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
.open = mtip_block_open,
.release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
@ -3729,10 +3710,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
rq_data_dir(rq))) {
return -ENODATA;
}
if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
return -ENODATA;
if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
return -ENXIO;
}
if (rq->cmd_flags & REQ_DISCARD) {
@ -4066,52 +4046,51 @@ static int mtip_block_remove(struct driver_data *dd)
{
struct kobject *kobj;
if (!dd->sr) {
mtip_hw_debugfs_exit(dd);
mtip_hw_debugfs_exit(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
kthread_stop(dd->mtip_svc_handler);
}
/* Clean up the sysfs attributes, if created */
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
if (kobj) {
mtip_hw_sysfs_exit(dd, kobj);
kobject_put(kobj);
}
if (dd->mtip_svc_handler) {
set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
kthread_stop(dd->mtip_svc_handler);
}
/* Clean up the sysfs attributes, if created */
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
if (kobj) {
mtip_hw_sysfs_exit(dd, kobj);
kobject_put(kobj);
}
}
if (!dd->sr)
mtip_standby_drive(dd);
/*
* Delete our gendisk structure. This also removes the device
* from /dev
*/
if (dd->bdev) {
bdput(dd->bdev);
dd->bdev = NULL;
}
if (dd->disk) {
if (dd->disk->queue) {
del_gendisk(dd->disk);
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
dd->queue = NULL;
} else
put_disk(dd->disk);
}
dd->disk = NULL;
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
} else {
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
/*
* Delete our gendisk structure. This also removes the device
* from /dev
*/
if (dd->bdev) {
bdput(dd->bdev);
dd->bdev = NULL;
}
if (dd->disk) {
del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
dd->queue = NULL;
}
put_disk(dd->disk);
}
dd->disk = NULL;
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
/* De-initialize the protocol layer. */
mtip_hw_exit(dd);
@ -4140,12 +4119,12 @@ static int mtip_block_shutdown(struct driver_data *dd)
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
del_gendisk(dd->disk);
if (dd->disk->queue) {
del_gendisk(dd->disk);
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
} else
put_disk(dd->disk);
}
put_disk(dd->disk);
dd->disk = NULL;
dd->queue = NULL;
}
@ -4485,7 +4464,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
struct driver_data *dd = pci_get_drvdata(pdev);
unsigned long flags, to;
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
spin_lock_irqsave(&dev_lock, flags);
list_del_init(&dd->online_list);
@ -4502,11 +4481,18 @@ static void mtip_pci_remove(struct pci_dev *pdev)
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
fsync_bdev(dd->bdev);
if (atomic_read(&dd->irq_workers_active) != 0) {
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
}
if (dd->sr)
blk_mq_stop_hw_queues(dd->queue);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
/* Clean up the block layer. */
mtip_block_remove(dd);
@ -4524,10 +4510,8 @@ static void mtip_pci_remove(struct pci_dev *pdev)
list_del_init(&dd->remove_list);
spin_unlock_irqrestore(&dev_lock, flags);
if (!dd->sr)
kfree(dd);
else
set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);
kfree(dd);
set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);

View File

@ -155,6 +155,7 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |

View File

@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe05f) },
{ USB_DEVICE(0x0489, 0xe076) },
{ USB_DEVICE(0x0489, 0xe078) },
{ USB_DEVICE(0x0489, 0xe095) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x300d) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x04CA, 0x3014) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x13d3, 0x3395) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },
{ USB_DEVICE(0x13d3, 0x3472) },
{ USB_DEVICE(0x13d3, 0x3474) },
/* Atheros AR5BBU12 with sflash firmware */
@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */

View File

@ -184,6 +184,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@ -194,6 +195,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@ -215,10 +217,12 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */

View File

@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
for_each_child_of_node(pdev->dev.of_node, child) {
for_each_available_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;

View File

@ -309,11 +309,11 @@ static int crb_acpi_remove(struct acpi_device *device)
struct device *dev = &device->dev;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_chip_unregister(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_CLEAR);
tpm_chip_unregister(chip);
return 0;
}

View File

@ -708,6 +708,9 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"aclk_cpu",
"aclk_peri",
"hclk_peri",
"pclk_cpu",
"pclk_peri",
"hclk_cpubus"
};
static void __init rk3188_common_clk_init(struct device_node *np)

View File

@ -1492,13 +1492,6 @@ static int atmel_sha_remove(struct platform_device *pdev)
clk_unprepare(sha_dd->iclk);
iounmap(sha_dd->io_base);
clk_put(sha_dd->iclk);
if (sha_dd->irq >= 0)
free_irq(sha_dd->irq, sha_dd);
return 0;
}

View File

@ -202,6 +202,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
return ccp_aes_cmac_finup(req);
}
static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
{
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
memcpy(state.buf, rctx->buf, sizeof(state.buf));
/* 'out' may not be aligned so memcpy from local variable */
memcpy(out, &state, sizeof(state));
return 0;
}
static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
{
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;
/* 'in' may not be aligned so memcpy to local variable */
memcpy(&state, in, sizeof(state));
memset(rctx, 0, sizeof(*rctx));
rctx->null_msg = state.null_msg;
memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
rctx->buf_count = state.buf_count;
memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
return 0;
}
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@ -334,10 +367,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
alg->final = ccp_aes_cmac_final;
alg->finup = ccp_aes_cmac_finup;
alg->digest = ccp_aes_cmac_digest;
alg->export = ccp_aes_cmac_export;
alg->import = ccp_aes_cmac_import;
alg->setkey = ccp_aes_cmac_setkey;
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");

View File

@ -194,6 +194,43 @@ static int ccp_sha_digest(struct ahash_request *req)
return ccp_sha_finup(req);
}
static int ccp_sha_export(struct ahash_request *req, void *out)
{
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;
memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
state.buf_count = rctx->buf_count;
memcpy(state.buf, rctx->buf, sizeof(state.buf));
/* 'out' may not be aligned so memcpy from local variable */
memcpy(out, &state, sizeof(state));
return 0;
}
static int ccp_sha_import(struct ahash_request *req, const void *in)
{
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;
/* 'in' may not be aligned so memcpy to local variable */
memcpy(&state, in, sizeof(state));
memset(rctx, 0, sizeof(*rctx));
rctx->type = state.type;
rctx->msg_bits = state.msg_bits;
rctx->first = state.first;
memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
rctx->buf_count = state.buf_count;
memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
return 0;
}
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@ -390,9 +427,12 @@ static int ccp_register_sha_alg(struct list_head *head,
alg->final = ccp_sha_final;
alg->finup = ccp_sha_finup;
alg->digest = ccp_sha_digest;
alg->export = ccp_sha_export;
alg->import = ccp_sha_import;
halg = &alg->halg;
halg->digestsize = def->digest_size;
halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);

View File

@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
};
struct ccp_aes_cmac_exp_ctx {
unsigned int null_msg;
u8 iv[AES_BLOCK_SIZE];
unsigned int buf_count;
u8 buf[AES_BLOCK_SIZE];
};
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
};
struct ccp_sha_exp_ctx {
enum ccp_sha_type type;
u64 msg_bits;
unsigned int first;
u8 ctx[MAX_SHA_CONTEXT_SIZE];
unsigned int buf_count;
u8 buf[MAX_SHA_BLOCK_SIZE];
};
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);

View File

@ -176,6 +176,7 @@
#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
#define AT_XDMAC_DMA_BUSWIDTHS\
(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@ -925,8 +926,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct at_xdmac_desc *desc, *_desc;
struct list_head *descs_list;
enum dma_status ret;
int residue;
u32 cur_nda, mask, value;
int residue, retry;
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
@ -963,7 +964,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
cpu_relax();
}
/*
* When processing the residue, we need to read two registers but we
* can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
* we stand in the descriptor list and AT_XDMAC_CUBC is used
* to know how much data remains for the current descriptor.
* Since the dma channel is not paused (so as not to lose data), the
* controller may move on to another descriptor between the
* AT_XDMAC_CNDA and AT_XDMAC_CUBC reads.
* For that reason, after reading AT_XDMAC_CUBC, we check if we are
* still using the same descriptor by reading a second time
* AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
* read again AT_XDMAC_CUBC.
* Memory barriers are used to ensure the read order of the registers.
* A maximum number of retries is set because, although unlikely, the
* loop could otherwise never end when transferring a lot of data with
* small buffers.
*/
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
rmb();
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
if (likely(cur_nda == check_nda))
break;
cur_nda = check_nda;
rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
}
if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
ret = DMA_ERROR;
goto spin_unlock;
}
/*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
@ -976,7 +1012,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
break;
}
residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
residue += cur_ubc << dwidth;
dma_set_residue(txstate, residue);
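The retry scheme described in the comment above can be restated in a few lines: read CNDA, read CUBC, then re-read CNDA, and only trust the pair when the descriptor pointer did not move. A userspace sketch with plain variables standing in for the registers (the memory barriers are omitted here):

#include <stdio.h>

#define RESIDUE_MAX_RETRIES 5

static volatile unsigned int reg_nda = 0x1000, reg_ubc = 42;

static int read_consistent(unsigned int *nda, unsigned int *ubc)
{
    unsigned int check, retry;

    *nda = reg_nda;
    *ubc = reg_ubc;
    for (retry = 0; retry < RESIDUE_MAX_RETRIES; retry++) {
        check = reg_nda;
        if (check == *nda)
            return 0;     /* still on the same descriptor */
        *nda = check;     /* descriptor changed: re-read UBC */
        *ubc = reg_ubc;
    }
    return -1;            /* give up, report DMA_ERROR */
}

int main(void)
{
    unsigned int nda, ubc;

    printf("ret=%d nda=%#x ubc=%u\n", read_consistent(&nda, &ubc), nda, ubc);
    return 0;
}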

View File

@ -1437,7 +1437,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 chan_off;
u64 dram_base = get_dram_base(pvt, range);
u64 hole_off = f10_dhar_offset(pvt);
u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
if (hi_rng) {
/*

View File

@ -1043,8 +1043,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)TAD_SOCK(reg),
(u32)TAD_CH(reg),
(u32)(1 << TAD_SOCK(reg)),
(u32)TAD_CH(reg) + 1,
(u32)TAD_TGT0(reg),
(u32)TAD_TGT1(reg),
(u32)TAD_TGT2(reg),
@ -1316,7 +1316,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
sck_way = TAD_SOCK(reg) + 1;
sck_way = 1 << TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
@ -1373,7 +1373,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
n_tads,
addr,
limit,
(u32)TAD_SOCK(reg),
sck_way,
ch_way,
offset,
idx,
@ -1388,18 +1388,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
offset, addr);
return -EINVAL;
}
addr -= offset;
/* Store the low bits [0:6] of the addr */
ch_addr = addr & 0x7f;
/* Remove socket wayness and remove 6 bits */
addr >>= 6;
addr = div_u64(addr, sck_xch);
#if 0
/* Divide by channel way */
addr = addr / ch_way;
#endif
/* Recover the last 6 bits */
ch_addr |= addr << 6;
ch_addr = addr - offset;
ch_addr >>= (6 + shiftup);
ch_addr /= ch_way * sck_way;
ch_addr <<= (6 + shiftup);
ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
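The replacement arithmetic above can be checked in isolation: strip the TAD offset, drop the cacheline (and shiftup) bits, divide out both interleave ways, then splice the low bits back in. A runnable restatement with made-up input values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0x123456780ULL;     /* system address, illustrative */
    uint64_t offset = 0x100000000ULL;   /* TAD region offset, illustrative */
    int shiftup = 0;                    /* no extra interleave shift */
    int ch_way = 2, sck_way = 2;        /* 2-way channel and socket */
    uint64_t ch_addr;

    ch_addr = addr - offset;
    ch_addr >>= (6 + shiftup);          /* drop cacheline/shift bits */
    ch_addr /= ch_way * sck_way;        /* remove both interleave factors */
    ch_addr <<= (6 + shiftup);
    ch_addr |= addr & ((1ULL << (6 + shiftup)) - 1);  /* low bits back */

    printf("ch_addr = %#" PRIx64 "\n", ch_addr);
    return 0;
}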
/*
* Step 3) Decode rank

View File

@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
}
if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
efivar_validate(name, data, size) == false) {
efivar_validate(vendor, name, data, size) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
}
if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
efivar_validate(name, data, size) == false) {
efivar_validate(new_var->VendorGuid, name, data,
size) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
@ -535,50 +536,43 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
* efivar_create_sysfs_entry - create a new entry in sysfs
* @new_var: efivar entry to create
*
* Returns 1 on failure, 0 on success
* Returns 0 on success, negative error code on failure
*/
static int
efivar_create_sysfs_entry(struct efivar_entry *new_var)
{
int i, short_name_size;
int short_name_size;
char *short_name;
unsigned long variable_name_size;
efi_char16_t *variable_name;
variable_name = new_var->var.VariableName;
variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
unsigned long utf8_name_size;
efi_char16_t *variable_name = new_var->var.VariableName;
int ret;
/*
* Length of the variable bytes in ASCII, plus the '-' separator,
* Length of the variable bytes in UTF8, plus the '-' separator,
* plus the GUID, plus trailing NUL
*/
short_name_size = variable_name_size / sizeof(efi_char16_t)
+ 1 + EFI_VARIABLE_GUID_LEN + 1;
short_name = kzalloc(short_name_size, GFP_KERNEL);
utf8_name_size = ucs2_utf8size(variable_name);
short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
short_name = kmalloc(short_name_size, GFP_KERNEL);
if (!short_name)
return 1;
return -ENOMEM;
ucs2_as_utf8(short_name, variable_name, short_name_size);
/* Convert Unicode to normal chars (assume top bits are 0),
ala UTF-8 */
for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
short_name[i] = variable_name[i] & 0xFF;
}
/* This is ugly, but necessary to separate one vendor's
private variables from another's. */
*(short_name + strlen(short_name)) = '-';
short_name[utf8_name_size] = '-';
efi_guid_to_str(&new_var->var.VendorGuid,
short_name + strlen(short_name));
short_name + utf8_name_size + 1);
new_var->kobj.kset = efivars_kset;
i = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
NULL, "%s", short_name);
kfree(short_name);
if (i)
return 1;
if (ret)
return ret;
kobject_uevent(&new_var->kobj, KOBJ_ADD);
efivar_entry_add(new_var, &efivar_sysfs_list);
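The sysfs name built above is simply "<utf8 name>-<vendor guid>". A sketch of the same construction, with a plain ASCII name and an illustrative GUID standing in for ucs2_as_utf8() and efi_guid_to_str():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EFI_VARIABLE_GUID_LEN 36

int main(void)
{
    const char *utf8_name = "BootOrder";
    const char *guid = "8be4df61-93ca-11d2-aa0d-00e098032b8c";
    size_t utf8_name_size = strlen(utf8_name);
    size_t short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
    char *short_name = malloc(short_name_size);

    if (!short_name)
        return 1;
    memcpy(short_name, utf8_name, utf8_name_size);
    short_name[utf8_name_size] = '-';   /* separates name from vendor GUID */
    memcpy(short_name + utf8_name_size + 1, guid, EFI_VARIABLE_GUID_LEN);
    short_name[short_name_size - 1] = '\0';

    puts(short_name);                   /* BootOrder-8be4df61-... */
    free(short_name);
    return 0;
}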

View File

@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
}
struct variable_validate {
efi_guid_t vendor;
char *name;
bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
unsigned long len);
};
/*
* This is the list of variables we need to validate, as well as the
* whitelist for what we think is safe not to default to immutable.
*
* If it has a validate() method that's not NULL, it'll go into the
* validation routine. If not, it is assumed valid, but still used for
* whitelisting.
*
* Note that it's sorted by {vendor,name}, but globbed names must come after
* any other name with the same prefix.
*/
static const struct variable_validate variable_validate[] = {
{ "BootNext", validate_uint16 },
{ "BootOrder", validate_boot_order },
{ "DriverOrder", validate_boot_order },
{ "Boot*", validate_load_option },
{ "Driver*", validate_load_option },
{ "ConIn", validate_device_path },
{ "ConInDev", validate_device_path },
{ "ConOut", validate_device_path },
{ "ConOutDev", validate_device_path },
{ "ErrOut", validate_device_path },
{ "ErrOutDev", validate_device_path },
{ "Timeout", validate_uint16 },
{ "Lang", validate_ascii_string },
{ "PlatformLang", validate_ascii_string },
{ "", NULL },
{ EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
{ EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
{ EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
{ EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
{ EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
{ EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
{ EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
{ EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
{ EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
{ EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
{ EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
{ EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
{ EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
{ EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
{ EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
{ LINUX_EFI_CRASH_GUID, "*", NULL },
{ NULL_GUID, "", NULL },
};
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
char u = var_name[*match];
/* Wildcard in the matching name means we've matched */
if (c == '*')
return true;
/* Case sensitive match */
if (!c && *match == len)
return true;
if (c != u)
return false;
if (!c)
return true;
}
return true;
}
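Assuming the matcher above is made linkable (it is static in the driver), a few probes show both match modes; lengths include the terminating NUL, matching the utf8_size + 1 call site below:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* assumed visible: the matcher defined above */
bool variable_matches(const char *var_name, size_t len,
                      const char *match_name, int *match);

int main(void)
{
    int match;

    /* glob entry: "Boot*" matches any Boot-prefixed name */
    printf("%d\n", variable_matches("Boot0001", strlen("Boot0001") + 1,
                                    "Boot*", &match));      /* 1 */
    /* exact entry */
    printf("%d\n", variable_matches("BootOrder", strlen("BootOrder") + 1,
                                    "BootOrder", &match));  /* 1 */
    /* mismatch */
    printf("%d\n", variable_matches("Timeout", strlen("Timeout") + 1,
                                    "Lang", &match));       /* 0 */
    return 0;
}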
bool
efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
unsigned long data_size)
{
int i;
u16 *unicode_name = var_name;
unsigned long utf8_size;
u8 *utf8_name;
for (i = 0; variable_validate[i].validate != NULL; i++) {
utf8_size = ucs2_utf8size(var_name);
utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
if (!utf8_name)
return false;
ucs2_as_utf8(utf8_name, var_name, utf8_size);
utf8_name[utf8_size] = '\0';
for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
const char *name = variable_validate[i].name;
int match;
int match = 0;
for (match = 0; ; match++) {
char c = name[match];
u16 u = unicode_name[match];
if (efi_guidcmp(vendor, variable_validate[i].vendor))
continue;
/* All special variables are plain ascii */
if (u > 127)
return true;
/* Wildcard in the matching name means we've matched */
if (c == '*')
return variable_validate[i].validate(var_name,
match, data, len);
/* Case sensitive match */
if (c != u)
if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
if (variable_validate[i].validate == NULL)
break;
/* Reached the end of the string while matching */
if (!c)
return variable_validate[i].validate(var_name,
match, data, len);
kfree(utf8_name);
return variable_validate[i].validate(var_name, match,
data, data_size);
}
}
kfree(utf8_name);
return true;
}
EXPORT_SYMBOL_GPL(efivar_validate);
bool
efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
size_t len)
{
int i;
bool found = false;
int match = 0;
/*
* Check if our variable is in the validated variables list
*/
for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
if (efi_guidcmp(variable_validate[i].vendor, vendor))
continue;
if (variable_matches(var_name, len,
variable_validate[i].name, &match)) {
found = true;
break;
}
}
/*
* If it's in our list, it is removable.
*/
return found;
}
EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
static efi_status_t
check_var_size(u32 attributes, unsigned long size)
{
@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
*set = false;
if (efivar_validate(name, data, *size) == false)
if (efivar_validate(*vendor, name, data, *size) == false)
return -EINVAL;
/*

View File

@ -798,12 +798,33 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
return mstb;
}
static void drm_dp_free_mst_port(struct kref *kref);
static void drm_dp_free_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
if (mstb->port_parent) {
if (list_empty(&mstb->port_parent->next))
kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
}
kfree(mstb);
}
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
/*
* init the kref again so the ports can drop the final reference and
* free this mst branch once it is no longer needed
*/
kref_init(kref);
if (mstb->port_parent && list_empty(&mstb->port_parent->next))
kref_get(&mstb->port_parent->kref);
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
if (wake_tx)
wake_up(&mstb->mgr->tx_waitq);
kfree(mstb);
kref_put(kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
* from an EDID retrieval */
mutex_lock(&mgr->destroy_connector_lock);
kref_get(&port->parent->kref);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
return send_link;
}
static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port)
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
int ret;
if (port->dpcd_rev >= 0x12) {
port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
if (!port->guid_valid) {
ret = drm_dp_send_dpcd_write(mstb->mgr,
port,
DP_GUID,
16, port->guid);
port->guid_valid = true;
memcpy(mstb->guid, guid, 16);
if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
ret = drm_dp_send_dpcd_write(
mstb->mgr,
mstb->port_parent,
DP_GUID,
16,
mstb->guid);
} else {
ret = drm_dp_dpcd_write(
mstb->mgr->aux,
DP_GUID,
mstb->guid,
16);
}
}
}
@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
memcpy(port->guid, port_msg->peer_guid, 16);
/* manage mstb port lists with mgr lock - take a reference
for this list */
@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
drm_dp_check_port_guid(mstb, port);
if (!port->input)
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
} else {
port->guid_valid = false;
port->available_pbn = 0;
}
}
@ -1156,10 +1185,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
drm_dp_check_port_guid(mstb, port);
dowork = true;
} else {
port->guid_valid = false;
port->available_pbn = 0;
}
}
@ -1216,13 +1243,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
if (memcmp(mstb->guid, guid, 16) == 0)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
if (!port->mstb)
continue;
if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
return port->mstb;
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)
@ -1241,10 +1269,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
/* find the port by iterating down */
mutex_lock(&mgr->lock);
if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
mstb = mgr->mst_primary;
else
mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
if (mstb)
kref_get(&mstb->kref);
@ -1549,6 +1574,9 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
}
drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
@ -1595,6 +1623,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
if (!mstb->port_parent)
return NULL;
if (mstb->port_parent->mstb != mstb)
return mstb->port_parent;
return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}
static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
int *port_num)
{
struct drm_dp_mst_branch *rmstb = NULL;
struct drm_dp_mst_port *found_port;
mutex_lock(&mgr->lock);
if (mgr->mst_primary) {
found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
if (found_port) {
rmstb = found_port->parent;
kref_get(&rmstb->kref);
*port_num = found_port->port_num;
}
}
mutex_unlock(&mgr->lock);
return rmstb;
}
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
@ -1602,11 +1661,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
int len, ret;
int len, ret, port_num;
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb)
return -EINVAL;
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
if (!mstb)
return -EINVAL;
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
@ -1615,7 +1679,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
}
txmsg->dst = mstb;
len = build_allocate_payload(txmsg, port->port_num,
len = build_allocate_payload(txmsg, port_num,
id,
pbn);
@ -1969,6 +2033,12 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
kref_get(&mgr->mst_primary->kref);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
{
struct drm_dp_payload reset_pay;
reset_pay.start_slot = 0;
@ -1976,26 +2046,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
}
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
/* sort out guid */
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
if (ret != 16) {
DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
goto out_unlock;
}
mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
if (!mgr->guid_valid) {
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
mgr->guid_valid = true;
}
queue_work(system_long_wq, &mgr->work);
ret = 0;
@ -2217,6 +2267,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
drm_dp_update_port(mstb, &msg.u.conn_stat);
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
(*mgr->cbs->hotplug)(mgr);
@ -2749,6 +2800,13 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
static void drm_dp_free_mst_port(struct kref *kref)
{
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
kfree(port);
}
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@ -2769,13 +2827,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
kref_init(&port->kref);
INIT_LIST_HEAD(&port->next);
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
if (!port->input && port->vcpi.vcpi > 0)
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
kfree(port);
if (!port->input && port->vcpi.vcpi > 0) {
if (mgr->mst_state) {
drm_dp_mst_reset_vcpi_slots(mgr, port);
drm_dp_update_payload_part1(mgr);
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
}
kref_put(&port->kref, drm_dp_free_mst_port);
send_hotplug = true;
}
if (send_hotplug)

View File

@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
drm_gem_object_unreference(&r->gem);
drm_gem_object_unreference_unlocked(&r->gem);
*handlep = handle;
return 0;
}

View File

@ -254,7 +254,7 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count,
u8 train_set[4])
{
@ -301,77 +301,43 @@ static int convert_bpc_to_bpp(int bpc)
return bpc * 3;
}
/* get the max pix clock supported by the link rate and lane num */
static int dp_get_max_dp_pix_clock(int link_rate,
int lane_num,
int bpp)
{
return (link_rate * lane_num * 8) / bpp;
}
/***** radeon specific DP functions *****/
int radeon_dp_get_max_link_rate(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE])
{
int max_link_rate;
if (radeon_connector_is_dp12_capable(connector))
max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
else
max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
return max_link_rate;
}
/* First get the min lane# when low rate is used according to pixel clock
* (prefer low rate), second check max lane# supported by DP panel,
* if the max lane# < low rate lane# then use max lane# instead.
*/
static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
int radeon_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
unsigned pix_clock,
unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
if (pix_clock <= max_dp_pix_clock)
break;
}
return lane_num;
}
static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int lane_num, max_pix_clock;
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
unsigned lane_num, i, max_pix_clock;
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
ENCODER_OBJECT_ID_NUTMEG)
return 270000;
lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 162000;
max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 270000;
if (radeon_connector_is_dp12_capable(connector)) {
max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 540000;
ENCODER_OBJECT_ID_NUTMEG) {
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
max_pix_clock = (lane_num * 270000 * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
*dp_rate = 270000;
return 0;
}
}
} else {
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
*dp_rate = link_rates[i];
return 0;
}
}
}
}
return radeon_dp_get_max_link_rate(connector, dpcd);
return -EINVAL;
}
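For one illustrative mode, the search above can be replayed by hand: 8 bpc (bpp = 24) at a 148500 kHz pixel clock, with a sink advertising 4 lanes and a 540000 max rate, settles on 1 lane at 540000. A standalone rerun of the same loops (lanes outer, rates inner, first fit wins):

#include <stdio.h>

int main(void)
{
    static const unsigned link_rates[3] = { 162000, 270000, 540000 };
    unsigned max_link_rate = 540000, max_lane_num = 4;
    unsigned pix_clock = 148500, bpp = 24;
    unsigned lane_num, i, max_pix_clock;

    for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
        for (i = 0; i < 3 && link_rates[i] <= max_link_rate; i++) {
            max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
            if (max_pix_clock >= pix_clock) {
                printf("%u lane(s) at %u\n", lane_num,
                       link_rates[i]);  /* prints: 1 lane(s) at 540000 */
                return 0;
            }
        }
    }
    return 1;  /* -EINVAL in the driver */
}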
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@ -490,6 +456,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int ret;
if (!radeon_connector->con_priv)
return;
@ -497,10 +464,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
dig_connector->dp_clock =
radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
dig_connector->dp_lane_count =
radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
mode->clock,
&dig_connector->dp_lane_count,
&dig_connector->dp_clock);
if (ret) {
dig_connector->dp_clock = 0;
dig_connector->dp_lane_count = 0;
}
}
}
@ -509,7 +480,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
unsigned dp_clock, dp_lanes;
int ret;
if ((mode->clock > 340000) &&
(!radeon_connector_is_dp12_capable(connector)))
@ -519,8 +491,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
dp_clock =
radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
mode->clock,
&dp_lanes,
&dp_clock);
if (ret)
return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!radeon_connector_is_dp12_capable(connector)))

View File

@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
break;
case 2:
case 3:

View File

@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
bool radeon_has_atpx_dgpu_power_cntl(void) {
return radeon_atpx_priv.atpx.functions.power_cntl;
}
/**
* radeon_atpx_call - call an ATPX method
*
@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
/* make sure required functions are enabled */
/* dGPU power control is required */
atpx->functions.power_cntl = true;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;

View File

@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
#endif
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@ -1427,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
if (rdev->flags & RADEON_IS_PX)
if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
@ -1734,7 +1740,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
drm_helper_hpd_irq_event(dev);
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)

View File

@ -520,11 +520,17 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
{
struct radeon_connector_atom_dig *dig_connector;
int ret;
dig_connector = mst_enc->connector->con_priv;
dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
dig_connector->dpcd);
ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
dig_connector->dpcd, adjusted_mode->clock,
&dig_connector->dp_lane_count,
&dig_connector->dp_clock);
if (ret) {
dig_connector->dp_lane_count = 0;
dig_connector->dp_clock = 0;
}
DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
dig_connector->dp_lane_count, dig_connector->dp_clock);
}

View File

@ -333,7 +333,8 @@ out_unref:
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
if (rdev->mode_info.rfbdev)
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
@ -373,6 +374,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
int bpp_sel = 32;
int ret;
/* don't enable fbdev if no connectors */
if (list_empty(&rdev->ddev->mode_config.connector_list))
return 0;
/* select 8 bpp console on RN50 or 16MB cards */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
@ -425,11 +430,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
if (rdev->mode_info.rfbdev)
fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
if (!rdev->mode_info.rfbdev)
return false;
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
@ -437,10 +446,12 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
if (rdev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
if (rdev->mode_info.rfbdev)
drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}

View File

@ -752,8 +752,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
int radeon_dp_get_max_link_rate(struct drm_connector *connector,
u8 *dpcd);
extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
const u8 *dpcd,
unsigned pix_clock,
unsigned *dp_lanes, unsigned *dp_rate);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);

View File

@ -1079,6 +1079,8 @@ force:
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
/* update displays */
radeon_dpm_display_configuration_changed(rdev);
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@ -1095,9 +1097,6 @@ force:
radeon_dpm_post_set_power_state(rdev);
/* update displays */
radeon_dpm_display_configuration_changed(rdev);
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
rdev->pm.dpm.single_display = single_display;

View File

@ -2584,9 +2584,10 @@ int hid_add_device(struct hid_device *hdev)
/*
* Scan generic devices for group information
*/
if (hid_ignore_special_drivers ||
(!hdev->group &&
!hid_match_id(hdev, hid_have_special_driver))) {
if (hid_ignore_special_drivers) {
hdev->group = HID_GROUP_GENERIC;
} else if (!hdev->group &&
!hid_match_id(hdev, hid_have_special_driver)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);

View File

@ -322,8 +322,19 @@ static void mt_feature_mapping(struct hid_device *hdev,
break;
}
td->inputmode = field->report->id;
td->inputmode_index = usage->usage_index;
if (td->inputmode < 0) {
td->inputmode = field->report->id;
td->inputmode_index = usage->usage_index;
} else {
/*
* Some elan panels wrongly declare 2 input mode
* features, and silently ignore when we set the
* value in the second field. Skip the second feature
* and hope for the best.
*/
dev_info(&hdev->dev,
"Ignoring the extra HID_DG_INPUTMODE\n");
}
break;
case HID_DG_CONTACTMAX:

View File

@ -280,18 +280,22 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
/* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
u16 size = 2 /* size */ +
(reportID ? 1 : 0) /* reportID */ +
data_len /* buf */;
int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
2 /* dataRegister */ +
size /* args */;
u16 size;
int args_len;
int index = 0;
i2c_hid_dbg(ihid, "%s\n", __func__);
if (data_len > ihid->bufsize)
return -EINVAL;
size = 2 /* size */ +
(reportID ? 1 : 0) /* reportID */ +
data_len /* buf */;
args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
2 /* dataRegister */ +
size /* args */;
if (!use_data && maxOutputLength == 0)
return -ENOSYS;
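The fix above is an ordering one: the caller-supplied length is checked against the device buffer before any derived sizes are computed from it. A minimal restatement with illustrative values:

#include <stdint.h>
#include <stdio.h>

static int build_report(uint8_t report_id, size_t data_len, size_t bufsize)
{
    uint16_t size;
    int args_len;

    if (data_len > bufsize)
        return -1;  /* -EINVAL: too big for the transfer buffer */

    size = 2 + (report_id ? 1 : 0) + data_len;
    args_len = (report_id >= 0x0F ? 1 : 0) + 2 + size;
    printf("size=%u args_len=%d\n", size, args_len);
    return 0;
}

int main(void)
{
    build_report(0x01, 16, 64);                   /* accepted */
    printf("%d\n", build_report(0x01, 128, 64));  /* rejected: -1 */
    return 0;
}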

View File

@ -477,8 +477,6 @@ static void hid_ctrl(struct urb *urb)
struct usbhid_device *usbhid = hid->driver_data;
int unplug = 0, status = urb->status;
spin_lock(&usbhid->lock);
switch (status) {
case 0: /* success */
if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
@ -498,6 +496,8 @@ static void hid_ctrl(struct urb *urb)
hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
spin_lock(&usbhid->lock);
if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
} else {

View File

@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
data->client = client;
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
indio_dev->info = &mcp4725_info;
indio_dev->channels = &mcp4725_channel;
indio_dev->num_channels = 1;

View File

@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
return -ENOMEM;
rx = adis->buffer;
tx = rx + indio_dev->scan_bytes;
tx = rx + scan_count;
spi_message_init(&adis->msg);

View File

@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
*val = ret >> 6;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
*val = 605;
*val = -605;
*val2 = 750000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:

View File

@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
error = l2t_send(tdev, skb, l2e);
if (error < 0)
kfree_skb(skb);
return error;
return error < 0 ? error : 0;
}
int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
error = cxgb3_ofld_send(tdev, skb);
if (error < 0)
kfree_skb(skb);
return error;
return error < 0 ? error : 0;
}
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)

View File

@ -865,7 +865,7 @@ isert_put_conn(struct isert_conn *isert_conn)
* @isert_conn: isert connection struct
*
* Notes:
* In case the connection state is FULL_FEATURE, move state
* In case the connection state is BOUND, move state
* to TERMINATING and start the teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
@ -881,6 +881,7 @@ isert_conn_terminate(struct isert_conn *isert_conn)
case ISER_CONN_TERMINATING:
break;
case ISER_CONN_UP:
case ISER_CONN_BOUND:
case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
isert_info("Terminating conn %p state %d\n",
isert_conn, isert_conn->state);
@ -927,14 +928,9 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
struct isert_np *isert_np = cma_id->context;
struct isert_conn *isert_conn;
struct isert_conn *isert_conn = cma_id->qp->qp_context;
bool terminating = false;
if (isert_np->np_cm_id == cma_id)
return isert_np_cma_handler(cma_id->context, event);
isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
terminating = (isert_conn->state == ISER_CONN_TERMINATING);
isert_conn_terminate(isert_conn);
@ -972,11 +968,15 @@ isert_connect_error(struct rdma_cm_id *cma_id)
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
struct isert_np *isert_np = cma_id->context;
int ret = 0;
isert_info("event %d status %d id %p np %p\n", event->event,
event->status, cma_id, cma_id->context);
if (isert_np->np_cm_id == cma_id)
return isert_np_cma_handler(cma_id->context, event->event);
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
@ -2059,7 +2059,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
void *start = isert_conn->rx_descs;
int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
if (wr_id >= start && wr_id < start + len)
if ((wr_id >= start && wr_id < start + len) ||
(wr_id == isert_conn->login_req_buf))
return false;
return true;
@ -2085,7 +2086,8 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
isert_completion_put(desc, isert_cmd, ib_dev, true);
} else {
isert_conn->post_recv_buf_count--;
if (!isert_conn->post_recv_buf_count)
if (!isert_conn->post_recv_buf_count &&
isert_conn->state >= ISER_CONN_BOUND)
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
}
@ -3268,6 +3270,7 @@ accept_wait:
conn->context = isert_conn;
isert_conn->conn = conn;
isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);

View File

@ -50,6 +50,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
ISER_CONN_BOUND,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,

View File

@ -1742,47 +1742,6 @@ send_sense:
return -1;
}
/**
* srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
* @ch: RDMA channel of the task management request.
* @fn: Task management function to perform.
* @req_tag: Tag of the SRP task management request.
* @mgmt_ioctx: I/O context of the task management request.
*
* Returns zero if the target core will process the task management
* request asynchronously.
*
* Note: It is assumed that the initiator serializes tag-based task management
* requests.
*/
static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
{
struct srpt_device *sdev;
struct srpt_rdma_ch *ch;
struct srpt_send_ioctx *target;
int ret, i;
ret = -EINVAL;
ch = ioctx->ch;
BUG_ON(!ch);
BUG_ON(!ch->sport);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
spin_lock_irq(&sdev->spinlock);
for (i = 0; i < ch->rq_size; ++i) {
target = ch->ioctx_ring[i];
if (target->cmd.se_lun == ioctx->cmd.se_lun &&
target->tag == tag &&
srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
ret = 0;
/* now let the target core abort &target->cmd; */
break;
}
}
spin_unlock_irq(&sdev->spinlock);
return ret;
}
static int srp_tmr_to_tcm(int fn)
{
switch (fn) {
@ -1817,7 +1776,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
uint32_t tag = 0;
int tcm_tmr;
int rc;
@ -1833,25 +1791,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
if (tcm_tmr < 0) {
send_ioctx->cmd.se_tmr_req->response =
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
goto fail;
}
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
if (rc < 0) {
send_ioctx->cmd.se_tmr_req->response =
TMR_TASK_DOES_NOT_EXIST;
goto fail;
}
tag = srp_tsk->task_tag;
}
rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
srp_tsk, tcm_tmr, GFP_KERNEL, tag,
srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;

View File

@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
ar2->udev = udev;
/* Sanity check, first interface must have an endpoint */
if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
dev_err(&interface->dev,
"%s(): interface 0 must have an endpoint\n", __func__);
r = -ENODEV;
goto fail1;
}
ar2->intf[0] = interface;
ar2->ep[0] = &alt->endpoint[0].desc;
/* Sanity check, the device must have two interfaces */
ar2->intf[1] = usb_ifnum_to_if(udev, 1);
if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
__func__, udev->actconfig->desc.bNumInterfaces);
r = -ENODEV;
goto fail1;
}
r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
if (r)
goto fail1;
/* Sanity check, second interface must have an endpoint */
alt = ar2->intf[1]->cur_altsetting;
if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
dev_err(&interface->dev,
"%s(): interface 1 must have an endpoint\n", __func__);
r = -ENODEV;
goto fail2;
}
ar2->ep[1] = &alt->endpoint[0].desc;
r = ati_remote2_urb_init(ar2);
if (r)
goto fail2;
goto fail3;
ar2->channel_mask = channel_mask;
ar2->mode_mask = mode_mask;
r = ati_remote2_setup(ar2, ar2->channel_mask);
if (r)
goto fail2;
goto fail3;
usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
if (r)
goto fail2;
goto fail3;
r = ati_remote2_input_init(ar2);
if (r)
goto fail3;
goto fail4;
usb_set_intfdata(interface, ar2);
@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
return 0;
fail3:
fail4:
sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
fail2:
fail3:
ati_remote2_urb_cleanup(ar2);
fail2:
usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
fail1:
kfree(ar2);

View File

@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bMasterInterface0);
if (!pcu->ctrl_intf)
return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
pcu->ep_ctrl = &alt->endpoint[0].desc;
@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->data_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bSlaveInterface0);
if (!pcu->data_intf)
return -EINVAL;
alt = pcu->data_intf->cur_altsetting;
if (alt->desc.bNumEndpoints != 2) {

View File

@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
int error = -ENOMEM;
interface = intf->cur_altsetting;
if (interface->desc.bNumEndpoints < 1)
return -EINVAL;
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -EIO;

View File

@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
return;
/* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
if (SYN_ID_FULL(priv->identity) == 0x801 &&
/* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
if ((SYN_ID_FULL(priv->identity) == 0x801 ||
SYN_ID_FULL(priv->identity) == 0x802) &&
!((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
return;

View File

@ -48,6 +48,7 @@
#define INTC_ILR0 0x0100
#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
#define SPURIOUSIRQ_MASK (0x1ffffff << 7)
#define INTCPS_NR_ILR_REGS 128
#define INTCPS_NR_MIR_REGS 4
@ -331,37 +332,36 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
u32 irqnr = 0;
int handled_irq = 0;
int i;
extern unsigned long irq_err_count;
u32 irqnr;
do {
for (i = 0; i < omap_nr_pending; i++) {
irqnr = intc_readl(INTC_PENDING_IRQ0 + (0x20 * i));
if (irqnr)
goto out;
}
out:
if (!irqnr)
break;
irqnr = intc_readl(INTC_SIR);
irqnr &= ACTIVEIRQ_MASK;
if (irqnr) {
handle_domain_irq(domain, irqnr, regs);
handled_irq = 1;
}
} while (irqnr);
irqnr = intc_readl(INTC_SIR);
/*
* If an irq is masked or deasserted while active, we will
* keep ending up here with no irq handled. So remove it from
* the INTC with an ack.
* A spurious IRQ can result if the interrupt that triggered the
* sorting is no longer active during the sorting (10 INTC
* functional clock cycles after interrupt assertion). Or a
* change in interrupt mask affected the result during sorting
* time. There is no special handling required except ignoring
* the SIR register value just read and retrying.
* See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
*
* Often, a spurious interrupt situation has been fixed by adding
* a flush for the posted write acking the IRQ in the device
* driver. Typically, this is going to be the device driver whose
* interrupt was handled just before the spurious IRQ occurred.
* Pay attention to those device drivers if you run into the
* spurious IRQ condition below.
*/
if (!handled_irq)
if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
pr_err_once("%s: spurious irq!\n", __func__);
irq_err_count++;
omap_ack_irq(NULL);
return;
}
irqnr &= ACTIVEIRQ_MASK;
handle_domain_irq(domain, irqnr, regs);
}
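The spurious-IRQ test reduces to checking that every SIR bit above the 7 active-IRQ bits reads back as 1. A quick standalone check with two illustrative SIR words:

#include <stdio.h>

#define ACTIVEIRQ_MASK   0x7f
#define SPURIOUSIRQ_MASK (0x1ffffffu << 7)

int main(void)
{
    unsigned int sir[] = { 0x00000025u, 0xffffffa5u };

    for (int i = 0; i < 2; i++) {
        if ((sir[i] & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)
            printf("%#010x -> spurious, ack and retry\n", sir[i]);
        else
            printf("%#010x -> irq %u\n", sir[i],
                   sir[i] & ACTIVEIRQ_MASK);
    }
    return 0;
}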
void __init omap3_init_irq(void)

View File

@ -1046,8 +1046,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
*/
atomic_set(&dc->count, 1);
if (bch_cached_dev_writeback_start(dc))
/* Block writeback thread, but spawn it */
down_write(&dc->writeback_lock);
if (bch_cached_dev_writeback_start(dc)) {
up_write(&dc->writeback_lock);
return -ENOMEM;
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(dc);
@ -1059,6 +1063,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
pr_info("Caching %s as %s on set %pU",
bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
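The locking added above follows a common pattern: take the writer side before spawning the worker so the worker blocks until registration finishes. A userspace analog using a POSIX rwlock in place of the kernel rw_semaphore; names are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t writeback_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *writeback_thread(void *arg)
{
    (void)arg;
    pthread_rwlock_rdlock(&writeback_lock);  /* blocks until attach is done */
    puts("writeback running");
    pthread_rwlock_unlock(&writeback_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_rwlock_wrlock(&writeback_lock);  /* block the worker... */
    pthread_create(&t, NULL, writeback_thread, NULL);
    puts("finishing attach");                /* ...while setup completes */
    pthread_rwlock_unlock(&writeback_lock);  /* now let it proceed */
    pthread_join(t, NULL);
    return 0;
}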
@ -1397,6 +1404,9 @@ static void cache_set_flush(struct closure *cl)
struct btree *b;
unsigned i;
if (!c)
closure_return(cl);
bch_cache_accounting_destroy(&c->accounting);
kobject_put(&c->internal);
@ -1862,11 +1872,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
return 0;
}
static void register_cache(struct cache_sb *sb, struct page *sb_page,
static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
const char *err = "cannot allocate memory";
const char *err = NULL;
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
@ -1881,27 +1892,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
if (cache_alloc(sb, ca) != 0)
ret = cache_alloc(sb, ca);
if (ret != 0)
goto err;
err = "error creating kobject";
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
goto err;
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
err = "error calling kobject_add";
ret = -ENOMEM;
goto out;
}
mutex_lock(&bch_register_lock);
err = register_cache_set(ca);
mutex_unlock(&bch_register_lock);
if (err)
goto err;
if (err) {
ret = -ENODEV;
goto out;
}
pr_info("registered cache device %s", bdevname(bdev, name));
out:
kobject_put(&ca->kobj);
return;
err:
pr_notice("error opening %s: %s", bdevname(bdev, name), err);
goto out;
if (err)
pr_notice("error opening %s: %s", bdevname(bdev, name), err);
return ret;
}
/* Global interfaces/init */
@ -1999,7 +2018,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!ca)
goto err_close;
register_cache(sb, sb_page, bdev, ca);
if (register_cache(sb, sb_page, bdev, ca) != 0)
goto err_close;
}
out:
if (sb_page)

View File

@@ -1803,5 +1803,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)

 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
 {
-	dm_tm_issue_prefetches(pmd->tm);
+	down_read(&pmd->root_lock);
+	if (!pmd->fail_io)
+		dm_tm_issue_prefetches(pmd->tm);
+	up_read(&pmd->root_lock);
 }

View File

@@ -1065,12 +1065,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	 * back into ->request_fn() could deadlock attempting to grab the
 	 * queue lock again.
 	 */
-	if (run_queue) {
-		if (md->queue->mq_ops)
-			blk_mq_run_hw_queues(md->queue, true);
-		else
-			blk_run_queue_async(md->queue);
-	}
+	if (!md->queue->mq_ops && run_queue)
+		blk_run_queue_async(md->queue);

 	/*
 	 * dm_put() must be at the end of this function. See the comment above
@@ -1296,7 +1292,10 @@ static void dm_complete_request(struct request *rq, int error)
 	struct dm_rq_target_io *tio = tio_from_request(rq);

 	tio->error = error;
-	blk_complete_request(rq);
+	if (!rq->q->mq_ops)
+		blk_complete_request(rq);
+	else
+		blk_mq_complete_request(rq);
 }

 /*

View File

@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	}
 	multipath = conf->multipaths + mp_bh->path;

-	mp_bh->bio = *bio;
+	bio_init(&mp_bh->bio);
+	__bio_clone_fast(&mp_bh->bio, bio);
+
 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
 	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;

View File

@@ -2080,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
 	unsigned long cpu;
 	int err = 0;

+	/*
+	 * Never shrink. And mddev_suspend() could deadlock if this is called
+	 * from raid5d. In that case, scribble_disks and scribble_sectors
+	 * should equal to new_disks and new_sectors
+	 */
+	if (conf->scribble_disks >= new_disks &&
+	    conf->scribble_sectors >= new_sectors)
+		return 0;
 	mddev_suspend(conf->mddev);
 	get_online_cpus();
 	for_each_present_cpu(cpu) {
@@ -2101,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
 	}
 	put_online_cpus();
 	mddev_resume(conf->mddev);

+	if (!err) {
+		conf->scribble_disks = new_disks;
+		conf->scribble_sectors = new_sectors;
+	}
 	return err;
 }
@@ -4220,7 +4232,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 		WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
 					  (1 << STRIPE_SYNCING) |
 					  (1 << STRIPE_REPLACED) |
-					  (1 << STRIPE_PREREAD_ACTIVE) |
 					  (1 << STRIPE_DELAYED) |
 					  (1 << STRIPE_BIT_DELAY) |
 					  (1 << STRIPE_FULL_WRITE) |
@@ -4235,6 +4246,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 					  (1 << STRIPE_REPLACED)));

 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+					    (1 << STRIPE_PREREAD_ACTIVE) |
 					    (1 << STRIPE_DEGRADED)),
 			      head_sh->state & (1 << STRIPE_INSYNC));
@@ -6366,6 +6378,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
 	}
 	put_online_cpus();

+	if (!err) {
+		conf->scribble_disks = max(conf->raid_disks,
+					   conf->previous_raid_disks);
+		conf->scribble_sectors = max(conf->chunk_sectors,
+					     conf->prev_chunk_sectors);
+	}
 	return err;
 }
@@ -6942,8 +6960,8 @@ static int run(struct mddev *mddev)
 	}

 	if (discard_supported &&
-	    mddev->queue->limits.max_discard_sectors >= stripe &&
-	    mddev->queue->limits.discard_granularity >= stripe)
+	    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
+	    mddev->queue->limits.discard_granularity >= stripe)
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
 					mddev->queue);
 	else

View File

@@ -501,6 +501,8 @@ struct r5conf {
 					      * conversions
 					      */
 	} __percpu *percpu;
+	int scribble_disks;
+	int scribble_sectors;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct notifier_block cpu_notify;
 #endif
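
These two fields back the grow-only guard added to resize_chunks() above: the remembered sizes are only raised, and only after a successful reallocation, so a failed resize can never shrink the recorded state. A minimal stand-alone sketch of that pattern (the names here are illustrative, not the raid5.c ones):

#include <stdio.h>

struct scribble_state {
    int disks;
    int sectors;
};

static int resize_scribble(struct scribble_state *st, int new_disks,
                           int new_sectors)
{
    int err = 0;

    /* Never shrink: smaller requests are already satisfied. */
    if (st->disks >= new_disks && st->sectors >= new_sectors)
        return 0;

    /* ...reallocate the per-CPU buffers here, setting err on failure... */

    if (!err) {
        /* Record the new high-water marks only on success. */
        st->disks = new_disks;
        st->sectors = new_sectors;
    }
    return err;
}

int main(void)
{
    struct scribble_state st = { .disks = 8, .sectors = 1024 };

    resize_scribble(&st, 4, 512);    /* no-op: current state is big enough */
    resize_scribble(&st, 10, 1024);  /* grows disks to 10 */
    printf("disks=%d sectors=%d\n", st.disks, st.sectors);
    return 0;
}

This is also why raid5_alloc_percpu() above seeds scribble_disks/scribble_sectors only after the initial allocation succeeds.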

View File

@@ -1048,12 +1048,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
 	}
 }

+static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
+{
+	struct adv7511_state *state = get_adv7511_state(sd);
+	struct adv7511_edid_detect ed;
+
+	/* We failed to read the EDID, so send an event for this. */
+	ed.present = false;
+	ed.segment = adv7511_rd(sd, 0xc4);
+	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
+}
+
 static void adv7511_edid_handler(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
 	struct v4l2_subdev *sd = &state->sd;
-	struct adv7511_edid_detect ed;

 	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
@@ -1078,9 +1089,7 @@ static void adv7511_edid_handler(struct work_struct *work)
 	}

-	/* We failed to read the EDID, so send an event for this. */
-	ed.present = false;
-	ed.segment = adv7511_rd(sd, 0xc4);
-	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+	adv7511_notify_no_edid(sd);
 	v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
 }
@@ -1151,7 +1160,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
 	/* update read only ctrls */
 	v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
 	v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
-	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);

 	if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
 		v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
@@ -1181,6 +1189,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
 		}
 		adv7511_s_power(sd, false);
 		memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
+		adv7511_notify_no_edid(sd);
 	}
 }
@@ -1257,6 +1266,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
 		}

 		/* one more segment read ok */
 		state->edid.segments = segment + 1;
+		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
 		if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
 			/* Request next EDID segment */
 			v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
@@ -1276,7 +1286,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
 		ed.present = true;
 		ed.segment = 0;
 		state->edid_detect_counter++;
-		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
 		v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
 		return ed.present;
 	}

View File

@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
 	return 0;
 }

+static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
+					unsigned int *width_mask,
+					unsigned int *width_bias)
+{
+	if (fmt->flags & FORMAT_FLAGS_PLANAR) {
+		*width_mask = ~15; /* width must be a multiple of 16 pixels */
+		*width_bias = 8; /* nearest */
+	} else {
+		*width_mask = ~3; /* width must be a multiple of 4 pixels */
+		*width_bias = 2; /* nearest */
+	}
+}
+
 static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
 				struct v4l2_format *f)
 {
@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
 	enum v4l2_field field;
 	__s32 width, height;
 	__s32 height2;
+	unsigned int width_mask, width_bias;
 	int rc;

 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
 	width = f->fmt.pix.width;
 	height = f->fmt.pix.height;

+	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
 	rc = limit_scaled_size_lock(fh, &width, &height, field,
-			/* width_mask: 4 pixels */ ~3,
-			/* width_bias: nearest */ 2,
+			width_mask, width_bias,
 			/* adjust_size */ 1,
 			/* adjust_crop */ 0);
 	if (0 != rc)
@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
 	struct bttv_fh *fh = priv;
 	struct bttv *btv = fh->btv;
 	__s32 width, height;
+	unsigned int width_mask, width_bias;
 	enum v4l2_field field;

 	retval = bttv_switch_type(fh, f->type);
@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
 	height = f->fmt.pix.height;
 	field = f->fmt.pix.field;

+	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
 	retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
-			/* width_mask: 4 pixels */ ~3,
-			/* width_bias: nearest */ 2,
+			width_mask, width_bias,
 			/* adjust_size */ 1,
 			/* adjust_crop */ 1);
 	if (0 != retval)
@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
 	f->fmt.pix.field = field;

-	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-
 	/* update our state informations */
 	fh->fmt = fmt;
 	fh->cap.field = f->fmt.pix.field;
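
The mask/bias pairs chosen by the new helper feed limit_scaled_size_lock(), which per the "nearest" comments appears to round the width to the nearest allowed multiple via add-then-mask. A stand-alone sketch of that arithmetic under that assumption (the helper name is made up; only the mask/bias values come from the patch):

#include <stdio.h>

static unsigned int round_width(unsigned int width, unsigned int width_mask,
                                unsigned int width_bias)
{
    /* Adding the bias (half the step) then masking rounds to nearest. */
    return (width + width_bias) & width_mask;
}

int main(void)
{
    /* packed format: nearest multiple of 4 (mask ~3, bias 2) */
    printf("%u\n", round_width(723, ~3u, 2));   /* prints 724 */
    /* planar format: nearest multiple of 16 (mask ~15, bias 8) */
    printf("%u\n", round_width(723, ~15u, 8));  /* prints 720 */
    return 0;
}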

View File

@@ -1211,10 +1211,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
 	f->fmt.pix.height = dev->height;
 	f->fmt.pix.field = dev->field;
 	f->fmt.pix.pixelformat = dev->fmt->fourcc;
-	f->fmt.pix.bytesperline =
-		(f->fmt.pix.width * dev->fmt->depth) >> 3;
+	if (dev->fmt->planar)
+		f->fmt.pix.bytesperline = f->fmt.pix.width;
+	else
+		f->fmt.pix.bytesperline =
+			(f->fmt.pix.width * dev->fmt->depth) / 8;
 	f->fmt.pix.sizeimage =
-		f->fmt.pix.height * f->fmt.pix.bytesperline;
+		(f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
 	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
 	return 0;
 }
@@ -1290,10 +1293,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
 	if (f->fmt.pix.height > maxh)
 		f->fmt.pix.height = maxh;
 	f->fmt.pix.width &= ~0x03;
-	f->fmt.pix.bytesperline =
-		(f->fmt.pix.width * fmt->depth) >> 3;
+	if (fmt->planar)
+		f->fmt.pix.bytesperline = f->fmt.pix.width;
+	else
+		f->fmt.pix.bytesperline =
+			(f->fmt.pix.width * fmt->depth) / 8;
 	f->fmt.pix.sizeimage =
-		f->fmt.pix.height * f->fmt.pix.bytesperline;
+		(f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
 	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;

 	return 0;
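
The corrected math follows the V4L2 convention that, for planar formats, bytesperline describes only the widest (luma) plane, one byte per pixel for these YUV formats, while sizeimage must still cover all planes via the total per-pixel depth. A stand-alone sketch of the two cases (format names and depths here are illustrative):

#include <stdio.h>

struct fmt {
    int planar;
    unsigned int depth; /* bits per pixel, summed over all planes */
};

static void fill_pix(const struct fmt *fmt, unsigned int width,
                     unsigned int height)
{
    unsigned int bytesperline, sizeimage;

    if (fmt->planar)
        bytesperline = width; /* luma plane only: 1 byte per pixel */
    else
        bytesperline = width * fmt->depth / 8;
    /* sizeimage covers every plane, so it always uses the full depth */
    sizeimage = height * width * fmt->depth / 8;
    printf("bytesperline=%u sizeimage=%u\n", bytesperline, sizeimage);
}

int main(void)
{
    struct fmt yuv420_planar = { .planar = 1, .depth = 12 };
    struct fmt yuyv_packed   = { .planar = 0, .depth = 16 };

    fill_pix(&yuv420_planar, 720, 576); /* bytesperline=720, sizeimage=622080 */
    fill_pix(&yuyv_packed,   720, 576); /* bytesperline=1440, sizeimage=829440 */
    return 0;
}

With the old formula, a planar 720x576 4:2:0 buffer would have claimed bytesperline=1080, which no planar consumer expects.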

Some files were not shown because too many files have changed in this diff.