
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Three cases of simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-09-01 17:42:05 -07:00
commit 6026e043d0
359 changed files with 2442 additions and 1663 deletions

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*
@ -396,7 +396,7 @@ LINUXINCLUDE := \
KBUILD_CPPFLAGS := -D__KERNEL__
KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
-fno-strict-aliasing -fno-common -fshort-wchar \
-Werror-implicit-function-declaration \
-Wno-format-security \
-std=gnu89 $(call cc-option,-fno-PIE)
@ -442,7 +442,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
# ===========================================================================
# Rules shared between *config targets and build targets
# Basic helpers built in scripts/
# Basic helpers built in scripts/basic/
PHONY += scripts_basic
scripts_basic:
$(Q)$(MAKE) $(build)=scripts/basic
@ -505,7 +505,7 @@ ifeq ($(KBUILD_EXTMOD),)
endif
endif
endif
# install and module_install need also be processed one by one
# install and modules_install need also be processed one by one
ifneq ($(filter install,$(MAKECMDGOALS)),)
ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
mixed-targets := 1
@ -964,7 +964,7 @@ export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-
export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
# used by scripts/pacmage/Makefile
# used by scripts/package/Makefile
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)
@ -992,8 +992,8 @@ include/generated/autoksyms.h: FORCE
ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
# Final link of vmlinux with optional arch pass after final link
cmd_link-vmlinux = \
$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
cmd_link-vmlinux = \
$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
@ -1184,6 +1184,7 @@ PHONY += kselftest
kselftest:
$(Q)$(MAKE) -C tools/testing/selftests run_tests
PHONY += kselftest-clean
kselftest-clean:
$(Q)$(MAKE) -C tools/testing/selftests clean

View File

@ -299,6 +299,7 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
return ioremap(offset, size);
}
#define ioremap_wc ioremap_nocache
#define ioremap_uc ioremap_nocache
static inline void iounmap(volatile void __iomem *addr)

View File

@ -1,6 +1,6 @@
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
#include <asm-generic/int-ll64.h>
#include <uapi/asm/types.h>
#endif /* _ALPHA_TYPES_H */

View File

@ -3,7 +3,7 @@
#include <uapi/asm/unistd.h>
#define NR_SYSCALLS 514
#define NR_SYSCALLS 523
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64

View File

@ -9,8 +9,18 @@
* need to be careful to avoid a name clashes.
*/
#ifndef __KERNEL__
/*
* This is here because we used to use l64 for alpha
* and we don't want to impact user mode with our change to ll64
* in the kernel.
*
* However, some user programs are fine with this. They can
* flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
*/
#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
#include <asm-generic/int-l64.h>
#else
#include <asm-generic/int-ll64.h>
#endif
#endif /* _UAPI_ALPHA_TYPES_H */
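A minimal userspace sketch of the opt-in this hunk introduces (hypothetical program; only __SANE_USERSPACE_TYPES__ and the int-ll64.h selection come from the change above):

#define __SANE_USERSPACE_TYPES__	/* must precede any UAPI include */
#include <linux/types.h>
#include <stdio.h>

int main(void)
{
	/* With the macro defined, asm-generic/int-ll64.h is picked even on
	 * alpha, so __u64 is unsigned long long rather than unsigned long. */
	__u64 x = 42;
	printf("%llu\n", (unsigned long long)x);
	return 0;
}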

View File

@ -475,5 +475,19 @@
#define __NR_getrandom 511
#define __NR_memfd_create 512
#define __NR_execveat 513
#define __NR_seccomp 514
#define __NR_bpf 515
#define __NR_userfaultfd 516
#define __NR_membarrier 517
#define __NR_mlock2 518
#define __NR_copy_file_range 519
#define __NR_preadv2 520
#define __NR_pwritev2 521
#define __NR_statx 522
/* Alpha doesn't have protection keys. */
#define __IGNORE_pkey_mprotect
#define __IGNORE_pkey_alloc
#define __IGNORE_pkey_free
#endif /* _UAPI_ALPHA_UNISTD_H */

View File

@ -351,7 +351,7 @@ marvel_init_io7(struct io7 *io7)
}
}
void
void __init
marvel_io7_present(gct6_node *node)
{
int pe;
@ -369,6 +369,7 @@ marvel_io7_present(gct6_node *node)
static void __init
marvel_find_console_vga_hose(void)
{
#ifdef CONFIG_VGA_HOSE
u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
if (pu64[7] == 3) { /* TERM_TYPE == graphics */
@ -402,9 +403,10 @@ marvel_find_console_vga_hose(void)
pci_vga_hose = hose;
}
}
#endif
}
gct6_search_struct gct_wanted_node_list[] = {
gct6_search_struct gct_wanted_node_list[] __initdata = {
{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
{ 0, 0, NULL }
};

View File

@ -461,6 +461,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
unsigned long *ptes;
unsigned long pfn;
#ifdef CONFIG_VGA_HOSE
/*
* Adjust the address and hose, if necessary.
*/
@ -468,6 +469,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
h = pci_vga_hose->index;
addr += pci_vga_hose->mem_space->start;
}
#endif
/*
* Find the hose.

View File

@ -181,6 +181,9 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
switch (r_type) {
case R_ALPHA_NONE:
break;
case R_ALPHA_REFLONG:
*(u32 *)location = value;
break;
case R_ALPHA_REFQUAD:
/* BUG() can produce misaligned relocations. */
((u32 *)location)[0] = value;

View File

@ -115,7 +115,7 @@ wait_boot_cpu_to_stop(int cpuid)
/*
* Where secondaries begin a life of C.
*/
void
void __init
smp_callin(void)
{
int cpuid = hard_smp_processor_id();

View File

@ -532,6 +532,15 @@ sys_call_table:
.quad sys_getrandom
.quad sys_memfd_create
.quad sys_execveat
.quad sys_seccomp
.quad sys_bpf /* 515 */
.quad sys_userfaultfd
.quad sys_membarrier
.quad sys_mlock2
.quad sys_copy_file_range
.quad sys_preadv2 /* 520 */
.quad sys_pwritev2
.quad sys_statx
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object

View File

@ -20,12 +20,8 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
checksum.o \
csum_partial_copy.o \
$(ev67-y)strlen.o \
$(ev67-y)strcat.o \
strcpy.o \
$(ev67-y)strncat.o \
strncpy.o \
$(ev6-y)stxcpy.o \
$(ev6-y)stxncpy.o \
stycpy.o \
styncpy.o \
$(ev67-y)strchr.o \
$(ev67-y)strrchr.o \
$(ev6-y)memchr.o \
@ -49,3 +45,17 @@ AFLAGS___remlu.o = -DREM -DINTSIZE
$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
$(src)/$(ev6-y)divide.S FORCE
$(call if_changed_rule,as_o_S)
# There are direct branches between {str*cpy,str*cat} and stx*cpy.
# Ensure the branches are within range by merging these objects.
LDFLAGS_stycpy.o := -r
LDFLAGS_styncpy.o := -r
$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \
$(obj)/$(ev6-y)stxcpy.o FORCE
$(call if_changed,ld)
$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \
$(obj)/$(ev6-y)stxncpy.o FORCE
$(call if_changed,ld)
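For context on why the partial links are needed: Alpha branch instructions encode a signed 21-bit longword displacement, giving a reach of about ±4 MB (2^20 instructions × 4 bytes). Merging each str*cpy/str*cat routine with the stx*cpy code it branches into via ld -r keeps the branch targets within that range regardless of final link order.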

View File

@ -34,7 +34,7 @@
.ent __copy_user
__copy_user:
.prologue 0
and $18,$18,$0
mov $18,$0
and $16,7,$3
beq $0,$35
beq $3,$36

View File

@ -45,9 +45,10 @@
# Pipeline info: Slotting & Comments
__copy_user:
.prologue 0
andq $18, $18, $0
subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy?
beq $0, $zerolength # U .. .. .. : U L U L
mov $18, $0 # .. .. .. E
subq $18, 32, $1 # .. .. E. .. : Is this going to be a small copy?
nop # .. E .. ..
beq $18, $zerolength # U .. .. .. : U L U L
and $16,7,$3 # .. .. .. E : is leading dest misalignment
ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data

View File

@ -75,13 +75,20 @@ void arc_init_IRQ(void)
* Set a default priority for all available interrupts to prevent
* switching of register banks if Fast IRQ and multiple register banks
* are supported by CPU.
* Also disable all IRQ lines so faulty external hardware won't
* Also disable private-per-core IRQ lines so faulty external HW won't
* trigger interrupt that kernel is not ready to handle.
*/
for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
write_aux_reg(AUX_IRQ_SELECT, i);
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
write_aux_reg(AUX_IRQ_ENABLE, 0);
/*
* Only mask cpu private IRQs here.
* "common" interrupts are masked at IDU, otherwise it would
* need to be unmasked at each cpu, with IPIs
*/
if (i < FIRST_EXT_IRQ)
write_aux_reg(AUX_IRQ_ENABLE, 0);
}
/* setup status32, don't enable intr yet as kernel doesn't want */

View File

@ -27,7 +27,7 @@
*/
void arc_init_IRQ(void)
{
int level_mask = 0, i;
unsigned int level_mask = 0, i;
/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;

View File

@ -266,6 +266,7 @@
&hdmicec {
status = "okay";
needs-hpd;
};
&hsi2c_4 {

View File

@ -225,12 +225,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* We do not have shadow page tables, hence the empty hooks */
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
}
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);

View File

@ -1,7 +1,7 @@
menuconfig ARCH_AT91
bool "Atmel SoCs"
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M
select ARM_CPU_SUSPEND if PM
select ARM_CPU_SUSPEND if PM && ARCH_MULTI_V7
select COMMON_CLK_AT91
select GPIOLIB
select PINCTRL

View File

@ -608,6 +608,9 @@ static void __init at91_pm_init(void (*pm_idle)(void))
void __init at91rm9200_pm_init(void)
{
if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
return;
at91_dt_ramc();
/*
@ -620,18 +623,27 @@ void __init at91rm9200_pm_init(void)
void __init at91sam9_pm_init(void)
{
if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
return;
at91_dt_ramc();
at91_pm_init(at91sam9_idle);
}
void __init sama5_pm_init(void)
{
if (!IS_ENABLED(CONFIG_SOC_SAMA5))
return;
at91_dt_ramc();
at91_pm_init(NULL);
}
void __init sama5d2_pm_init(void)
{
if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
return;
at91_pm_backup_init();
sama5_pm_init();
}

View File

@ -326,12 +326,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* We do not have shadow page tables, hence the empty hooks */
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
}
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);

View File

@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
{
if (!system_supports_fpsimd())
return;
preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
preempt_enable();
}
/*

View File

@ -354,7 +354,6 @@ __primary_switched:
tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
b.ne 0f
mov x0, x21 // pass FDT address in x0
mov x1, x23 // pass modulo offset in x1
bl kaslr_early_init // parse FDT for KASLR options
cbz x0, 0f // KASLR disabled? just proceed
orr x23, x23, x0 // record KASLR offset

View File

@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
* containing function pointers) to be reinitialized, and zero-initialized
* .bss variables will be reset to 0.
*/
u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
u64 __init kaslr_early_init(u64 dt_phys)
{
void *fdt;
u64 seed, offset, mask, module_range;
@ -131,15 +131,17 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
/*
* The kernel Image should not extend across a 1GB/32MB/512MB alignment
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
* happens, increase the KASLR offset by the size of the kernel image
* rounded up by SWAPPER_BLOCK_SIZE.
* happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
*
* NOTE: The references to _text and _end below will already take the
* modulo offset (the physical displacement modulo 2 MB) into
* account, given that the physical placement is controlled by
* the loader, and will not change as a result of the virtual
* mapping we choose.
*/
if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
(((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
u64 kimg_sz = _end - _text;
offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
& mask;
}
if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
(((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);
if (IS_ENABLED(CONFIG_KASAN))
/*

View File

@ -435,8 +435,11 @@ retry:
* the mmap_sem because it would already be released
* in __lock_page_or_retry in mm/filemap.c.
*/
if (fatal_signal_pending(current))
if (fatal_signal_pending(current)) {
if (!user_mode(regs))
goto no_context;
return 0;
}
/*
* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of

View File

@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6455=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@ -25,7 +24,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set

View File

@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6457=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@ -26,7 +25,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set

View File

@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6472=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set

View File

@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6474=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set

View File

@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6678=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set

View File

@ -208,14 +208,14 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
if (!pic) {
pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
pr_err("%pOF: Could not alloc PIC structure.\n", np);
return NULL;
}
pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
&megamod_domain_ops, pic);
if (!pic->irqhost) {
pr_err("%s: Could not alloc host.\n", np->full_name);
pr_err("%pOF: Could not alloc host.\n", np);
goto error_free;
}
@ -225,7 +225,7 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
pic->regs = of_iomap(np, 0);
if (!pic->regs) {
pr_err("%s: Could not map registers.\n", np->full_name);
pr_err("%pOF: Could not map registers.\n", np);
goto error_free;
}
@ -253,8 +253,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
irq_data = irq_get_irq_data(irq);
if (!irq_data) {
pr_err("%s: combiner-%d no irq_data for virq %d!\n",
np->full_name, i, irq);
pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
np, i, irq);
continue;
}
@ -265,16 +265,16 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
* of the core priority interrupts (4 - 15).
*/
if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
pr_err("%s: combiner-%d core irq %ld out of range!\n",
np->full_name, i, hwirq);
pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
np, i, hwirq);
continue;
}
/* record the mapping */
mapping[hwirq - 4] = i;
pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
np->full_name, i, hwirq);
pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
np, i, hwirq);
cascade_data[i].pic = pic;
cascade_data[i].index = i;
@ -290,8 +290,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
/* Finally, set up the MUX registers */
for (i = 0; i < NR_MUX_OUTPUTS; i++) {
if (mapping[i] != IRQ_UNMAPPED) {
pr_debug("%s: setting mux %d to priority %d\n",
np->full_name, mapping[i], i + 4);
pr_debug("%pOF: setting mux %d to priority %d\n",
np, mapping[i], i + 4);
set_megamod_mux(pic, mapping[i], i);
}
}

View File

@ -436,8 +436,8 @@ void __init c64x_setup_clocks(void)
err = of_property_read_u32(node, "clock-frequency", &val);
if (err || val == 0) {
pr_err("%s: no clock-frequency found! Using %dMHz\n",
node->full_name, (int)val / 1000000);
pr_err("%pOF: no clock-frequency found! Using %dMHz\n",
node, (int)val / 1000000);
val = 25000000;
}
clkin1.rate = val;

View File

@ -204,14 +204,14 @@ void __init timer64_init(void)
timer = of_iomap(np, 0);
if (!timer) {
pr_debug("%s: Cannot map timer registers.\n", np->full_name);
pr_debug("%pOF: Cannot map timer registers.\n", np);
goto out;
}
pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);
pr_debug("%pOF: Timer registers=%p.\n", np, timer);
cd->irq = irq_of_parse_and_map(np, 0);
if (cd->irq == NO_IRQ) {
pr_debug("%s: Cannot find interrupt.\n", np->full_name);
pr_debug("%pOF: Cannot find interrupt.\n", np);
iounmap(timer);
goto out;
}
@ -229,7 +229,7 @@ void __init timer64_init(void)
dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
}
pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);
pr_debug("%pOF: Timer irq=%d.\n", np, cd->irq);
clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);

View File

@ -938,11 +938,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
}
/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);

View File

@ -67,11 +67,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
}
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12

View File

@ -90,6 +90,24 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
/* Mark this context has been used on the new CPU */
if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
/*
* This full barrier orders the store to the cpumask above vs
* a subsequent operation which allows this CPU to begin loading
* translations for next.
*
* When using the radix MMU that operation is the load of the
* MMU context id, which is then moved to SPRN_PID.
*
* For the hash MMU it is either the first load from slb_cache
* in switch_slb(), and/or the store of paca->mm_ctx_id in
* copy_mm_to_paca().
*
* On the read side the barrier is in pte_xchg(), which orders
* the store to the PTE vs the load of mm_cpumask.
*/
smp_mb();
new_on_cpu = true;
}
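The hazard this barrier closes is the classic store-buffering pattern; a rough userspace analogue in C11 atomics (illustrative sketch only, not the kernel's primitives):

#include <stdatomic.h>

atomic_int mask;	/* stands in for mm_cpumask */
atomic_int pte;		/* stands in for the PTE / context id */

/* switch_mm side: publish this CPU, then begin loading translations */
int switch_side(void)
{
	atomic_store_explicit(&mask, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() above */
	return atomic_load_explicit(&pte, memory_order_relaxed);
}

/* invalidation side: update the PTE, then see who needs flushing */
int invalidate_side(void)
{
	atomic_store_explicit(&pte, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier in pte_xchg() */
	return atomic_load_explicit(&mask, memory_order_relaxed);
}

/* Without both fences, both functions may return 0: the switching CPU
 * uses a stale translation while the invalidator never sees it in the
 * mask.  With them, at least one side must observe the other's store. */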

View File

@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
unsigned long *p = (unsigned long *)ptep;
__be64 prev;
/* See comment in switch_mm_irqs_off() */
prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
(__force unsigned long)pte_raw(new));

View File

@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
{
unsigned long *p = (unsigned long *)ptep;
/* See comment in switch_mm_irqs_off() */
return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
}
#endif

View File

@ -294,32 +294,26 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
struct kvmppc_spapr_tce_table *siter;
unsigned long npages, size;
int ret = -ENOMEM;
int i;
int fd = -1;
if (!args->size)
return -EINVAL;
/* Check this LIOBN hasn't been previously allocated */
list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt->liobn == args->liobn)
return -EBUSY;
}
size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
npages = kvmppc_tce_pages(size);
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
if (ret) {
stt = NULL;
goto fail;
}
if (ret)
return ret;
ret = -ENOMEM;
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
if (!stt)
goto fail;
goto fail_acct;
stt->liobn = args->liobn;
stt->page_shift = args->page_shift;
@ -334,24 +328,42 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
goto fail;
}
kvm_get_kvm(kvm);
ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR | O_CLOEXEC);
if (ret < 0)
goto fail;
mutex_lock(&kvm->lock);
list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
/* Check this LIOBN hasn't been previously allocated */
ret = 0;
list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
if (siter->liobn == args->liobn) {
ret = -EBUSY;
break;
}
}
if (!ret) {
list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
kvm_get_kvm(kvm);
}
mutex_unlock(&kvm->lock);
return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR | O_CLOEXEC);
if (!ret)
return fd;
fail:
if (stt) {
for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
put_unused_fd(fd);
kfree(stt);
}
fail:
for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
kfree(stt);
fail_acct:
kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
return ret;
}

View File

@ -1291,6 +1291,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Hypervisor doorbell - exit only if host IPI flag set */
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
BEGIN_FTR_SECTION
PPC_MSGSYNC
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
beq 4f

View File

@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
u8 cppr;
u16 ack;
/* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
/*
* Ensure any previous store to CPPR is ordered vs.
* the subsequent loads from PIPR or ACK.
*/
eieio();
/*
* DD1 bug workaround: If PIPR is less favored than CPPR
* ignore the interrupt or we might incorrectly lose an IPB
* bit.
*/
if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
if (pipr >= xc->hw_cppr)
return;
}
/* Perform the acknowledge OS to register cycle. */
ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@ -235,6 +250,11 @@ skip_ipi:
/*
* If we found an interrupt, adjust what the guest CPPR should
* be as if we had just fetched that interrupt from HW.
*
* Note: This can only make xc->cppr smaller as the previous
* loop will only exit with hirq != 0 if prio is lower than
* the current xc->cppr. Thus we don't need to re-check xc->mfrr
* for pending IPIs.
*/
if (hirq)
xc->cppr = prio;
@ -380,6 +400,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
old_cppr = xc->cppr;
xc->cppr = cppr;
/*
* Order the above update of xc->cppr with the subsequent
* read of xc->mfrr inside push_pending_to_hw()
*/
smp_mb();
/*
* We are masking less, we need to look for pending things
* to deliver and set VP pending bits accordingly to trigger
@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
* used to signal MFRR changes is EOId when fetched from
* the queue.
*/
if (irq == XICS_IPI || irq == 0)
if (irq == XICS_IPI || irq == 0) {
/*
* This barrier orders the setting of xc->cppr vs.
* subsquent test of xc->mfrr done inside
* scan_interrupts and push_pending_to_hw
*/
smp_mb();
goto bail;
}
/* Find interrupt source */
sb = kvmppc_xive_find_source(xive, irq, &src);
if (!sb) {
pr_devel(" source not found !\n");
rc = H_PARAMETER;
/* Same as above */
smp_mb();
goto bail;
}
state = &sb->irq_state[src];
kvmppc_xive_select_irq(state, &hw_num, &xd);
state->in_eoi = true;
mb();
/*
* This barrier orders both setting of in_eoi above vs,
* subsequent test of guest_priority, and the setting
* of xc->cppr vs. subsquent test of xc->mfrr done inside
* scan_interrupts and push_pending_to_hw
*/
smp_mb();
again:
if (state->guest_priority == MASKED) {
@ -461,6 +503,14 @@ again:
}
/*
* This barrier orders the above guest_priority check
* and spin_lock/unlock with clearing in_eoi below.
*
* It also has to be a full mb() as it must ensure
* the MMIOs done in source_eoi() are completed before
* state->in_eoi is visible.
*/
mb();
state->in_eoi = false;
bail:
@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
/* Locklessly write over MFRR */
xc->mfrr = mfrr;
/*
* The load of xc->cppr below and the subsequent MMIO store
* to the IPI must happen after the above mfrr update is
* globally visible so that:
*
* - Synchronize with another CPU doing an H_EOI or a H_CPPR
* updating xc->cppr then reading xc->mfrr.
*
* - The target of the IPI sees the xc->mfrr update
*/
mb();
/* Shoot the IPI if most favored than target cppr */
if (mfrr < xc->cppr)
__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

View File

@ -614,15 +614,6 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
mmio_invalidate(npu_context, 1, address, true);
}
static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct npu_context *npu_context = mn_to_npu_context(mn);
mmio_invalidate(npu_context, 1, address, true);
}
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
@ -640,7 +631,6 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
.release = pnv_npu2_mn_release,
.change_pte = pnv_npu2_mn_change_pte,
.invalidate_page = pnv_npu2_mn_invalidate_page,
.invalidate_range = pnv_npu2_mn_invalidate_range,
};

View File

@ -44,6 +44,11 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
break;
case -PAGE_SIZE:
/* forked 5-level task, set new asce with new_mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
break;
case 1UL << 53:
/* forked 4-level task, set new asce with new mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |

View File

@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
"srl %[cc],28\n"
: [cc] "=d" (cc)
: [code] "d" (code), [addr] "a" (addr)
: "memory", "cc");
: "3", "memory", "cc");
return cc;
}
@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
trace_kvm_s390_handle_sthyi(vcpu, code, addr);
if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (code & 0xffff) {
@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
goto out;
}
if (addr & ~PAGE_MASK)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/*
* If the page has not yet been faulted in, we want to do that
* now and not after all the expensive calculations.

View File

@ -119,7 +119,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
check_asce_limit:
if (addr + len > current->mm->context.asce_limit) {
if (addr + len > current->mm->context.asce_limit &&
addr + len <= TASK_SIZE) {
rc = crst_table_upgrade(mm, addr + len);
if (rc)
return (unsigned long) rc;
@ -183,7 +184,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
}
check_asce_limit:
if (addr + len > current->mm->context.asce_limit) {
if (addr + len > current->mm->context.asce_limit &&
addr + len <= TASK_SIZE) {
rc = crst_table_upgrade(mm, addr + len);
if (rc)
return (unsigned long) rc;

View File

@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
return 0;
}
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
if (use_xsave()) {
copy_kernel_to_xregs(&fpstate->xsave, -1);
copy_kernel_to_xregs(&fpstate->xsave, mask);
} else {
if (use_fxsr())
copy_kernel_to_fxregs(&fpstate->fxsave);
@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
: : [addr] "m" (fpstate));
}
__copy_kernel_to_fpregs(fpstate);
__copy_kernel_to_fpregs(fpstate, -1);
}
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

View File

@ -492,6 +492,7 @@ struct kvm_vcpu_arch {
unsigned long cr4;
unsigned long cr4_guest_owned_bits;
unsigned long cr8;
u32 pkru;
u32 hflags;
u64 efer;
u64 apic_base;
@ -1374,8 +1375,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address);
void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

View File

@ -140,9 +140,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
init_new_context_ldt(tsk, mm);
return 0;
return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{

View File

@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
cpuid_mask(&entry->ecx, CPUID_7_ECX);
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled)
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
entry->ecx &= ~F(PKU);
entry->edx &= kvm_cpuid_7_0_edx_x86_features;
entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);

View File

@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}
static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
{
return kvm_x86_ops->get_pkru(vcpu);
}
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
vcpu->arch.hflags |= HF_GUEST_MASK;

View File

@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
* index of the protection domain, so pte_pkey * 2 is
* is the index of the first bit for the domain.
*/
pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
offset = (pfec & ~1) +

View File

@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
{
return 0;
}
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
switch (reg) {
@ -5413,8 +5408,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
.get_pkru = svm_get_pkru,
.tlb_flush = svm_flush_tlb,
.run = svm_vcpu_run,

View File

@ -636,8 +636,6 @@ struct vcpu_vmx {
u64 current_tsc_ratio;
bool guest_pkru_valid;
u32 guest_pkru;
u32 host_pkru;
/*
@ -2383,11 +2381,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
}
static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
{
return to_vmx(vcpu)->guest_pkru;
}
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@ -9020,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
if (vmx->guest_pkru_valid)
__write_pkru(vmx->guest_pkru);
if (static_cpu_has(X86_FEATURE_PKU) &&
kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vcpu->arch.pkru);
atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr();
@ -9169,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* back on host, so it is safe to read guest PKRU from current
* XSAVE.
*/
if (boot_cpu_has(X86_FEATURE_OSPKE)) {
vmx->guest_pkru = __read_pkru();
if (vmx->guest_pkru != vmx->host_pkru) {
vmx->guest_pkru_valid = true;
if (static_cpu_has(X86_FEATURE_PKU) &&
kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
vcpu->arch.pkru = __read_pkru();
if (vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vmx->host_pkru);
} else
vmx->guest_pkru_valid = false;
}
/*
@ -11682,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
.get_pkru = vmx_get_pkru,
.tlb_flush = vmx_flush_tlb,
.run = vmx_vcpu_run,
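The switching pattern above, reduced to its core (a sketch assuming PKU is available and CR4.PKE is set; the GCC builtins stand in for the kernel's __read_pkru()/__write_pkru() and need -mpku to compile):

unsigned int host_pkru;		/* captured while in host context */
unsigned int guest_pkru;	/* vcpu->arch.pkru in the patch */

void vcpu_run_sketch(void)
{
	if (guest_pkru != host_pkru)
		__builtin_ia32_wrpkru(guest_pkru);	/* enter with guest value */

	/* ... VMLAUNCH/VMRESUME, guest runs ... */

	guest_pkru = __builtin_ia32_rdpkru();		/* save what the guest set */
	if (guest_pkru != host_pkru)
		__builtin_ia32_wrpkru(host_pkru);	/* restore host value */
}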

View File

@ -3245,7 +3245,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
u32 size, offset, ecx, edx;
cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx);
memcpy(dest + offset, src, size);
if (feature == XFEATURE_MASK_PKRU)
memcpy(dest + offset, &vcpu->arch.pkru,
sizeof(vcpu->arch.pkru));
else
memcpy(dest + offset, src, size);
}
valid -= feature;
@ -3283,7 +3288,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
u32 size, offset, ecx, edx;
cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx);
memcpy(dest, src + offset, size);
if (feature == XFEATURE_MASK_PKRU)
memcpy(&vcpu->arch.pkru, src + offset,
sizeof(vcpu->arch.pkru));
else
memcpy(dest, src + offset, size);
}
valid -= feature;
@ -6725,17 +6734,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
/*
* The physical address of apic access page is stored in the VMCS.
* Update it when it becomes invalid.
*/
if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}
/*
* Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
@ -7633,7 +7631,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
*/
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
/* PKRU is separately restored in kvm_x86_ops->run. */
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
~XFEATURE_MASK_PKRU);
trace_kvm_fpu(1);
}

View File

@ -50,7 +50,7 @@ void foo(void)
DEFINE(HOST_GS, GS);
DEFINE(HOST_ORIG_AX, ORIG_EAX);
#else
#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
#ifdef FP_XSTATE_MAGIC1
DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
#else
DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));

View File

@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(POLL_STATS),
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME
@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
CMD_FLAG_NAME(RAHEAD),
CMD_FLAG_NAME(BACKGROUND),
CMD_FLAG_NAME(NOUNMAP),
CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

View File

@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
} \
} while (0)
static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
/* assume it's one sector */
if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
return 512;
return bio->bi_iter.bi_size;
}
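Worked effect of the helper: under a 1 MB/s bps limit, a single 256 MiB REQ_OP_DISCARD bio used to be charged 268435456 bytes, stalling the group for roughly 256 seconds; it is now charged 512 bytes, on the reasoning in the comment that a discard moves no actual data.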
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
INIT_LIST_HEAD(&qn->node);
@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
unsigned int bio_size = throtl_bio_data_size(bio);
jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
do_div(tmp, HZ);
bytes_allowed = tmp;
if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
if (wait)
*wait = 0;
return true;
}
/* Calc approx time to dispatch */
extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
if (!jiffy_wait)
@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
bool rw = bio_data_dir(bio);
unsigned int bio_size = throtl_bio_data_size(bio);
/* Charge the bio to the group */
tg->bytes_disp[rw] += bio->bi_iter.bi_size;
tg->bytes_disp[rw] += bio_size;
tg->io_disp[rw]++;
tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
tg->last_bytes_disp[rw] += bio_size;
tg->last_io_disp[rw]++;
/*

View File

@ -29,26 +29,25 @@
#include <scsi/scsi_cmnd.h>
/**
* bsg_destroy_job - routine to teardown/delete a bsg job
* bsg_teardown_job - routine to teardown a bsg job
* @job: bsg_job that is to be torn down
*/
static void bsg_destroy_job(struct kref *kref)
static void bsg_teardown_job(struct kref *kref)
{
struct bsg_job *job = container_of(kref, struct bsg_job, kref);
struct request *rq = job->req;
blk_end_request_all(rq, BLK_STS_OK);
put_device(job->dev); /* release reference for the request */
kfree(job->request_payload.sg_list);
kfree(job->reply_payload.sg_list);
kfree(job);
blk_end_request_all(rq, BLK_STS_OK);
}
void bsg_job_put(struct bsg_job *job)
{
kref_put(&job->kref, bsg_destroy_job);
kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);
@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
*/
static void bsg_softirq_done(struct request *rq)
{
struct bsg_job *job = rq->special;
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
bsg_job_put(job);
}
@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
}
/**
* bsg_create_job - create the bsg_job structure for the bsg request
* bsg_prepare_job - create the bsg_job structure for the bsg request
* @dev: device that is being sent the bsg request
* @req: BSG request that needs a job structure
*/
static int bsg_create_job(struct device *dev, struct request *req)
static int bsg_prepare_job(struct device *dev, struct request *req)
{
struct request *rsp = req->next_rq;
struct request_queue *q = req->q;
struct scsi_request *rq = scsi_req(req);
struct bsg_job *job;
struct bsg_job *job = blk_mq_rq_to_pdu(req);
int ret;
BUG_ON(req->special);
job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
if (!job)
return -ENOMEM;
req->special = job;
job->req = req;
if (q->bsg_job_size)
job->dd_data = (void *)&job[1];
job->request = rq->cmd;
job->request_len = rq->cmd_len;
job->reply = rq->sense;
job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
* allocated */
if (req->bio) {
ret = bsg_map_buffer(&job->request_payload, req);
if (ret)
@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
{
struct device *dev = q->queuedata;
struct request *req;
struct bsg_job *job;
int ret;
if (!get_device(dev))
@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
break;
spin_unlock_irq(q->queue_lock);
ret = bsg_create_job(dev, req);
ret = bsg_prepare_job(dev, req);
if (ret) {
scsi_req(req)->result = ret;
blk_end_request_all(req, BLK_STS_OK);
@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
continue;
}
job = req->special;
ret = q->bsg_job_fn(job);
ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
spin_lock_irq(q->queue_lock);
if (ret)
break;
@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
spin_lock_irq(q->queue_lock);
}
static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
{
struct bsg_job *job = blk_mq_rq_to_pdu(req);
struct scsi_request *sreq = &job->sreq;
memset(job, 0, sizeof(*job));
scsi_req_init(sreq);
sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
sreq->sense = kzalloc(sreq->sense_len, gfp);
if (!sreq->sense)
return -ENOMEM;
job->req = req;
job->reply = sreq->sense;
job->reply_len = sreq->sense_len;
job->dd_data = job + 1;
return 0;
}
static void bsg_exit_rq(struct request_queue *q, struct request *req)
{
struct bsg_job *job = blk_mq_rq_to_pdu(req);
struct scsi_request *sreq = &job->sreq;
kfree(sreq->sense);
}
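With the init/exit hooks above, the per-request allocation that bsg_create_job() used to do by hand comes from the block layer instead: cmd_size is set to sizeof(struct bsg_job) + dd_job_size below, so blk_mq_rq_to_pdu() hands back a region laid out as [struct bsg_job | dd_job_size bytes of driver data], which is why job->dd_data = job + 1 needs no separate kzalloc().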
/**
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
return ERR_PTR(-ENOMEM);
q->cmd_size = sizeof(struct scsi_request);
q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
q->init_rq_fn = bsg_init_rq;
q->exit_rq_fn = bsg_exit_rq;
q->request_fn = bsg_request_fn;
ret = blk_init_allocated_queue(q);
@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
goto out_cleanup_queue;
q->queuedata = dev;
q->bsg_job_size = dd_job_size;
q->bsg_job_fn = job_fn;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);

View File

@ -87,8 +87,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
}
sgl = sreq->tsg;
n = sg_nents(sgl);
for_each_sg(sgl, sg, n, i)
put_page(sg_page(sg));
for_each_sg(sgl, sg, n, i) {
struct page *page = sg_page(sg);
/* some SGs may not have a page mapped */
if (page && page_ref_count(page))
put_page(page);
}
kfree(sreq->tsg);
}

View File

@ -91,9 +91,14 @@ int crypto_chacha20_crypt(struct skcipher_request *req)
crypto_chacha20_init(state, ctx, walk.iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
if (nbytes < walk.total)
nbytes = round_down(nbytes, walk.stride);
chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
err = skcipher_walk_done(&walk, 0);
nbytes);
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
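Concretely: chacha20's walk stride is its 64-byte block size, so if a scatterlist chunk yields walk.nbytes = 100 mid-stream, the loop now encrypts round_down(100, 64) = 64 bytes and returns the remaining 36 to skcipher_walk_done(), keeping the keystream block-aligned across chunk boundaries; only the final chunk may process a partial block.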

View File

@ -32675,6 +32675,10 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
"\x5b\x86\x2f\x37\x30\xe3\x7c\xfd"
"\xc4\xfd\x80\x6c\x22\xf2\x21",
.rlen = 375,
.also_non_np = 1,
.np = 3,
.tap = { 375 - 20, 4, 16 },
}, { /* RFC7539 A.2. Test Vector #3 */
.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
"\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@ -33049,6 +33053,9 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
"\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
"\x98",
.rlen = 1281,
.also_non_np = 1,
.np = 3,
.tap = { 1200, 1, 80 },
},
};
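Sanity check on the new vectors: the .tap entries must sum to .rlen (355 + 4 + 16 = 375 and 1200 + 1 + 80 = 1281), and .np = 3 makes the tester split the input into those three scatterlist fragments, exactly the cross-page, non-block-aligned walk that the chacha20 fix above addresses.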

View File

@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
status = acpi_get_handle(handle, pathname, &target_handle);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
if (pathname) {
status = acpi_get_handle(handle, pathname, &target_handle);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} else {
target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);

View File

@ -1741,7 +1741,7 @@ error:
* functioning ECDT EC first in order to handle the events.
* https://bugzilla.kernel.org/show_bug.cgi?id=115021
*/
int __init acpi_ec_ecdt_start(void)
static int __init acpi_ec_ecdt_start(void)
{
acpi_handle handle;
@ -2003,20 +2003,17 @@ static inline void acpi_ec_query_exit(void)
int __init acpi_ec_init(void)
{
int result;
int ecdt_fail, dsdt_fail;
/* register workqueue for _Qxx evaluations */
result = acpi_ec_query_init();
if (result)
goto err_exit;
/* Now register the driver for the EC */
result = acpi_bus_register_driver(&acpi_ec_driver);
if (result)
goto err_exit;
return result;
err_exit:
if (result)
acpi_ec_query_exit();
return result;
/* Drivers must be started after acpi_ec_query_init() */
ecdt_fail = acpi_ec_ecdt_start();
dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
/* EC driver currently not unloadable */

View File

@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_ec_dsdt_probe(void);
int acpi_ec_ecdt_start(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,

View File

@ -1047,7 +1047,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
fwnode_for_each_child_node(fwnode, child) {
u32 nr;
if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
if (fwnode_property_read_u32(child, prop_name, &nr))
continue;
if (val == nr)

View File

@ -2084,7 +2084,6 @@ int __init acpi_scan_init(void)
acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
acpi_ec_ecdt_start();
acpi_scan_initialized = true;

View File

@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
const char *failure_string;
struct binder_buffer *buffer;
if (proc->tsk != current)
if (proc->tsk != current->group_leader)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M)

View File

@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
return rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
if (!res) {
rc = -ENODEV;
goto disable_resources;
}
pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
if (!pwrdn_reg)
if (!pwrdn_reg) {
rc = -ENOMEM;
goto disable_resources;
}
da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);

View File

@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
u64 trusted_cap;
unsigned int err;
if (!ata_id_has_trusted(dev->id))
return;
if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
ata_dev_warn(dev,
"Security Log not supported\n");

View File

@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
}
static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
loff_t logical_blocksize)
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
lo->lo_offset = offset;
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
lo->lo_logical_blocksize = logical_blocksize;
blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
blk_queue_logical_block_size(lo->lo_queue,
lo->lo_logical_blocksize);
}
set_capacity(lo->lo_disk, x);
bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
/* let user-space know about the new size */
@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
int lo_bits = 9;
/*
* We use punch hole to reclaim the free space used by the
@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
lo_bits = blksize_bits(lo->lo_logical_blocksize);
blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->use_dio = false;
lo->lo_blocksize = lo_blocksize;
lo->lo_logical_blocksize = 512;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
int lo_flags = lo->lo_flags;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (err)
goto exit;
if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
lo->lo_logical_blocksize = 512;
lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
if (LO_INFO_BLOCKSIZE(info) != 512 &&
LO_INFO_BLOCKSIZE(info) != 1024 &&
LO_INFO_BLOCKSIZE(info) != 2048 &&
LO_INFO_BLOCKSIZE(info) != 4096)
return -EINVAL;
if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
return -EINVAL;
}
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit ||
lo->lo_flags != lo_flags ||
((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
LO_INFO_BLOCKSIZE(info))) {
lo->lo_sizelimit != info->lo_sizelimit) {
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto exit;
}
@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
lo->lo_logical_blocksize);
return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)

View File

@ -49,7 +49,6 @@ struct loop_device {
struct file * lo_backing_file;
struct block_device *lo_device;
unsigned lo_blocksize;
unsigned lo_logical_blocksize;
void *key_data;
gfp_t old_gfp_mask;

View File

@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
char *envp[] = { "RESIZE=1", NULL };
unsigned long long nblocks;
u64 capacity;
/* Host must always specify the capacity. */
@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
capacity = (sector_t)-1;
}
string_get_size(capacity, queue_logical_block_size(q),
nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
string_get_size(capacity, queue_logical_block_size(q),
string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
"new size: %llu %d-byte logical blocks (%s/%s)\n",
(unsigned long long)capacity,
queue_logical_block_size(q),
cap_str_10, cap_str_2);
"new size: %llu %d-byte logical blocks (%s/%s)\n",
nblocks,
queue_logical_block_size(q),
cap_str_10,
cap_str_2);
set_capacity(vblk->disk, capacity);
revalidate_disk(vblk->disk);
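Worked example of the conversion: capacity is in 512-byte sectors, so an 8 GiB disk with 4096-byte logical blocks reports capacity = 16777216, and nblocks = DIV_ROUND_UP_ULL(16777216, 4096 >> 9) = 16777216 / 8 = 2097152. string_get_size(2097152, 4096, ...) then prints 8.00 GiB, where the old code effectively multiplied 16777216 sectors by the 4096-byte block size and announced 64 GiB.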

View File

@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
struct pending_req *req, *n;
unsigned int j, r;
bool busy = false;
for (r = 0; r < blkif->nr_rings; r++) {
struct xen_blkif_ring *ring = &blkif->rings[r];
@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
* don't have any discard_io or other_io requests. So, checking
* for inflight IO is enough.
*/
if (atomic_read(&ring->inflight) > 0)
return -EBUSY;
if (atomic_read(&ring->inflight) > 0) {
busy = true;
continue;
}
if (ring->irq) {
unbind_from_irqhandler(ring->irq, ring);
@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false;
}
if (busy)
return -EBUSY;
blkif->nr_ring_pages = 0;
/*
* blkif->rings was allocated in connect_ring, so we should free it in

View File

@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq < 0) {
ret = tdc->irq;
if (tdc->irq <= 0) {
ret = tdc->irq ?: -ENXIO;
goto irq_dispose;
}

View File

@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
edge_cause = mvebu_gpio_read_edge_cause(mvchip);
edge_mask = mvebu_gpio_read_edge_mask(mvchip);
cause = (data_in ^ level_mask) | (edge_cause & edge_mask);
cause = (data_in & level_mask) | (edge_cause & edge_mask);
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;

View File

@ -2,6 +2,7 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = {
};
ATTRIBUTE_GROUPS(gpiochip);
static struct gpio_desc *gpio_to_valid_desc(int gpio)
{
return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL;
}
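For reference, the path this guards: writing a negative number such as -1 to /sys/class/gpio/export previously went straight into gpio_to_desc(), which WARNs on out-of-range numbers, giving userspace a way to trigger kernel warnings at will; the bounds check turns that into the ordinary invalid GPIO error handling below.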
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class,
if (status < 0)
goto done;
desc = gpio_to_desc(gpio);
desc = gpio_to_valid_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;
desc = gpio_to_desc(gpio);
desc = gpio_to_valid_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);

View File

@ -146,36 +146,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
}
}
/**
* amdgpu_mn_invalidate_page - callback to notify about mm change
*
* @mn: our notifier
* @mn: the mm this callback is about
* @address: address of invalidate page
*
* Invalidation of a single page. Blocks for all BOs mapping it
* and unmap them by move them into system domain again.
*/
static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
mutex_lock(&rmn->lock);
it = interval_tree_iter_first(&rmn->objects, address, address);
if (it) {
struct amdgpu_mn_node *node;
node = container_of(it, struct amdgpu_mn_node, it);
amdgpu_mn_invalidate_node(node, address, address);
}
mutex_unlock(&rmn->lock);
}
/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
*
@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
.invalidate_page = amdgpu_mn_invalidate_page,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

View File

@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
u8 reg = msg->reg[0] & 0x7f;
u8 reg = msg->reg[1] & 0x7f;
if (msg->reg[0] & 0x80)
if (msg->reg[1] & 0x80)
ctx->xdevcap[reg] = msg->ret;
else
ctx->devcap[reg] = msg->ret;

View File

@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state);
if (ret)
return ret;
if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
return ret;
return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
struct drm_out_fence_state *fence_state = NULL;
struct drm_out_fence_state *fence_state;
unsigned plane_mask;
int ret = 0;
unsigned int i, j, num_fences = 0;
unsigned int i, j, num_fences;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@ -2211,6 +2214,8 @@ retry:
plane_mask = 0;
copied_objs = 0;
copied_props = 0;
fence_state = NULL;
num_fences = 0;
for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props;

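Re-initialising fence_state and num_fences inside the retry: block matters because the modeset back-off path jumps back there: values set only at declaration would carry a stale pointer and count into the next attempt. A generic, runnable sketch of the pitfall; the -11 error code and helper are illustrative:

#include <stdio.h>
#include <stdlib.h>

/* Stub attempt: fails once with an EDEADLK-style -11, then succeeds,
 * handing back a freshly allocated buffer either way. */
static int attempt(int *tries, char **buf, unsigned int *n)
{
        *buf = malloc(16);
        *n = 16;
        return (*tries)++ ? 0 : -11;
}

int main(void)
{
        char *buf;
        unsigned int n;
        int tries = 0, ret;

retry:
        buf = NULL;     /* reset per attempt: the previous attempt's */
        n = 0;          /* buffer was freed before jumping back here */

        ret = attempt(&tries, &buf, &n);
        if (ret == -11) {
                free(buf);
                goto retry;
        }
        free(buf);
        printf("succeeded after %d attempts, n=%u\n", tries, n);
        return ret;
}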
View File

@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_put_unlocked(obj);
return 0;

View File

@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
crtc = drm_crtc_find(dev, plane_req->crtc_id);
if (!crtc) {
drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;

View File

@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unmap_src:
i915_gem_object_unpin_map(obj);
put_obj:
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
i915_gem_object_put(obj);
return ret;
}

View File

@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port and abort if more
* than one is found. */
* so look for all the possible values for each port.
*/
int dvo_ports[][3] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
};
/* Find the child device to use, abort if more than one found. */
/*
* Find the first child device to reference the port, report if more
* than one found.
*/
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i;
@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (it->common.dvo_port == dvo_ports[port][j]) {
if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
return;
} else {
child = it;
}
child = it;
}
}
}
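
Rather than aborting when the VBT lists more than one child device for a port, the parser now keeps the first match and only logs the duplicates. A small sketch of that first-match-wins pattern; the data and names are illustrative:

#include <stdio.h>

/* Pick the first matching entry; warn about later duplicates
 * instead of giving up. */
static int find_first(const int *ids, int n, int want)
{
        int found = -1, i;

        for (i = 0; i < n; i++) {
                if (ids[i] != want)
                        continue;
                if (found >= 0) {
                        fprintf(stderr, "duplicate match at %d, keeping the first\n", i);
                        continue;
                }
                found = i;
        }
        return found;
}

int main(void)
{
        int ids[] = { 4, 7, 7 };

        printf("using index %d\n", find_first(ids, 3, 7)); /* prints 1 */
        return 0;
}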

View File

@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi_device;
u8 data;
u8 data = 0;
enum port port;
/* FIXME: Need to take care of 16 bit brightness level */

View File

@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
if (!gpio_desc) {
gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
"panel", gpio_index,
NULL, gpio_index,
value ? GPIOD_OUT_LOW :
GPIOD_OUT_HIGH);

View File

@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
static u8 gtiir[] = {
[RCS] = 0,
[BCS] = 0,
[VCS] = 1,
[VCS2] = 1,
[VECS] = 3,
};
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
/* After a GPU reset, we may have requests to replay */
GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
/*
* Clear any pending interrupt state.
*
* We do it twice out of paranoia that some of the IIR are double
* buffered, and if we only reset it once there may still be
* an interrupt pending.
*/
I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
/* After a GPU reset, we may have requests to replay */
submit = false;
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
if (!port_isset(&port[n]))

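The new comment explains the double write: if the IIR really is double buffered, one clear only surfaces the buffered copy. A toy model of a write-1-to-clear register with one level of buffering; this is purely illustrative, not how the hardware is specified:

#include <stdio.h>

static unsigned int iir_live, iir_buffered;

static void iir_write_clear(unsigned int mask)
{
        iir_live &= ~mask;        /* W1C: drop the visible bit   */
        iir_live |= iir_buffered; /* any buffered copy surfaces  */
        iir_buffered = 0;
}

int main(void)
{
        iir_live = 0x10;
        iir_buffered = 0x10;
        iir_write_clear(0x10);
        printf("after one clear:  %#x\n", iir_live); /* still set */
        iir_write_clear(0x10);
        printf("after two clears: %#x\n", iir_live); /* now clear */
        return 0;
}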
View File

@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (!IS_GEN9(dev_priv)) {
DRM_ERROR("LSPCON is supported on GEN9 only\n");
if (!HAS_LSPCON(dev_priv)) {
DRM_ERROR("LSPCON is not supported on this platform\n");
return false;
}

View File

@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
return;
}
ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
ipu_dp_setup_channel(ipu_plane->dp,
IPUV3_COLORSPACE_RGB,
IPUV3_COLORSPACE_RGB);
ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
/* Enable local alpha on partial plane */

View File

@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm)
static int rockchip_drm_sys_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct rockchip_drm_private *priv = drm->dev_private;
struct rockchip_drm_private *priv;
if (!drm)
return 0;
drm_kms_helper_poll_disable(drm);
rockchip_drm_fb_suspend(drm);
priv = drm->dev_private;
priv->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(priv->state)) {
rockchip_drm_fb_resume(drm);
@ -293,8 +297,12 @@ static int rockchip_drm_sys_suspend(struct device *dev)
static int rockchip_drm_sys_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct rockchip_drm_private *priv = drm->dev_private;
struct rockchip_drm_private *priv;
if (!drm)
return 0;
priv = drm->dev_private;
drm_atomic_helper_resume(drm, priv->state);
rockchip_drm_fb_resume(drm);
drm_kms_helper_poll_enable(drm);
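
The suspend/resume fix is the classic dereference-before-NULL-check bug: priv was initialised from drm->dev_private at its declaration, before the !drm guard could run. A minimal sketch of the corrected ordering; the types and names are illustrative:

#include <stdio.h>

struct dev_wrap { void *dev_private; };

static int sys_suspend(struct dev_wrap *drm)
{
        void *priv;

        if (!drm)                   /* nothing bound: nothing to do */
                return 0;

        priv = drm->dev_private;    /* dereference only after the check */
        printf("suspending, priv=%p\n", priv);
        return 0;
}

int main(void)
{
        sys_suspend(NULL);  /* safe; initialising priv at its declaration
                               would have dereferenced drm first */
        return 0;
}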

View File

@ -25,12 +25,20 @@
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
static void sun4i_drv_lastclose(struct drm_device *dev)
{
struct sun4i_drv *drv = dev->dev_private;
drm_fbdev_cma_restore_mode(drv->fbdev);
}
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
.lastclose = sun4i_drv_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",

View File

@ -1567,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
}
/**
* vmw_kms_atomic_commit - Perform an atomic state commit
*
* @dev: DRM device
* @state: the driver state object
* @nonblock: Whether nonblocking behaviour is requested
*
* This is a simple wrapper around drm_atomic_helper_commit() for
* us to clear the nonblocking value.
*
* Nonblocking commits currently cause synchronization issues
* for vmwgfx.
*
* RETURNS
* Zero for success or negative error code on failure.
*/
int vmw_kms_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
{
return drm_atomic_helper_commit(dev, state, false);
}
static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
.atomic_check = vmw_kms_atomic_check_modeset,
.atomic_commit = drm_atomic_helper_commit,
.atomic_commit = vmw_kms_atomic_commit,
};
static int vmw_kms_generic_present(struct vmw_private *dev_priv,

View File

@ -1,6 +1,7 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
depends on DRM || !DRM # if DRM=m, this can't be 'y'
select GENERIC_IRQ_CHIP
help
Choose this if you have an i.MX5/6 system and want to use the Image
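
The added "depends on DRM || !DRM" line is a common Kconfig idiom: the expression evaluates to y when DRM is built in or disabled, but only to m when DRM=m, which then caps IMX_IPUV3_CORE at m as well. Presumably, as the inline comment notes, this keeps the IPU core from being built into the kernel while the DRM symbols it calls live in a module.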

View File

@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
}
/* We are in an invalid state; reset bus to a known state. */
if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) {
if (!bus->msgs) {
dev_err(bus->dev, "bus in unknown state");
bus->cmd_err = -EIO;
aspeed_i2c_do_stop(bus);
if (bus->master_state != ASPEED_I2C_MASTER_STOP)
aspeed_i2c_do_stop(bus);
goto out_no_complete;
}
msg = &bus->msgs[bus->msgs_index];

View File

@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED |
DW_IC_CON_SPEED_FAST;
DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
dev->mode = DW_IC_SLAVE;
@ -430,7 +429,7 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif
#ifdef CONFIG_PM
static int dw_i2c_plat_suspend(struct device *dev)
static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@ -452,11 +451,21 @@ static int dw_i2c_plat_resume(struct device *dev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_suspend(struct device *dev)
{
pm_runtime_resume(dev);
return dw_i2c_plat_runtime_suspend(dev);
}
#endif
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete,
SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
dw_i2c_plat_resume,
NULL)
};
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
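
The driver now keeps separate runtime and system-sleep callbacks, and the system-sleep path first resumes the device so the real power-down runs from a known state. A kernel-style sketch of the split, assuming a hypothetical driver "foo"; this is a pattern sketch, not a standalone program:

#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
        /* power the controller down */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* power the controller back up */
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
        pm_runtime_resume(dev);           /* leave runtime suspend first */
        return foo_runtime_suspend(dev);  /* then do the real power-down */
}
#endif

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_resume, NULL)
};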

View File

@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave)
return -EBUSY;
if (slave->flags & I2C_CLIENT_TEN)
return -EAFNOSUPPORT;
pm_runtime_get_sync(dev->dev);
/*
* Set slave address in the IC_SAR register,
* the address to which the DW_apb_i2c responds.
@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
dev->disable_int(dev);
dev->disable(dev);
dev->slave = NULL;
pm_runtime_put(dev->dev);
return 0;
}
@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
slave_activity = ((dw_readl(dev, DW_IC_STATUS) &
DW_IC_STATUS_SLAVE_ACTIVITY) >> 6);
if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY))
if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
return 0;
dev_dbg(dev->dev,
@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
ret = i2c_add_numbered_adapter(adap);
if (ret)
dev_err(dev->dev, "failure adding adapter: %d\n", ret);
pm_runtime_put_noidle(dev->dev);
return ret;
}

View File

@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
iounmap(pd->reg);
err_res:
release_resource(pd->ioarea);
kfree(pd->ioarea);
release_mem_region(pd->ioarea->start, size);
err:
kfree(pd);
@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev)
i2c_del_adapter(&pd->adap);
iounmap(pd->reg);
release_resource(pd->ioarea);
kfree(pd->ioarea);
release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
kfree(pd);
return 0;

View File

@ -353,8 +353,8 @@ static int i2c_device_probe(struct device *dev)
}
/*
* An I2C ID table is not mandatory, if and only if, a suitable Device
* Tree match table entry is supplied for the probing device.
* An I2C ID table is not mandatory, if and only if, a suitable OF
* or ACPI ID table is supplied for the probing device.
*/
if (!driver->id_table &&
!i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&

View File

@ -644,7 +644,7 @@ static int ina2xx_capture_thread(void *data)
{
struct iio_dev *indio_dev = data;
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip);
int sampling_us = SAMPLING_PERIOD(chip);
int buffer_us;
/*

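Making sampling_us a plain int sidesteps C's usual-arithmetic-conversion trap: in a mixed signed/unsigned expression the signed operand is converted to unsigned, so a mathematically negative delta compares as a huge positive value. A tiny runnable demonstration; the variable names echo the driver but the values are illustrative:

#include <stdio.h>

int main(void)
{
        unsigned int sampling_us = 140;
        int buffer_us = 500;

        /* The subtraction is performed in unsigned arithmetic,
         * so the "negative" result wraps and the test passes. */
        if (sampling_us - buffer_us > 0)
                printf("%u - %d \"is positive\": %u\n",
                       sampling_us, buffer_us, sampling_us - buffer_us);
        return 0;
}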
View File

@ -64,7 +64,7 @@
#define STM32H7_CKMODE_MASK GENMASK(17, 16)
/* STM32 H7 maximum analog clock rate (from datasheet) */
#define STM32H7_ADC_MAX_CLK_RATE 72000000
#define STM32H7_ADC_MAX_CLK_RATE 36000000
/**
* stm32_adc_common_regs - stm32 common registers, compatible dependent data
@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
return -EINVAL;
}
priv->common.rate = rate;
priv->common.rate = rate / stm32f4_pclk_div[i];
val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
val &= ~STM32F4_ADC_ADCPRE_MASK;
val |= i << STM32F4_ADC_ADCPRE_SHIFT;
writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
rate / (stm32f4_pclk_div[i] * 1000));
priv->common.rate / 1000);
return 0;
}
@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
out:
/* rate used later by each ADC instance to control BOOST mode */
priv->common.rate = rate;
priv->common.rate = rate / div;
/* Set common clock mode and prescaler */
val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR);
@ -260,7 +260,7 @@ out:
writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR);
dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n",
ckmode ? "bus" : "adc", div, rate / (div * 1000));
ckmode ? "bus" : "adc", div, priv->common.rate / 1000);
return 0;
}

View File

@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0;
if (state) {
if (!atomic_read(&st->user_requested_state))
return 0;
if (sensor_hub_device_open(st->hsdev))
return -EIO;
@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
&report_val);
}
pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
st->pdev->name, state_val, report_val);
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
pm_runtime_mark_last_busy(&st->pdev->dev);
pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
if (ret < 0) {
@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
/* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000);
pm_runtime_use_autosuspend(&attrb->pdev->dev);
return ret;
error_unreg_trigger:
iio_trigger_unregister(trig);

Some files were not shown because too many files have changed in this diff.