
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

The conflicts were simply overlapping changes. In the net/ipv4/route.c
case the code had moved around a little bit and the same fix was made
in both 'net' and 'net-next'.

In the net/sched/sch_generic.c case a fix in 'net' happened at
the same time that a new argument was added to qdisc_hash_add().

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-04-15 21:16:30 -04:00
commit 6b6cbc1471
298 changed files with 3274 additions and 1847 deletions


@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Mark Brown <broonie@sirena.org.uk>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Matthieu CASTET <castet.matthieu@free.fr>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
@ -171,6 +173,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>


@ -58,8 +58,7 @@ prototypes:
int (*permission) (struct inode *, int, unsigned int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (const struct path *, struct dentry *, struct kstat *,
u32, unsigned int);
int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
void (*update_time)(struct inode *, struct timespec *, int);


@ -600,3 +600,9 @@ in your dentry operations instead.
[recommended]
->readlink is optional for symlinks. Don't set, unless filesystem needs
to fake something for readlink(2).
--
[mandatory]
->getattr() is now passed a struct path rather than a vfsmount and
dentry separately, and it now has request_mask and query_flags arguments
to specify the fields and sync type requested by statx. Filesystems not
supporting any statx-specific features may ignore the new arguments.
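As a rough illustration of the new calling convention described above, here is a minimal sketch of a filesystem ->getattr() with the updated signature (hypothetical foofs_getattr name; assumes the two-argument generic_fillattr() of this kernel era):

static int foofs_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/* A filesystem with no statx-specific features can ignore
	 * request_mask and query_flags and just fill in *stat. */
	generic_fillattr(inode, stat);
	return 0;
}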


@ -382,8 +382,7 @@ struct inode_operations {
int (*permission) (struct inode *, int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (const struct path *, struct dentry *, struct kstat *,
u32, unsigned int);
int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *, struct file *,


@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
int __init foo_probe(void)
{
int error;
struct pinctrl_dev *pctl;
return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
if (error)
return error;
return pinctrl_enable(pctl);
}
To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and


@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
.. code-block:: none
Cc: <stable@vger.kernel.org> # 3.3.x-
Cc: <stable@vger.kernel.org> # 3.3.x
The tag has the meaning of:


@ -83,6 +83,12 @@ Groups:
Bits for undefined preemption levels are RAZ/WI.
For historical reasons and to provide ABI compatibility with userspace we
export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
field in the lower 5 bits of a word, meaning that userspace must always
use the lower 5 bits to communicate with the KVM device and must shift the
value left by 3 places to obtain the actual priority mask level.
Limitations:
- Priorities are not implemented, and registers are RAZ/WI
- Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
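To make the 5-bit packing described above concrete, a hedged sketch of the two conversions (helper names are illustrative, not taken from the KVM sources):

static unsigned int gicc_pmr_from_field(unsigned int field)
{
	return (field & 0x1f) << 3;	/* shift left by 3 to get the priority mask level */
}

static unsigned int field_from_gicc_pmr(unsigned int pmr)
{
	return (pmr >> 3) & 0x1f;	/* only the lower 5 bits travel to/from the KVM device */
}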


@ -4124,14 +4124,13 @@ F: drivers/block/drbd/
F: lib/lru_cache.c
F: Documentation/blockdev/drbd/
DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
F: drivers/base/
F: fs/debugfs/
F: fs/kernfs/
F: fs/sysfs/
F: include/linux/debugfs.h
F: include/linux/kobj*
@ -7216,6 +7215,14 @@ F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/
KERNFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Tejun Heo <tj@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: include/linux/kernfs.h
F: fs/kernfs/
KEXEC
M: Eric Biederman <ebiederm@xmission.com>
W: http://kernel.org/pub/linux/utils/kernel/kexec/
@ -13311,7 +13318,7 @@ F: drivers/virtio/
F: tools/virtio/
F: drivers/net/virtio_net.c
F: drivers/block/virtio_blk.c
F: include/linux/virtio_*.h
F: include/linux/virtio*.h
F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Fearless Coyote
# *DOCUMENTATION*


@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
/* copy relevant bits of struct timex. */
if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
offsetof(struct timex32, time)))
offsetof(struct timex32, tick)))
return -EFAULT;
ret = do_adjtimex(&txc);


@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
if (__hyp_get_vectors() == hyp_default_vectors)
cpu_init_hyp_mode(NULL);
}
if (vgic_present)
kvm_vgic_init_cpu_hardware();
}
static void cpu_hyp_reset(void)


@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
phys_addr_t addr = start, end = start + size;
phys_addr_t next;
assert_spin_locked(&kvm->mmu_lock);
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
next = stage2_pgd_addr_end(addr, end);
if (!stage2_pgd_none(*pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
/*
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
if (next != end)
cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
down_read(&current->mm->mmap_sem);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
up_read(&current->mm->mmap_sem);
srcu_read_unlock(&kvm->srcu, idx);
}
@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
if (kvm->arch.pgd == NULL)
return;
spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
spin_unlock(&kvm->mmu_lock);
/* Free the HW pgd, one page at a time */
free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
kvm->arch.pgd = NULL;
@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
down_read(&current->mm->mmap_sem);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
return -EINVAL;
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
goto out;
}
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
return ret;
goto out;
spin_lock(&kvm->mmu_lock);
if (ret)
@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
up_read(&current->mm->mmap_sem);
return ret;
}


@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
/*
* The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
* that the intention is to allow exporting memory allocated via the
* coherent DMA APIs through the dma_buf API, which only accepts a
* scattertable. This presents a couple of problems:
* 1. Not all memory allocated via the coherent DMA APIs is backed by
* a struct page
* 2. Passing coherent DMA memory into the streaming APIs is not allowed
* as we will try to flush the memory through a different alias to that
* actually being used (and the flushes are redundant.)
*/
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
unsigned long pfn = dma_to_pfn(dev, handle);
struct page *page;
int ret;
/* If the PFN is not valid, we do not have a struct page */
if (!pfn_valid(pfn))
return -ENXIO;
page = pfn_to_page(pfn);
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (unlikely(ret))
return ret;


@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
*/
static inline bool security_extensions_enabled(void)
{
return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
/* Check CPUID Identification Scheme before ID_PFR1 read */
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
return 0;
}
static unsigned long __init setup_vectors_base(void)


@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
#endif
if (p) {
if (cur) {
if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
/*
* Probe hit but conditional execution check failed,
* so just skip the instruction and continue as if
* nothing had happened.
* In this case, we can skip recursing check too.
*/
singlestep_skip(p, regs);
} else if (cur) {
/* Kprobe is pending, so we're recursing. */
switch (kcb->kprobe_status) {
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_SS:
/* A pre- or post-handler probe got us here. */
kprobes_inc_nmissed_count(p);
save_previous_kprobe(kcb);
@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
singlestep(p, regs, kcb);
restore_previous_kprobe(kcb);
break;
case KPROBE_REENTER:
/* A nested probe was hit in FIQ, it is a BUG */
pr_warn("Unrecoverable kprobe detected at %p.\n",
p->addr);
/* fall through */
default:
/* impossible cases */
BUG();
}
} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
} else {
/* Probe hit and conditional execution check ok. */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
}
reset_current_kprobe();
}
} else {
/*
* Probe hit but conditional execution check failed,
* so just skip the instruction and continue as if
* nothing had happened.
*/
singlestep_skip(p, regs);
}
} else if (cur) {
/* We probably hit a jprobe. Call its break handler. */
@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@ -456,15 +464,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
@ -476,6 +476,33 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
correct_ret_addr = ri->ret_addr;
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {


@ -977,7 +977,10 @@ static void coverage_end(void)
void __naked __kprobes_test_case_start(void)
{
__asm__ __volatile__ (
"stmdb sp!, {r4-r11} \n\t"
"mov r2, sp \n\t"
"bic r3, r2, #7 \n\t"
"mov sp, r3 \n\t"
"stmdb sp!, {r2-r11} \n\t"
"sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"bic r0, lr, #1 @ r0 = inline data \n\t"
"mov r1, sp \n\t"
@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
"movne pc, r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"ldmia sp!, {r4-r11} \n\t"
"ldmia sp!, {r2-r11} \n\t"
"mov sp, r2 \n\t"
"mov pc, r0 \n\t"
);
}
@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
"bxne r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"ldmia sp!, {r4-r11} \n\t"
"ldmia sp!, {r2-r11} \n\t"
"mov sp, r2 \n\t"
"bx r0 \n\t"
);
}


@ -42,7 +42,20 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
static const char *fault_name(unsigned int esr);
struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr,
struct pt_regs *regs);
int sig;
int code;
const char *name;
};
static const struct fault_info fault_info[];
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
return fault_info + (esr & 63);
}
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
struct pt_regs *regs)
{
struct siginfo si;
const struct fault_info *inf;
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
inf = esr_to_fault_info(esr);
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
tsk->comm, task_pid_nr(tsk), inf->name, sig,
addr, esr);
show_pte(tsk->mm, addr);
show_regs(regs);
@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
const struct fault_info *inf;
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
*/
if (user_mode(regs))
__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
else
if (user_mode(regs)) {
inf = esr_to_fault_info(esr);
__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
} else
__do_kernel_fault(mm, addr, esr, regs);
}
@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
return 1;
}
static const struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
int sig;
int code;
const char *name;
} fault_info[] = {
static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "ttbr address size fault" },
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
@ -560,19 +572,13 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 63" },
};
static const char *fault_name(unsigned int esr)
{
const struct fault_info *inf = fault_info + (esr & 63);
return inf->name;
}
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
const struct fault_info *inf = fault_info + (esr & 63);
const struct fault_info *inf = esr_to_fault_info(esr);
struct siginfo info;
if (!inf->fn(addr, esr, regs))


@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else if (ps == (PAGE_SIZE * CONT_PTES)) {
hugetlb_add_hstate(CONT_PTE_SHIFT);
} else if (ps == (PMD_SIZE * CONT_PMDS)) {
hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
} else {
hugetlb_bad_size();
pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#ifdef CONFIG_ARM64_64K_PAGES
static __init int add_default_hugepagesz(void)
{
if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
hugetlb_add_hstate(CONT_PTE_SHIFT);
return 0;
}
arch_initcall(add_default_hugepagesz);
#endif


@ -0,0 +1,29 @@
#ifndef _ASM_IA64_ASM_PROTOTYPES_H
#define _ASM_IA64_ASM_PROTOTYPES_H
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/esi.h>
#include <asm/ftrace.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/string.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/xor.h>
extern const char ia64_ivt[];
signed int __divsi3(signed int, unsigned int);
signed int __modsi3(signed int, unsigned int);
signed long long __divdi3(signed long long, unsigned long long);
signed long long __moddi3(signed long long, unsigned long long);
unsigned int __udivsi3(unsigned int, unsigned int);
unsigned int __umodsi3(unsigned int, unsigned int);
unsigned long long __udivdi3(unsigned long long, unsigned long long);
unsigned long long __umoddi3(unsigned long long, unsigned long long);
#endif /* _ASM_IA64_ASM_PROTOTYPES_H */


@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO
AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
$(obj)/__divdi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__moddi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__divsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__modsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)
$(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
$(call if_changed_dep,as_o_S)
$(call if_changed_rule,as_o_S)


@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
extern unsigned long __must_check __copy_user_zeroing(void *to,
const void __user *from,
unsigned long n);
extern unsigned long raw_copy_from_user(void *to, const void __user *from,
unsigned long n);
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
return __copy_user_zeroing(to, from, n);
memset(to, 0, n);
return n;
res = raw_copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user
extern unsigned long __must_check __copy_user(void __user *to,


@ -29,7 +29,6 @@
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
" MOV D1Ar1,#0\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
@ -260,27 +259,31 @@
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #32\n" \
"23:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"SUB %3, %3, #32\n" \
"24:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"25:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"26:\n" \
"SUB %3, %3, #32\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
"25:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"26:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #32\n" \
"27:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %0, %0, #8\n" \
"29:\n" \
"SUB %3, %3, #32\n" \
"30:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"31:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"32:\n" \
"SUB %0, %0, #8\n" \
"33:\n" \
"SETL [%0++], D0.7, D1.7\n" \
"SUB %3, %3, #32\n" \
"1:" \
@ -312,11 +315,15 @@
" .long 26b,3b\n" \
" .long 27b,3b\n" \
" .long 28b,3b\n" \
" .long 29b,4b\n" \
" .long 29b,3b\n" \
" .long 30b,3b\n" \
" .long 31b,3b\n" \
" .long 32b,3b\n" \
" .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
: "D1Ar1", "D0Ar2", "memory")
: "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@ -342,7 +349,7 @@
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
"AND D0Ar2, D0Ar2, #0x7\n" \
"ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@ -403,47 +410,55 @@
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"23:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"24:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"25:\n" \
"24:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"26:\n" \
"25:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"26:\n" \
"SUB %3, %3, #16\n" \
"27:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"29:\n" \
"SUB %3, %3, #16\n" \
"30:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"31:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"32:\n" \
"SUB %3, %3, #16\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
"29:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"30:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"31:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"32:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"33:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"34:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
"35:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"SUB %3, %3, #16\n" \
"36:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %0, %0, #4\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"37:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"38:\n" \
"SUB %3, %3, #16\n" \
"39:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"40:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"41:\n" \
"SUB %3, %3, #16\n" \
"42:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"43:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"44:\n" \
"SUB %0, %0, #4\n" \
"45:\n" \
"SETD [%0++], D0.7\n" \
"SUB %3, %3, #16\n" \
"1:" \
@ -483,11 +498,19 @@
" .long 34b,3b\n" \
" .long 35b,3b\n" \
" .long 36b,3b\n" \
" .long 37b,4b\n" \
" .long 37b,3b\n" \
" .long 38b,3b\n" \
" .long 39b,3b\n" \
" .long 40b,3b\n" \
" .long 41b,3b\n" \
" .long 42b,3b\n" \
" .long 43b,3b\n" \
" .long 44b,3b\n" \
" .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
: "D1Ar1", "D0Ar2", "memory")
: "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@ -513,7 +536,7 @@
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
"AND D0Ar2, D0Ar2, #0x7\n" \
"ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
if ((unsigned long) src & 1) {
__asm_copy_to_user_1(dst, src, retn);
n--;
if (retn)
return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
while (n > 0) {
__asm_copy_to_user_1(dst, src, retn);
n--;
if (retn)
return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
if (retn)
return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
while (n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
if (retn)
return retn + n;
}
}
@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
if (retn)
return retn + n;
}
}
if (n >= RAPF_MIN_BUF_SIZE) {
@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
if (retn)
return retn + n;
}
}
#endif
@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 16) {
__asm_copy_to_user_16(dst, src, retn);
n -= 16;
if (retn)
return retn + n;
}
while (n >= 4) {
__asm_copy_to_user_4(dst, src, retn);
n -= 4;
if (retn)
return retn + n;
}
switch (n) {
@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
break;
}
/*
* If we get here, retn correctly reflects the number of failing
* bytes.
*/
return retn;
}
EXPORT_SYMBOL(__copy_user);
@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_user_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"2: SETB [%0++],D1Ar1\n", \
"3: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
"3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
"3: ADD %2,%2,#2\n" \
" SETW [%0++],D1Ar1\n" FIXUP, \
"3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_from_user_2x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
"5: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
"5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
"3: ADD %2,%2,#4\n" \
" SETD [%0++],D1Ar1\n" FIXUP, \
"3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_5(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
"5: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"4: SETW [%0++],D1Ar1\n" COPY, \
"5: ADD %2,%2,#2\n" \
" SETW [%0++],D1Ar1\n" FIXUP, \
" .long 4b,5b\n" TENTRY)
#define __asm_copy_from_user_6(to, from, ret) \
__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_7(to, from, ret) \
__asm_copy_from_user_6x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"6: SETB [%0++],D1Ar1\n", \
"7: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 6b,7b\n")
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_4x_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"4: SETD [%0++],D1Ar1\n" COPY, \
"5: ADD %2,%2,#4\n" \
" SETD [%0++],D1Ar1\n" FIXUP, \
" .long 4b,5b\n" TENTRY)
#define __asm_copy_from_user_8(to, from, ret) \
__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_9(to, from, ret) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"6: SETB [%0++],D1Ar1\n", \
"7: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 6b,7b\n")
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"6: SETW [%0++],D1Ar1\n" COPY, \
"7: ADD %2,%2,#2\n" \
" SETW [%0++],D1Ar1\n" FIXUP, \
" .long 6b,7b\n" TENTRY)
#define __asm_copy_from_user_10(to, from, ret) \
__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_11(to, from, ret) \
__asm_copy_from_user_10x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"8: SETB [%0++],D1Ar1\n", \
"9: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 8b,9b\n")
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_8x_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"6: SETD [%0++],D1Ar1\n" COPY, \
"7: ADD %2,%2,#4\n" \
" SETD [%0++],D1Ar1\n" FIXUP, \
" .long 6b,7b\n" TENTRY)
#define __asm_copy_from_user_12(to, from, ret) \
__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_13(to, from, ret) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"8: SETB [%0++],D1Ar1\n", \
"9: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 8b,9b\n")
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"8: SETW [%0++],D1Ar1\n" COPY, \
"9: ADD %2,%2,#2\n" \
" SETW [%0++],D1Ar1\n" FIXUP, \
" .long 8b,9b\n" TENTRY)
#define __asm_copy_from_user_14(to, from, ret) \
__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_15(to, from, ret) \
__asm_copy_from_user_14x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"10: SETB [%0++],D1Ar1\n", \
"11: ADD %2,%2,#1\n" \
" SETB [%0++],D1Ar1\n", \
" .long 10b,11b\n")
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_from_user_12x_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"8: SETD [%0++],D1Ar1\n" COPY, \
"9: ADD %2,%2,#4\n" \
" SETD [%0++],D1Ar1\n" FIXUP, \
" .long 8b,9b\n" TENTRY)
#define __asm_copy_from_user_16(to, from, ret) \
__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
#define __asm_copy_from_user_8x64(to, from, ret) \
asm volatile ( \
" GETL D0Ar2,D1Ar1,[%1++]\n" \
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
" .section .fixup,\"ax\"\n" \
" MOV D1Ar1,#0\n" \
" MOV D0Ar2,#0\n" \
"3: ADD %2,%2,#8\n" \
" SETL [%0++],D0Ar2,D1Ar1\n" \
" MOVT D0Ar2,#HI(1b)\n" \
" JUMP D0Ar2,#LO(1b)\n" \
" .previous\n" \
@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
* source. Since the fault is at a single address, we only
* need to rewind by 8 bytes.
* source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
* corrected.
* corrected, but the source must be rewound to the beginning of
* the block, which is LSM_STEP*8 bytes.
* LSM_STEP is bits 10:8 in TXSTATUS which is already read
* and stored in D0Ar2
*
* NOTE: If a fault occurs at the last operation in M{G,S}ETL
* LSM_STEP will be 0. ie: we do 4 writes in our case, if
* a fault happens at the 4th write, LSM_STEP will be 0
* instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"SUB %1, %1, #8\n")
"LSR D0Ar2, D0Ar2, #5\n" \
"ANDS D0Ar2, D0Ar2, #0x38\n" \
"ADDZ D0Ar2, D0Ar2, #32\n" \
"SUB %1, %1, D0Ar2\n")
/* rewind 'from' pointer when a fault occurs
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
* source. Since the fault is at a single address, we only
* need to rewind by 4 bytes.
* source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
* corrected.
* corrected, but the source must be rewound to the beginning of
* the block, which is LSM_STEP*4 bytes.
* LSM_STEP is bits 10:8 in TXSTATUS which is already read
* and stored in D0Ar2
*
* NOTE: If a fault occurs at the last operation in M{G,S}ETL
* LSM_STEP will be 0. ie: we do 4 writes in our case, if
* a fault happens at the 4th write, LSM_STEP will be 0
* instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"SUB %1, %1, #4\n")
"LSR D0Ar2, D0Ar2, #6\n" \
"ANDS D0Ar2, D0Ar2, #0x1c\n" \
"ADDZ D0Ar2, D0Ar2, #16\n" \
"SUB %1, %1, D0Ar2\n")
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
userland. The return-value is the number of bytes that were
inaccessible. */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
unsigned long n)
/*
* Copy from user to kernel. The return-value is the number of bytes that were
* inaccessible.
*/
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
unsigned long n)
{
register char *dst asm ("A0.2") = pdst;
register const char __user *src asm ("A1.2") = psrc;
@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
if ((unsigned long) src & 1) {
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
}
/* We only need one check after the unalignment-adjustments,
because if both adjustments were done, either both or
neither reference had an exception. */
if (retn != 0)
goto copy_exception_bytes;
#ifdef USE_RAPF
/* 64 bit copy loop */
if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
}
@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
}
#endif
@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4;
if (retn)
goto copy_exception_bytes;
return retn + n;
}
/* If we get here, there were no memory read faults. */
@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
/* If we get here, retn correctly reflects the number of failing
bytes. */
return retn;
copy_exception_bytes:
/* We already have "retn" bytes cleared, and need to clear the
remaining "n" bytes. A non-optimized simple byte-for-byte in-line
memset is preferred here, since this isn't speed-critical code and
we'd rather have this a leaf-function than calling memset. */
{
char *endp;
for (endp = dst + n; dst < endp; dst++)
*dst = 0;
}
return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
EXPORT_SYMBOL(raw_copy_from_user);
#define __asm_clear_8x64(to, ret) \
asm volatile ( \


@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
select MIPS_O32_FP64_SUPPORT if MIPS32_O32
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
help
Choose this option to build a kernel for release 6 or later of the


@ -21,6 +21,7 @@
#include <asm/cpu-features.h>
#include <asm/fpu_emulator.h>
#include <asm/hazards.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/msa.h>


@ -18,9 +18,24 @@
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
extern void *irq_stack[NR_CPUS];
/*
* The highest address on the IRQ stack contains a dummy frame put down in
* genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
*
* top ------------
* | task sp | <- irq_stack[cpu] + IRQ_STACK_START
* ------------
* | | <- First frame of IRQ context
* ------------
*
* task sp holds a copy of the task stack pointer where the struct pt_regs
* from exception entry can be found.
*/
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
unsigned long low = (unsigned long)irq_stack[cpu];
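For orientation, a minimal sketch of reading the saved task stack pointer from the dummy frame laid out in the comment above (hypothetical helper, not part of the patch; the real consumer is the unwinder change further below):

static inline unsigned long irq_stack_saved_task_sp(int cpu)
{
	/* "task sp" occupies the word at irq_stack[cpu] + IRQ_STACK_START */
	unsigned long top = (unsigned long)irq_stack[cpu] + IRQ_STACK_START;

	return *(unsigned long *)top;
}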


@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" andi %[ticket], %[ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
"2: .insn \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" sc %[ticket], %[ticket_ptr] \n"
" beqz %[ticket], 1b \n"
" li %[ticket], 1 \n"
"2: \n"
"2: .insn \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
" .set reorder \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
"2: \n"
"2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" li %2, 1 \n"
"2: \n"
"2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
"=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)


@ -386,17 +386,18 @@
#define __NR_pkey_mprotect (__NR_Linux + 363)
#define __NR_pkey_alloc (__NR_Linux + 364)
#define __NR_pkey_free (__NR_Linux + 365)
#define __NR_statx (__NR_Linux + 366)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 365
#define __NR_Linux_syscalls 366
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 365
#define __NR_O32_Linux_syscalls 366
#if _MIPS_SIM == _MIPS_SIM_ABI64
@ -730,16 +731,17 @@
#define __NR_pkey_mprotect (__NR_Linux + 323)
#define __NR_pkey_alloc (__NR_Linux + 324)
#define __NR_pkey_free (__NR_Linux + 325)
#define __NR_statx (__NR_Linux + 326)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 325
#define __NR_Linux_syscalls 326
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 325
#define __NR_64_Linux_syscalls 326
#if _MIPS_SIM == _MIPS_SIM_NABI32
@ -1077,15 +1079,16 @@
#define __NR_pkey_mprotect (__NR_Linux + 327)
#define __NR_pkey_alloc (__NR_Linux + 328)
#define __NR_pkey_free (__NR_Linux + 329)
#define __NR_statx (__NR_Linux + 330)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 329
#define __NR_Linux_syscalls 330
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 329
#define __NR_N32_Linux_syscalls 330
#endif /* _UAPI_ASM_UNISTD_H */


@ -102,6 +102,7 @@ void output_thread_info_defines(void)
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}


@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
END(mips_cps_get_bootcfg)
LEAF(mips_cps_boot_vpes)
PTR_L ta2, COREBOOTCFG_VPEMASK(a0)
lw ta2, COREBOOTCFG_VPEMASK(a0)
PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
#if defined(CONFIG_CPU_MIPSR6)


@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
}
decode_configs(c);
c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
default:


@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
beq t0, t1, 2f
/* Switch to IRQ stack */
li t1, _IRQ_STACK_SIZE
li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
LONG_S s1, 0(sp)
2:
jal plat_irq_dispatch
@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
beq t0, t1, 2f
/* Switch to IRQ stack */
li t1, _IRQ_STACK_SIZE
li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
LONG_S s1, 0(sp)
2:
jalr v0
@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER reserved reserved sti verbose /* others */
.align 5
LEAF(handle_ri_rdhwr_vivt)
LEAF(handle_ri_rdhwr_tlbp)
.set push
.set noat
.set noreorder
@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
END(handle_ri_rdhwr_vivt)
END(handle_ri_rdhwr_tlbp)
LEAF(handle_ri_rdhwr)
.set push


@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long pc,
unsigned long *ra)
{
unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
struct pt_regs *regs;
int leaf;
extern void ret_from_irq(void);
extern void ret_from_exception(void);
if (!stack_page)
return 0;
/*
* If we reached the bottom of interrupt context,
* return saved pc in pt_regs.
* IRQ stacks start at IRQ_STACK_START
* task stacks at THREAD_SIZE - 32
*/
if (pc == (unsigned long)ret_from_irq ||
pc == (unsigned long)ret_from_exception) {
struct pt_regs *regs;
if (*sp >= stack_page &&
*sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
regs = (struct pt_regs *)*sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
low = stack_page;
if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
high = stack_page + IRQ_STACK_START;
irq_stack_high = high;
} else {
high = stack_page + THREAD_SIZE - 32;
irq_stack_high = 0;
}
/*
* If we reached the top of the interrupt stack, start unwinding
* the interrupted task stack.
*/
if (unlikely(*sp == irq_stack_high)) {
unsigned long task_sp = *(unsigned long *)*sp;
/*
* Check that the pointer saved in the IRQ stack head points to
* something within the stack of the current task
*/
if (!object_is_on_stack((void *)task_sp))
return 0;
/*
* Follow pointer to tasks kernel stack frame where interrupted
* state was saved.
*/
regs = (struct pt_regs *)task_sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
return 0;
}
@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
if (leaf < 0)
return 0;
if (*sp < stack_page ||
*sp + info.frame_size > stack_page + THREAD_SIZE - 32)
if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)


@ -600,3 +600,4 @@ EXPORT(sys_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
PTR sys_statx


@ -438,4 +438,5 @@ EXPORT(sys_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 5325 */
PTR sys_statx
.size sys_call_table,.-sys_call_table


@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free
PTR sys_statx /* 6330 */
.size sysn32_call_table,.-sysn32_call_table


@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
PTR sys_statx
.size sys32_call_table,.-sys32_call_table


@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
@ -2408,9 +2408,18 @@ void __init trap_init(void)
set_except_vector(EXCCODE_SYS, handle_sys);
set_except_vector(EXCCODE_BP, handle_bp);
set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
(cpu_has_vtag_icache ?
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
if (rdhwr_noopt)
set_except_vector(EXCCODE_RI, handle_ri);
else {
if (cpu_has_vtag_icache)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
else if (current_cpu_type() == CPU_LOONGSON3)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
else
set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
}
set_except_vector(EXCCODE_CPU, handle_cpu);
set_except_vector(EXCCODE_OV, handle_ov);
set_except_vector(EXCCODE_TR, handle_tr);


@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
if (!np_xbar)
panic("Failed to load xbar nodes from devicetree");
if (of_address_to_resource(np_pmu, 0, &res_xbar))
if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
res_xbar.name))


@ -1562,6 +1562,7 @@ static void probe_vcache(void)
vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
c->vcache.waybit = 0;
c->vcache.waysize = vcache_size / c->vcache.ways;
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
scache_size *= 4;
c->scache.waybit = 0;
c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
if (scache_size)


@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_label **l,
unsigned int pte,
unsigned int ptr)
unsigned int ptr,
unsigned int flush)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
#else
UASM_i_SW(p, pte, 0, ptr);
#endif
if (cpu_has_ftlb && flush) {
BUG_ON(!cpu_has_tlbinv);
UASM_i_MFC0(p, ptr, C0_ENTRYHI);
uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
UASM_i_MTC0(p, ptr, C0_ENTRYHI);
build_tlb_write_entry(p, l, r, tlb_indexed);
uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
UASM_i_MTC0(p, ptr, C0_ENTRYHI);
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
return;
}
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbl(&l, p);
@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbs(&l, p);
@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif
uasm_l_nopage_tlbm(&l, p);


@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),
@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-fnc", 3, 40, 32)
};
static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
static struct rt2880_pmx_group rt3883_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),


@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
}
if (len & ~VMX_ALIGN_MASK) {
preempt_disable();
pagefault_disable();
enable_kernel_altivec();
crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
disable_kernel_altivec();
pagefault_enable();
preempt_enable();
}
tail = len & VMX_ALIGN_MASK;


@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
nb = 8;
flags = LD+SW;
} else if (IS_XFORM(instruction) &&
((instruction >> 1) & 0x3ff) == 660) {
nb = 8;
flags = ST+SW;
/*
* Handle some cases which give overlaps in the DSISR values.
*/
if (IS_XFORM(instruction)) {
switch (get_xop(instruction)) {
case 532: /* ldbrx */
nb = 8;
flags = LD+SW;
break;
case 660: /* stdbrx */
nb = 8;
flags = ST+SW;
break;
case 20: /* lwarx */
case 84: /* ldarx */
case 116: /* lharx */
case 276: /* lqarx */
return 0; /* not emulated ever */
}
}
/* Byteswap little endian loads and stores */


@ -67,7 +67,7 @@ PPC64_CACHES:
* flush all bytes from start through stop-1 inclusive
*/
_GLOBAL(flush_icache_range)
_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr
@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
*
* flush all bytes from start to stop-1 inclusive
*/
_GLOBAL(flush_dcache_range)
_GLOBAL_TOC(flush_dcache_range)
/*
* Flush the data cache to memory


@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
/*
* Fixup HFSCR:TM based on CPU features. The bit is set by our
* early asm init because at that point we haven't updated our
* CPU features from firmware and device-tree. Here we have,
* so let's do it.
*/
if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}


@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
/* start new resize */
resize = kzalloc(sizeof(*resize), GFP_KERNEL);
if (!resize) {
ret = -ENOMEM;
goto out;
}
resize->order = shift;
resize->kvm = kvm;
INIT_WORK(&resize->work, resize_hpt_prepare_work);


@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
unsigned int use_local;
use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
} pte_iterate_hashed_end();
}
if (mmu_has_feature(MMU_FTR_TLBIEL) &&
mmu_psize_defs[psize].tlbiel && local) {
if (use_local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];


@ -168,8 +168,7 @@ union page_table_entry {
unsigned long z : 1; /* Zero Bit */
unsigned long i : 1; /* Page-Invalid Bit */
unsigned long p : 1; /* DAT-Protection Bit */
unsigned long co : 1; /* Change-Recording Override */
unsigned long : 8;
unsigned long : 9;
};
};
@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
return PGM_PAGE_TRANSLATION;
if (pte.z)
return PGM_TRANSLATION_SPEC;
if (pte.co && !edat1)
return PGM_TRANSLATION_SPEC;
dat_protection |= pte.p;
raddr.pfra = pte.pfra;
real_address:
@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
if (!rc && pte.i)
rc = PGM_PAGE_TRANSLATION;
if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
if (!rc && pte.z)
rc = PGM_TRANSLATION_SPEC;
shadow_page:
pte.p |= dat_protection;


@ -17,6 +17,7 @@
#define HPAGE_SHIFT 23
#define REAL_HPAGE_SHIFT 22
#define HPAGE_2GB_SHIFT 31
#define HPAGE_256MB_SHIFT 28
#define HPAGE_64K_SHIFT 16
#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
@ -27,7 +28,7 @@
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
#define HUGE_MAX_HSTATE 3
#define HUGE_MAX_HSTATE 4
#endif
#ifndef __ASSEMBLY__


@ -679,6 +679,14 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return pte_pfn(pte);
}
#define __HAVE_ARCH_PMD_WRITE
static inline unsigned long pmd_write(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
return pte_write(pte);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
@ -694,13 +702,6 @@ static inline unsigned long pmd_young(pmd_t pmd)
return pte_young(pte);
}
static inline unsigned long pmd_write(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
return pte_write(pte);
}
static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));


@ -18,12 +18,6 @@
#include <asm/signal.h>
#include <asm/page.h>
/*
* The sparc has no problems with write protection
*/
#define wp_works_ok 1
#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
* That one page is used to protect kernel from intruders, so that
* we can make our access_ok test faster


@ -18,10 +18,6 @@
#include <asm/ptrace.h>
#include <asm/page.h>
/* The sparc has no problems with write protection */
#define wp_works_ok 1
#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
/*
* User lives in his very own context, and cannot reference us. Note
* that TASK_SIZE is a misnomer, it really gives maximum user virtual


@ -96,6 +96,7 @@ sparc64_boot:
andn %g1, PSTATE_AM, %g1
wrpr %g1, 0x0, %pstate
ba,a,pt %xcc, 1f
nop
.globl prom_finddev_name, prom_chosen_path, prom_root_node
.globl prom_getprop_name, prom_mmu_name, prom_peer_name
@ -613,6 +614,7 @@ niagara_tlb_fixup:
nop
ba,a,pt %xcc, 80f
nop
niagara4_patch:
call niagara4_patch_copyops
nop
@ -622,6 +624,7 @@ niagara4_patch:
nop
ba,a,pt %xcc, 80f
nop
niagara2_patch:
call niagara2_patch_copyops
@ -632,6 +635,7 @@ niagara2_patch:
nop
ba,a,pt %xcc, 80f
nop
niagara_patch:
call niagara_patch_copyops


@ -82,6 +82,7 @@ do_stdfmna:
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
nop
.size do_stdfmna,.-do_stdfmna
.type breakpoint_trap,#function


@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
bne,pt %xcc, user_rtt_fill_32bit
wrpr %g1, %cwp
ba,a,pt %xcc, user_rtt_fill_64bit
nop
user_rtt_fill_fixup_dax:
ba,pt %xcc, user_rtt_fill_fixup_common


@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
rd %pc, %g7
ba,a,pt %xcc, 2f
nop
1: ba,pt %xcc, etrap_irq
rd %pc, %g7


@ -352,6 +352,7 @@ sun4v_mna:
call sun4v_do_mna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
nop
/* Privileged Action. */
sun4v_privact:


@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
nop
1: call spitfire_data_access_exception
nop


@ -152,6 +152,8 @@ fill_fixup_dax:
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
nop
1: call spitfire_data_access_exception
nop
ba,a,pt %xcc, rtrap
nop


@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
blu 170f
nop
ba,a,pt %xcc, 180f
nop
4: /* 32 <= low bits < 48 */
blu 150f
nop
ba,a,pt %xcc, 160f
nop
5: /* 0 < low bits < 32 */
blu,a 6f
cmp %g2, 8
@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
blu 130f
nop
ba,a,pt %xcc, 140f
nop
6: /* 0 < low bits < 16 */
bgeu 120f
nop
@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %o2, 85f
sub %o0, %o1, GLOBAL_SPARE
ba,a,pt %XCC, 90f
nop
.align 64
75: /* 16 < len <= 64 */

View File

@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
bne,pt %icc, 1b
EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
nop
.size FUNC_NAME, .-FUNC_NAME

View File

@ -102,4 +102,5 @@ NG4bzero:
bne,pt %icc, 1b
add %o0, 0x30, %o0
ba,a,pt %icc, .Lpostloop
nop
.size NG4bzero,.-NG4bzero

View File

@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
brz,pt %i2, 85f
sub %o0, %i1, %i3
ba,a,pt %XCC, 90f
nop
.align 64
70: /* 16 < len <= 64 */

View File

@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
switch (shift) {
case HPAGE_2GB_SHIFT:
hugepage_size = _PAGE_SZ2GB_4V;
pte_val(entry) |= _PAGE_PMD_HUGE;
break;
case HPAGE_256MB_SHIFT:
hugepage_size = _PAGE_SZ256MB_4V;
pte_val(entry) |= _PAGE_PMD_HUGE;
@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
unsigned int shift;
switch (tte_szbits) {
case _PAGE_SZ2GB_4V:
shift = HPAGE_2GB_SHIFT;
break;
case _PAGE_SZ256MB_4V:
shift = HPAGE_256MB_SHIFT;
break;
@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
if (!pmd)
return NULL;
if (sz == PMD_SHIFT)
if (sz >= PMD_SIZE)
pte = (pte_t *)pmd;
else
pte = pte_alloc_map(mm, pmd, addr);

View File

@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
hugepage_shift = ilog2(hugepage_size);
switch (hugepage_shift) {
case HPAGE_2GB_SHIFT:
hv_pgsz_mask = HV_PGSZ_MASK_2GB;
hv_pgsz_idx = HV_PGSZ_IDX_2GB;
break;
case HPAGE_256MB_SHIFT:
hv_pgsz_mask = HV_PGSZ_MASK_256MB;
hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
if ((long)addr < 0L) {
unsigned long pa = __pa(addr);
if ((addr >> max_phys_bits) != 0UL)
if ((pa >> max_phys_bits) != 0UL)
return false;
return pfn_valid(pa >> PAGE_SHIFT);

View File

@ -54,6 +54,7 @@
enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
EXPORT_SYMBOL(vac_cache_size);
int vac_line_size;
extern struct resource sparc_iomap;

View File

@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
if (pte_val(*pte) & _PAGE_VALID) {
bool exec = pte_exec(*pte);
tlb_batch_add_one(mm, vaddr, exec, false);
tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
}
pte++;
vaddr += PAGE_SIZE;
@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pte_t orig_pte = __pte(pmd_val(orig));
bool exec = pte_exec(orig_pte);
tlb_batch_add_one(mm, addr, exec, true);
tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
true);
REAL_HPAGE_SHIFT);
} else {
tlb_batch_pmd_scan(mm, addr, orig);
}

View File

@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)
spin_lock_irqsave(&mm->context.lock, flags);
if (tb->hugepage_shift < HPAGE_SHIFT) {
if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
spin_lock_irqsave(&mm->context.lock, flags);
if (hugepage_shift < HPAGE_SHIFT) {
if (hugepage_shift < REAL_HPAGE_SHIFT) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)

View File

@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
{
vdso32_enabled = simple_strtoul(s, NULL, 0);
if (vdso32_enabled > 1)
if (vdso32_enabled > 1) {
pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
vdso32_enabled = 0;
}
return 1;
}
@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
static const int zero;
static const int one = 1;
static struct ctl_table abi_table2[] = {
{
.procname = "vsyscall32",
.data = &vdso32_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
.proc_handler = proc_dointvec_minmax,
.extra1 = (int *)&zero,
.extra2 = (int *)&one,
},
{}
};

View File

@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
cpuc->lbr_entries[i].to = msr_lastbranch.to;
cpuc->lbr_entries[i].mispred = 0;
cpuc->lbr_entries[i].predicted = 0;
cpuc->lbr_entries[i].in_tx = 0;
cpuc->lbr_entries[i].abort = 0;
cpuc->lbr_entries[i].cycles = 0;
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;

View File

@ -287,7 +287,7 @@ struct task_struct;
#define ARCH_DLINFO_IA32 \
do { \
if (vdso32_enabled) { \
if (VDSO_CURRENT_BASE) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \

View File

@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
}
out:
rdtgroup_kn_unlock(of->kn);
for_each_enabled_rdt_resource(r) {
kfree(r->tmp_cbms);
r->tmp_cbms = NULL;
}
rdtgroup_kn_unlock(of->kn);
return ret ?: nbytes;
}

View File

@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
me->comm, me->pid, where, frame,
regs->ip, regs->sp, regs->orig_ax);
print_vma_addr(" in ", regs->ip);
print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}

View File

@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
if (from->si_signo == SIGSEGV) {
if (from->si_code == SEGV_BNDERR) {
compat_uptr_t lower = (unsigned long)&to->si_lower;
compat_uptr_t upper = (unsigned long)&to->si_upper;
compat_uptr_t lower = (unsigned long)from->si_lower;
compat_uptr_t upper = (unsigned long)from->si_upper;
put_user_ex(lower, &to->si_lower);
put_user_ex(upper, &to->si_upper);
}
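The __copy_siginfo_to_user32() hunk above fixes a classic C slip in the SEGV_BNDERR case: the compat conversion stored the address of the destination si_lower/si_upper fields instead of the bound values carried in the source siginfo. A minimal userspace sketch of the same pitfall; the struct and field names below are invented stand-ins, not the kernel types:

#include <stdio.h>
#include <stdint.h>

struct demo_siginfo {                   /* stand-in for the native siginfo */
        void *si_lower;
        void *si_upper;
};

struct demo_compat_siginfo {            /* stand-in for compat_siginfo_t */
        uint32_t si_lower;
        uint32_t si_upper;
};

int main(void)
{
        struct demo_siginfo from = {
                .si_lower = (void *)0x1000,
                .si_upper = (void *)0x2000,
        };
        struct demo_compat_siginfo to = { 0, 0 };

        /*
         * Buggy form: takes the address of the *destination* field, so the
         * compat frame ends up holding a truncated pointer to itself rather
         * than the bound value.
         */
        uint32_t buggy = (uint32_t)(unsigned long)&to.si_lower;

        /* Fixed form: truncates the *value* from the source, as intended. */
        to.si_lower = (uint32_t)(unsigned long)from.si_lower;
        to.si_upper = (uint32_t)(unsigned long)from.si_upper;

        printf("buggy si_lower: 0x%x (an address, varies per run)\n",
               (unsigned int)buggy);
        printf("fixed si_lower: 0x%x  si_upper: 0x%x\n",
               (unsigned int)to.si_lower, (unsigned int)to.si_upper);
        return 0;
}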

View File

@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
tsk->comm, tsk->pid, str,
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}
@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
tsk->comm, task_pid_nr(tsk),
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}

View File

@ -8198,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
case EXIT_REASON_PREEMPTION_TIMER:
return false;
case EXIT_REASON_PML_FULL:
/* We don't expose PML support to L1. */
return false;
default:
return true;
}
@ -10267,6 +10270,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
if (enable_pml) {
/*
* Conceptually we want to copy the PML address and index from
* vmcs01 here, and then back to vmcs01 on nested vmexit. But,
* since we always flush the log on each vmexit, this happens
* to be equivalent to simply resetting the fields in vmcs02.
*/
ASSERT(vmx->pml_pg);
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
if (nested_cpu_has_ept(vmcs12)) {
kvm_mmu_unload(vcpu);
nested_ept_init_mmu_context(vcpu);

View File

@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
*
* On x86, access has to be given to the first megabyte of ram because that area
* contains BIOS code and data regions used by X and dosemu and similar apps.
* Access has to be given to non-kernel-ram areas as well, these contain the PCI
* mmio resources as well as potential bios/acpi data regions.
* On x86, access has to be given to the first megabyte of RAM because that
* area traditionally contains BIOS code and data regions used by X, dosemu,
* and similar apps. Since they map the entire memory range, the whole range
* must be allowed (for mapping), but any areas that would otherwise be
* disallowed are flagged as being "zero filled" instead of rejected.
* Access has to be given to non-kernel-ram areas as well, these contain the
* PCI mmio resources as well as potential bios/acpi data regions.
*/
int devmem_is_allowed(unsigned long pagenr)
{
if (pagenr < 256)
return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
if (page_is_ram(pagenr)) {
/*
* For disallowed memory regions in the low 1MB range,
* request that the page be shown as all zeros.
*/
if (pagenr < 256)
return 2;
return 0;
if (!page_is_ram(pagenr))
return 1;
return 0;
}
/*
* This must follow RAM test, since System RAM is considered a
* restricted resource under CONFIG_STRICT_IOMEM.
*/
if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
/* Low 1MB bypasses iomem restrictions. */
if (pagenr < 256)
return 1;
return 0;
}
return 1;
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
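The rewritten devmem_is_allowed() above moves from a yes/no answer to a tri-state one, and the drivers/char/mem.c hunks later in this diff consume it: 0 means refuse the access, 1 means pass it through, 2 means allow it but have /dev/mem present zeros. A rough userspace sketch of that contract; page_is_ram() and iomem_is_exclusive() are stubbed out with invented behaviour here:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static int stub_page_is_ram(unsigned long pagenr)
{
        return pagenr < 1024;           /* pretend the first 4 MB is RAM */
}

static int stub_iomem_is_exclusive(unsigned long addr)
{
        (void)addr;
        return 0;                       /* no exclusive regions in this demo */
}

static int demo_devmem_is_allowed(unsigned long pagenr)
{
        if (stub_page_is_ram(pagenr)) {
                if (pagenr < 256)       /* low 1MB RAM: readable as zeros */
                        return 2;
                return 0;               /* other System RAM: refused */
        }
        if (stub_iomem_is_exclusive(pagenr * PAGE_SIZE)) {
                if (pagenr < 256)       /* low 1MB bypasses iomem restrictions */
                        return 1;
                return 0;
        }
        return 1;                       /* ordinary non-RAM (mmio, ACPI, ...) */
}

static void demo_read_page(unsigned long pagenr, char *buf)
{
        switch (demo_devmem_is_allowed(pagenr)) {
        case 0:
                printf("page %5lu: -EPERM\n", pagenr);
                break;
        case 2:
                memset(buf, 0, PAGE_SIZE);      /* like clear_user() in read_mem() */
                printf("page %5lu: zero-filled\n", pagenr);
                break;
        default:
                printf("page %5lu: copied\n", pagenr);  /* real copy elided */
        }
}

int main(void)
{
        static char buf[PAGE_SIZE];

        demo_read_page(16, buf);        /* low 1MB RAM     -> zero-filled */
        demo_read_page(512, buf);       /* RAM above 1MB   -> refused     */
        demo_read_page(8192, buf);      /* non-RAM address -> copied      */
        return 0;
}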

View File

@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
/* No need to reserve regions that will never be freed. */
if (md.attribute & EFI_MEMORY_RUNTIME)
return;
size += addr % EFI_PAGE_SIZE;
size = round_up(size, EFI_PAGE_SIZE);
addr = round_down(addr, EFI_PAGE_SIZE);

View File

@ -171,7 +171,8 @@ void blk_mq_sched_put_request(struct request *rq)
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct elevator_queue *e = hctx->queue->elevator;
struct request_queue *q = hctx->queue;
struct elevator_queue *e = q->elevator;
const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
bool did_work = false;
LIST_HEAD(rq_list);
@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
*/
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart_hctx(hctx);
did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
did_work = blk_mq_dispatch_rq_list(q, &rq_list);
} else if (!has_sched_dispatch) {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
blk_mq_dispatch_rq_list(hctx, &rq_list);
blk_mq_dispatch_rq_list(q, &rq_list);
}
/*
@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
if (!rq)
break;
list_add(&rq->queuelist, &rq_list);
} while (blk_mq_dispatch_rq_list(hctx, &rq_list));
} while (blk_mq_dispatch_rq_list(q, &rq_list));
}
}
@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
return true;
}
static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
if (blk_mq_hctx_has_pending(hctx))
if (blk_mq_hctx_has_pending(hctx)) {
blk_mq_run_hw_queue(hctx, true);
return true;
}
}
return false;
}
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
unsigned int i;
/**
* list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
* @pos: loop cursor.
* @skip: the list element that will not be examined. Iteration starts at
* @skip->next.
* @head: head of the list to examine. This list must have at least one
* element, namely @skip.
* @member: name of the list_head structure within typeof(*pos).
*/
#define list_for_each_entry_rcu_rr(pos, skip, head, member) \
for ((pos) = (skip); \
(pos = (pos)->member.next != (head) ? list_entry_rcu( \
(pos)->member.next, typeof(*pos), member) : \
list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
(pos) != (skip); )
if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_sched_restart_hctx(hctx);
/*
* Called after a driver tag has been freed to check whether a hctx needs to
* be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
* queues in a round-robin fashion if the tag set of @hctx is shared with other
* hardware queues.
*/
void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
{
struct blk_mq_tags *const tags = hctx->tags;
struct blk_mq_tag_set *const set = hctx->queue->tag_set;
struct request_queue *const queue = hctx->queue, *q;
struct blk_mq_hw_ctx *hctx2;
unsigned int i, j;
if (set->flags & BLK_MQ_F_TAG_SHARED) {
rcu_read_lock();
list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
tag_set_list) {
queue_for_each_hw_ctx(q, hctx2, i)
if (hctx2->tags == tags &&
blk_mq_sched_restart_hctx(hctx2))
goto done;
}
j = hctx->queue_num + 1;
for (i = 0; i < queue->nr_hw_queues; i++, j++) {
if (j == queue->nr_hw_queues)
j = 0;
hctx2 = queue->queue_hw_ctx[j];
if (hctx2->tags == tags &&
blk_mq_sched_restart_hctx(hctx2))
break;
}
done:
rcu_read_unlock();
} else {
blk_mq_sched_restart_hctx(hctx);
}
@ -431,54 +475,26 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
}
}
int blk_mq_sched_setup(struct request_queue *q)
static int blk_mq_sched_alloc_tags(struct request_queue *q,
struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
int ret, i;
int ret;
/*
* Default to 256, since we don't split into sync/async like the
* old code did. Additionally, this is a per-hw queue depth.
*/
q->nr_requests = 2 * BLKDEV_MAX_RQ;
hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
set->reserved_tags);
if (!hctx->sched_tags)
return -ENOMEM;
/*
* We're switching to using an IO scheduler, so setup the hctx
* scheduler tags and switch the request map from the regular
* tags to scheduler tags. First allocate what we need, so we
* can safely fail and fallback, if needed.
*/
ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
q->nr_requests, set->reserved_tags);
if (!hctx->sched_tags) {
ret = -ENOMEM;
break;
}
ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
if (ret)
break;
}
ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
if (ret)
blk_mq_sched_free_tags(set, hctx, hctx_idx);
/*
* If we failed, free what we did allocate
*/
if (ret) {
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx->sched_tags)
continue;
blk_mq_sched_free_tags(set, hctx, i);
}
return ret;
}
return 0;
return ret;
}
void blk_mq_sched_teardown(struct request_queue *q)
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
@ -488,6 +504,71 @@ void blk_mq_sched_teardown(struct request_queue *q)
blk_mq_sched_free_tags(set, hctx, i);
}
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
struct elevator_queue *e = q->elevator;
if (!e)
return 0;
return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
}
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
struct elevator_queue *e = q->elevator;
if (!e)
return;
blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
struct blk_mq_hw_ctx *hctx;
unsigned int i;
int ret;
if (!e) {
q->elevator = NULL;
return 0;
}
/*
* Default to 256, since we don't split into sync/async like the
* old code did. Additionally, this is a per-hw queue depth.
*/
q->nr_requests = 2 * BLKDEV_MAX_RQ;
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_sched_alloc_tags(q, hctx, i);
if (ret)
goto err;
}
ret = e->ops.mq.init_sched(q, e);
if (ret)
goto err;
return 0;
err:
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
return ret;
}
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
if (e->type->ops.mq.exit_sched)
e->type->ops.mq.exit_sched(e);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
}
int blk_mq_sched_init(struct request_queue *q)
{
int ret;
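The list_for_each_entry_rcu_rr() helper introduced above iterates a circular list in round-robin order: it starts at the element after @skip, steps over the list-head sentinel when it wraps, and stops once it is back at @skip; that is the order blk_mq_sched_restart() walks queues when a tag set is shared. A plain (non-RCU, non-kernel) userspace sketch of that iteration order, with an invented node type:

#include <stdio.h>

struct node {
        struct node *next;              /* circular: last node points back to head */
        int id;                         /* -1 marks the sentinel list head */
};

/* Visit every real node once, starting just after 'skip', never visiting 'skip'. */
static void walk_round_robin(struct node *head, struct node *skip)
{
        struct node *pos = skip->next;

        while (pos != skip) {
                if (pos == head) {      /* don't treat the sentinel as an entry */
                        pos = pos->next;
                        continue;
                }
                printf("restart queue %d\n", pos->id);
                pos = pos->next;
        }
}

int main(void)
{
        struct node head = { .id = -1 };
        struct node a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };

        /* head -> a -> b -> c -> back to head */
        head.next = &a;
        a.next = &b;
        b.next = &c;
        c.next = &head;

        walk_round_robin(&head, &b);    /* prints queue 2, then wraps to queue 0 */
        return 0;
}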

View File

@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async, bool can_block);
@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
struct list_head *rq_list,
struct request *(*get_rq)(struct blk_mq_hw_ctx *));
int blk_mq_sched_setup(struct request_queue *q);
void blk_mq_sched_teardown(struct request_queue *q);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx);
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx);
int blk_mq_sched_init(struct request_queue *q);
@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
/*
* Mark a hardware queue and the request queue it belongs to as needing a
* restart.
*/
static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
}
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

View File

@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
blk_mq_put_ctx(alloc_data.ctx);
blk_queue_exit(q);
if (!rq)
@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
blk_mq_sched_completed_request(hctx, rq);
blk_mq_sched_restart_queues(hctx);
blk_mq_sched_restart(hctx);
blk_queue_exit(q);
}
@ -846,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
};
if (rq->tag != -1) {
done:
if (hctx)
*hctx = data.hctx;
return true;
}
if (rq->tag != -1)
goto done;
if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
data.flags |= BLK_MQ_REQ_RESERVED;
@ -863,10 +858,12 @@ done:
atomic_inc(&data.hctx->nr_active);
}
data.hctx->tags->rqs[rq->tag] = rq;
goto done;
}
return false;
done:
if (hctx)
*hctx = data.hctx;
return rq->tag != -1;
}
static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@ -963,14 +960,17 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
return true;
}
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
struct request_queue *q = hctx->queue;
struct blk_mq_hw_ctx *hctx;
struct request *rq;
LIST_HEAD(driver_list);
struct list_head *dptr;
int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
if (list_empty(list))
return false;
/*
* Start off with dptr being NULL, so we start the first request
* immediately, even if we have more pending.
@ -981,7 +981,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
* Now process all the entries, sending them to the driver.
*/
errors = queued = 0;
while (!list_empty(list)) {
do {
struct blk_mq_queue_data bd;
rq = list_first_entry(list, struct request, queuelist);
@ -1052,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
*/
if (!dptr && list->next != list->prev)
dptr = &driver_list;
}
} while (!list_empty(list));
hctx->dispatched[queued_to_index(queued)]++;
@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
return hctx->next_cpu;
}
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
unsigned long msecs)
{
if (unlikely(blk_mq_hctx_stopped(hctx) ||
!blk_mq_hw_queue_mapped(hctx)))
@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
put_cpu();
}
kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
if (msecs == 0)
kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
&hctx->run_work);
else
kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
&hctx->delayed_run_work,
msecs_to_jiffies(msecs));
}
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
__blk_mq_run_hw_queue(hctx);
}
static void blk_mq_delayed_run_work_fn(struct work_struct *work)
{
struct blk_mq_hw_ctx *hctx;
hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
__blk_mq_run_hw_queue(hctx);
}
static void blk_mq_delay_work_fn(struct work_struct *work)
{
struct blk_mq_hw_ctx *hctx;
@ -1924,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
hctx->fq->flush_rq, hctx_idx,
flush_start_tag + hctx_idx);
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@ -1960,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
node = hctx->numa_node = set->numa_node;
INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
@ -1990,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
goto exit_hctx;
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
if (!hctx->fq)
goto exit_hctx;
goto sched_exit_hctx;
if (set->ops->init_request &&
set->ops->init_request(set->driver_data,
@ -2007,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
free_fq:
kfree(hctx->fq);
sched_exit_hctx:
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@ -2233,8 +2268,6 @@ void blk_mq_release(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
unsigned int i;
blk_mq_sched_teardown(q);
/* hctx kobj stays in hctx */
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx)
@ -2565,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
return 0;
}
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
if (set->ops->map_queues)
return set->ops->map_queues(set);
else
return blk_mq_map_queues(set);
}
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@ -2619,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (!set->mq_map)
goto out_free_tags;
if (set->ops->map_queues)
ret = set->ops->map_queues(set);
else
ret = blk_mq_map_queues(set);
ret = blk_mq_update_queue_map(set);
if (ret)
goto out_free_mq_map;
@ -2714,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
blk_mq_freeze_queue(q);
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);

View File

@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,

View File

@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj)
if (q->elevator) {
ioc_clear_queue(q);
elevator_exit(q->elevator);
elevator_exit(q, q->elevator);
}
blk_exit_rl(&q->root_rl);

View File

@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name)
}
}
if (e->uses_mq) {
err = blk_mq_sched_setup(q);
if (!err)
err = e->ops.mq.init_sched(q, e);
} else
if (e->uses_mq)
err = blk_mq_init_sched(q, e);
else
err = e->ops.sq.elevator_init_fn(q, e);
if (err) {
if (e->uses_mq)
blk_mq_sched_teardown(q);
if (err)
elevator_put(e);
}
return err;
}
EXPORT_SYMBOL(elevator_init);
void elevator_exit(struct elevator_queue *e)
void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->uses_mq && e->type->ops.mq.exit_sched)
e->type->ops.mq.exit_sched(e);
blk_mq_exit_sched(q, e);
else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
e->type->ops.sq.elevator_exit_fn(e);
mutex_unlock(&e->sysfs_lock);
@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e)
}
EXPORT_SYMBOL_GPL(elv_unregister);
static int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e)
{
int ret;
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
if (q->elevator) {
if (q->elevator->registered)
elv_unregister_queue(q);
ioc_clear_queue(q);
elevator_exit(q, q->elevator);
}
ret = blk_mq_init_sched(q, new_e);
if (ret)
goto out;
if (new_e) {
ret = elv_register_queue(q);
if (ret) {
elevator_exit(q, q->elevator);
goto out;
}
}
if (new_e)
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
else
blk_add_trace_msg(q, "elv switch: none");
out:
blk_mq_unfreeze_queue(q);
blk_mq_start_stopped_hw_queues(q, true);
return ret;
}
/*
* switch to new_e io scheduler. be careful not to introduce deadlocks -
* we don't free the old io scheduler, before we have allocated what we
@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
bool old_registered = false;
int err;
if (q->mq_ops) {
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
}
if (q->mq_ops)
return elevator_switch_mq(q, new_e);
/*
* Turn on BYPASS and drain all requests w/ elevator private data.
@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
if (old) {
old_registered = old->registered;
if (old->uses_mq)
blk_mq_sched_teardown(q);
if (!q->mq_ops)
blk_queue_bypass_start(q);
blk_queue_bypass_start(q);
/* unregister and clear all auxiliary data of the old elevator */
if (old_registered)
@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
}
/* allocate, init and register new elevator */
if (new_e) {
if (new_e->uses_mq) {
err = blk_mq_sched_setup(q);
if (!err)
err = new_e->ops.mq.init_sched(q, new_e);
} else
err = new_e->ops.sq.elevator_init_fn(q, new_e);
if (err)
goto fail_init;
err = new_e->ops.sq.elevator_init_fn(q, new_e);
if (err)
goto fail_init;
err = elv_register_queue(q);
if (err)
goto fail_register;
} else
q->elevator = NULL;
err = elv_register_queue(q);
if (err)
goto fail_register;
/* done, kill the old one and finish */
if (old) {
elevator_exit(old);
if (!q->mq_ops)
blk_queue_bypass_end(q);
elevator_exit(q, old);
blk_queue_bypass_end(q);
}
if (q->mq_ops) {
blk_mq_unfreeze_queue(q);
blk_mq_start_stopped_hw_queues(q, true);
}
if (new_e)
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
else
blk_add_trace_msg(q, "elv switch: none");
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
return 0;
fail_register:
if (q->mq_ops)
blk_mq_sched_teardown(q);
elevator_exit(q->elevator);
elevator_exit(q, q->elevator);
fail_init:
/* switch failed, restore and re-register old elevator */
if (old) {
q->elevator = old;
elv_register_queue(q);
if (!q->mq_ops)
blk_queue_bypass_end(q);
}
if (q->mq_ops) {
blk_mq_unfreeze_queue(q);
blk_mq_start_stopped_hw_queues(q, true);
blk_queue_bypass_end(q);
}
return err;

View File

@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
/*
* The absolute minimum resource template is one end_tag descriptor.
* However, we will treat a lone end_tag as just a simple buffer.
*/
/* The absolute minimum resource template is one end_tag descriptor */
if (aml_length < sizeof(struct aml_resource_end_tag)) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
/* Invoke the user function */
if (user_function) {
status = user_function(aml, length, offset,
resource_index, context);
status =
user_function(aml, length, offset, resource_index,
context);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
*context = aml;
}
/* Check if buffer is defined to be longer than the resource length */
if (aml_length > (offset + length)) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/* Normal exit */
return_ACPI_STATUS(AE_OK);

View File

@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
return -ENODEV;
/*
* If the device has a _HID (or _CID) returning a valid ACPI/PNP
* device ID, it is better to make it look less attractive here, so that
* the other device with the same _ADR value (that may not have a valid
* device ID) can be matched going forward. [This means a second spec
* violation in a row, so whatever we do here is best effort anyway.]
* If the device has a _HID returning a valid ACPI/PNP device ID, it is
* better to make it look less attractive here, so that the other device
* with the same _ADR value (that may not have a valid device ID) can be
* matched going forward. [This means a second spec violation in a row,
* so whatever we do here is best effort anyway.]
*/
return sta_present && list_empty(&adev->pnp.ids) ?
return sta_present && !adev->pnp.type.platform_id ?
FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}

View File

@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
return;
device->flags.match_driver = true;
if (!ret) {
ret = device_attach(&device->dev);
if (ret < 0)
return;
if (!ret && device->pnp.type.platform_id)
acpi_default_enumeration(device);
if (ret > 0) {
acpi_device_set_enumerated(device);
goto ok;
}
ret = device_attach(&device->dev);
if (ret < 0)
return;
if (ret > 0 || !device->pnp.type.platform_id)
acpi_device_set_enumerated(device);
else
acpi_default_enumeration(device);
ok:
list_for_each_entry(child, &device->children, node)
acpi_bus_attach(child);

View File

@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &info };
/* SB600/700 don't have secondary port wired */
if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
(pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
ppi[1] = &ata_dummy_port_info;
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}

View File

@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
/* enable IRQ on hotplug */
pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
dev_dbg(&pdev->dev,
"enabling SATA hotplug (0x%x)\n",
(int) tmp8);
tmp8 |= SATA_HOTPLUG;
pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
if (board_id == vt6421) {
/* enable IRQ on hotplug */
pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
dev_dbg(&pdev->dev,
"enabling SATA hotplug (0x%x)\n",
(int) tmp8);
tmp8 |= SATA_HOTPLUG;
pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
}
}
/*

View File

@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
copy_page(mem, cmem);
memcpy(mem, cmem, PAGE_SIZE);
} else {
struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
@ -717,7 +717,7 @@ compress_again:
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
copy_page(cmem, src);
memcpy(cmem, src, PAGE_SIZE);
kunmap_atomic(src);
} else {
memcpy(cmem, src, clen);
@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
}
index = sector >> SECTORS_PER_PAGE_SHIFT;
offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
bv.bv_page = page;
bv.bv_len = PAGE_SIZE;
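The zram_rw_page() hunk above is an operator-precedence fix: '<<' binds more tightly than '&', so the unparenthesized expression masked the sector number with an already-shifted constant instead of shifting the masked remainder. A small standalone demonstration; the constants mirror 512-byte sectors on 4 KiB pages:

#include <stdio.h>

#define SECTOR_SHIFT            9
#define SECTORS_PER_PAGE_SHIFT  3       /* 4096 / 512 = 8 sectors per page */
#define SECTORS_PER_PAGE        (1 << SECTORS_PER_PAGE_SHIFT)

int main(void)
{
        unsigned long sector = 11;      /* sector 3 within its page */

        /* Old form: (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT is computed first. */
        unsigned long buggy = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

        /* Fixed form: mask first, then convert sectors to a byte offset. */
        unsigned long fixed = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        printf("buggy offset: %lu\n", buggy);   /* 0    (wrong)          */
        printf("fixed offset: %lu\n", fixed);   /* 1536 (3 * 512 bytes)  */
        return 0;
}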

View File

@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
#endif
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
while (count > 0) {
unsigned long remaining;
int allowed;
sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, count))
allowed = page_is_allowed(p >> PAGE_SHIFT);
if (!allowed)
return -EPERM;
if (allowed == 2) {
/* Show zeros for restricted memory. */
remaining = clear_user(buf, sz);
} else {
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr)
return -EFAULT;
/*
* On ia64 if a page has been mapped somewhere as uncached, then
* it must also be accessed uncached by the kernel or data
* corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr)
return -EFAULT;
remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
}
remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
#endif
while (count > 0) {
int allowed;
sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
allowed = page_is_allowed(p >> PAGE_SHIFT);
if (!allowed)
return -EPERM;
/*
* On ia64 if a page has been mapped somewhere as uncached, then
* it must also be accessed uncached by the kernel or data
* corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr) {
if (written)
break;
return -EFAULT;
}
/* Skip actual writing when a page is marked as restricted. */
if (allowed == 1) {
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr) {
if (written)
break;
return -EFAULT;
}
copied = copy_from_user(ptr, buf, sz);
unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
if (written)
break;
return -EFAULT;
copied = copy_from_user(ptr, buf, sz);
unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
if (written)
break;
return -EFAULT;
}
}
buf += sz;

View File

@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
vdev->config->reset(vdev);
virtqueue_disable_cb(portdev->c_ivq);
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
cancel_work_sync(&portdev->control_work);
cancel_work_sync(&portdev->config_work);
/*
* Once more: if control_work_handler() was running, it would
* enable the cb as the last step.
*/
virtqueue_disable_cb(portdev->c_ivq);
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
remove_controlq_data(portdev);
list_for_each_entry(port, &portdev->ports, list) {

View File

@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
*********************************************************************/
static enum cpuhp_state hp_online;
static int cpuhp_cpufreq_online(unsigned int cpu)
{
cpufreq_online(cpu);
return 0;
}
static int cpuhp_cpufreq_offline(unsigned int cpu)
{
cpufreq_offline(cpu);
return 0;
}
/**
* cpufreq_register_driver - register a CPU Frequency driver
* @driver_data: A struct cpufreq_driver containing the values#
@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
}
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
cpufreq_online,
cpufreq_offline);
cpuhp_cpufreq_online,
cpuhp_cpufreq_offline);
if (ret < 0)
goto err_if_unreg;
hp_online = ret;
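The cpufreq hunk above wraps cpufreq_online()/cpufreq_offline() in shims that always return 0, so an error from cpufreq no longer reaches the CPU-hotplug core, where a nonzero return from an online/offline callback would abort that hotplug step. A tiny sketch of the adapter pattern; the demo_* names and the simulated failure are invented:

#include <stdio.h>

/* Pretend cpufreq setup fails for one CPU. */
static int demo_cpufreq_online(unsigned int cpu)
{
        return cpu == 2 ? -19 /* -ENODEV */ : 0;
}

/* What the hotplug core would call; a nonzero return here would fail CPU online. */
static int demo_cpuhp_online(unsigned int cpu)
{
        int ret = demo_cpufreq_online(cpu);

        if (ret)
                printf("cpu%u: cpufreq setup failed (%d), CPU comes up anyway\n",
                       cpu, ret);
        return 0;       /* never propagate the error to the hotplug core */
}

int main(void)
{
        unsigned int cpu;

        for (cpu = 0; cpu < 4; cpu++)
                printf("cpu%u hotplug callback -> %d\n", cpu, demo_cpuhp_online(cpu));
        return 0;
}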

View File

@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
ctx->dev = caam_jr_alloc();
if (IS_ERR(ctx->dev)) {
dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->dev);
}

View File

@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);
if (ret || status) {
if (ret ||
(status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
dev_err(ctrldev,
"Failed to deinstantiate RNG4 SH%d\n",
sh_idx);
@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev)
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_ctrl __iomem *ctrl;
int ring;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
/* Remove platform devices for JobRs */
for (ring = 0; ring < ctrlpriv->total_jobrs; ring++)
of_device_unregister(ctrlpriv->jrpdev[ring]);
/* Remove platform devices under the crypto node */
of_platform_depopulate(ctrldev);
/* De-initialize RNG state handles initialized by this driver. */
if (ctrlpriv->rng4_sh_init)
@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#endif
static const struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
},
{
.compatible = "fsl,sec4.0",
},
{},
};
MODULE_DEVICE_TABLE(of, caam_match);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
/*
* Detect and enable JobRs
* First, find out how many ring spec'ed, allocate references
* for all, then go probe each one.
*/
rspec = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
ret = -ENOMEM;
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
goto iounmap_ctrl;
}
ring = 0;
ridx = 0;
ctrlpriv->total_jobrs = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
ctrlpriv->jrpdev[ring] =
of_platform_device_create(np, NULL, dev);
if (!ctrlpriv->jrpdev[ring]) {
pr_warn("JR physical index %d: Platform device creation error\n",
ridx);
ridx++;
continue;
}
ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
((__force uint8_t *)ctrl +
(ridx + JR_BLOCK_NUMBER) *
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
ctrlpriv->total_jobrs++;
ring++;
ridx++;
}
}
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present =
@ -847,17 +834,6 @@ disable_caam_ipg:
return ret;
}
static struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
},
{
.compatible = "fsl,sec4.0",
},
{},
};
MODULE_DEVICE_TABLE(of, caam_match);
static struct platform_driver caam_driver = {
.driver = {
.name = "caam",

View File

@ -66,7 +66,6 @@ struct caam_drv_private_jr {
struct caam_drv_private {
struct device *dev;
struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
struct platform_device *pdev;
/* Physical-presence section */

View File

@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
status = __gop_query32(sys_table_arg, gop32, &info, &size,
&current_fb_base);
if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
* provide multiple GOP devices, not all of which are
@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
status = __gop_query64(sys_table_arg, gop64, &info, &size,
&current_fb_base);
if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
* provide multiple GOP devices, not all of which are

View File

@ -1317,7 +1317,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
goto out_pm_put;
goto out_unlock;
}
gpu->event[event].fence = fence;
@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
hangcheck_timer_reset(gpu);
ret = 0;
out_unlock:
mutex_unlock(&gpu->lock);
out_pm_put:

View File

@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
{
int ret;
if (vgpu->failsafe)
return 0;
if (WARN_ON(bytes > 4))
return -EINVAL;

View File

@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
_EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}

Some files were not shown because too many files have changed in this diff.