
Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  proc: add Alexey to MAINTAINERS
  kasan: depend on CONFIG_SLUB_DEBUG
  include/linux/dax.h: dax_iomap_fault() returns vm_fault_t
  x86/e820: put !E820_TYPE_RAM regions into memblock.reserved
  slub: fix failure when we delete and create a slab cache
  Revert "mm/vmstat.c: fix vmstat_update() preemption BUG"
  lib/percpu_ida.c: don't do alloc from per-CPU list if there is none
commit ea5f39f2f9
Author: Linus Torvalds
Date:   2018-06-28 11:42:56 -07:00

9 changed files with 38 additions and 8 deletions

MAINTAINERS

@@ -11481,6 +11481,15 @@ W:	http://wireless.kernel.org/en/users/Drivers/p54
 S:	Obsolete
 F:	drivers/net/wireless/intersil/prism54/
 
+PROC FILESYSTEM
+R:	Alexey Dobriyan <adobriyan@gmail.com>
+L:	linux-kernel@vger.kernel.org
+L:	linux-fsdevel@vger.kernel.org
+S:	Maintained
+F:	fs/proc/
+F:	include/linux/proc_fs.h
+F:	tools/testing/selftests/proc/
+
 PROC SYSCTL
 M:	"Luis R. Rodriguez" <mcgrof@kernel.org>
 M:	Kees Cook <keescook@chromium.org>

arch/x86/kernel/e820.c

@@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void)
 {
 	int i;
 	u64 end;
+	u64 addr = 0;
 
 	/*
 	 * The bootstrap memblock region count maximum is 128 entries
@@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void)
 		struct e820_entry *entry = &e820_table->entries[i];
 
 		end = entry->addr + entry->size;
+		if (addr < entry->addr)
+			memblock_reserve(addr, entry->addr - addr);
+		addr = end;
 		if (end != (resource_size_t)end)
 			continue;
 
+		/*
+		 * all !E820_TYPE_RAM ranges (including gap ranges) are put
+		 * into memblock.reserved to make sure that struct pages in
+		 * such regions are not left uninitialized after bootup.
+		 */
 		if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
-			continue;
-
-		memblock_add(entry->addr, entry->size);
+			memblock_reserve(entry->addr, entry->size);
+		else
+			memblock_add(entry->addr, entry->size);
 	}
 
 	/* Throw away partial pages: */
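
The new addr cursor walks the e820 table and memblock_reserve()s any hole between consecutive entries, and non-RAM entries themselves are now reserved rather than skipped, so their struct pages are initialized at boot. A minimal user-space sketch of the same gap-reserving walk, assuming an address-sorted table (e820 entries are sorted after e820__update_table()); the names and values here are illustrative, not kernel API:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t addr, size; };

int main(void)
{
	/* Two RAM entries with a hole between them (made-up values). */
	struct range table[] = { { 0x1000, 0x9f000 }, { 0x100000, 0x700000 } };
	uint64_t addr = 0;

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		uint64_t end = table[i].addr + table[i].size;

		/* A hole before this entry is what e820__memblock_setup()
		 * now hands to memblock_reserve(). */
		if (addr < table[i].addr)
			printf("reserve gap [%#llx-%#llx)\n",
			       (unsigned long long)addr,
			       (unsigned long long)table[i].addr);
		addr = end;
	}
	return 0;
}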

include/linux/dax.h

@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 		    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size, pfn_t pfn);
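
This hunk only brings the declaration in line with the definition: dax_iomap_fault() already returns vm_fault_t (a code such as VM_FAULT_NOPAGE or VM_FAULT_SIGBUS), not an int errno; the errno, if any, comes back through *errp. A rough user-space illustration of the type's intent; the typedef and values below are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>

typedef unsigned int vm_fault_t;	/* simplified stand-in */

#define VM_FAULT_NOPAGE	((vm_fault_t)0x0100)
#define VM_FAULT_SIGBUS	((vm_fault_t)0x0002)

/* A toy fault handler: it returns a VM_FAULT_* code, never an errno,
 * which is exactly what the fixed declaration documents. */
static vm_fault_t toy_fault(int backing_ok)
{
	return backing_ok ? VM_FAULT_NOPAGE : VM_FAULT_SIGBUS;
}

int main(void)
{
	printf("fault result: %#x\n", toy_fault(1));
	return 0;
}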

mm/slab.h

@@ -155,8 +155,12 @@ struct kmem_cache {
 
 #ifdef CONFIG_SYSFS
 #define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
 void sysfs_slab_release(struct kmem_cache *);
 #else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
 static inline void sysfs_slab_release(struct kmem_cache *s)
 {
 }

lib/Kconfig.kasan

@@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
 config KASAN
 	bool "KASan: runtime memory debugger"
 	depends on SLUB || (SLAB && !DEBUG_SLAB)
+	select SLUB_DEBUG if SLUB
 	select CONSTRUCTORS
 	select STACKDEPOT
 	help

lib/percpu_ida.c

@@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
 	spin_lock_irqsave(&tags->lock, flags);
 
 	/* Fastpath */
-	if (likely(tags->nr_free >= 0)) {
+	if (likely(tags->nr_free)) {
 		tag = tags->freelist[--tags->nr_free];
 		spin_unlock_irqrestore(&tags->lock, flags);
 		return tag;
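
The old condition was a tautology: nr_free in struct percpu_ida_cpu is unsigned, so nr_free >= 0 always holds and an empty per-CPU freelist was popped anyway, reading freelist[--nr_free] with a wrapped-around index. A stand-alone sketch of the bug pattern (plain C, not the kernel structures):

#include <stdio.h>

int main(void)
{
	unsigned int nr_free = 0;	/* empty freelist */

	/* Old check: always true for an unsigned value, so the
	 * "fastpath" fired even with nothing to hand out. */
	if (nr_free >= 0)
		printf("old fastpath would read freelist[%u]\n", nr_free - 1);

	/* Fixed check: an empty list falls through to the slowpath. */
	if (nr_free)
		printf("not reached for an empty list\n");
	return 0;
}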

mm/slab_common.c

@@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s)
 	list_del(&s->list);
 
 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
+#ifdef SLAB_SUPPORTS_SYSFS
+		sysfs_slab_unlink(s);
+#endif
 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 	} else {
 #ifdef SLAB_SUPPORTS_SYSFS
+		sysfs_slab_unlink(s);
 		sysfs_slab_release(s);
 #else
 		slab_kmem_cache_release(s);

mm/slub.c

@@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
 	kset_unregister(s->memcg_kset);
 #endif
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
-	kobject_del(&s->kobj);
 out:
 	kobject_put(&s->kobj);
 }
@@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 	schedule_work(&s->kobj_remove_work);
 }
 
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+	if (slab_state >= FULL)
+		kobject_del(&s->kobj);
+}
+
 void sysfs_slab_release(struct kmem_cache *s)
 {
 	if (slab_state >= FULL)
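
Together with the mm/slab.h and mm/slab_common.c hunks above, this splits kobject_del() out of the deferred teardown: shutdown_cache() now unlinks the sysfs name synchronously via sysfs_slab_unlink(), while the final kobject_put() still happens later, so destroying a SLAB_TYPESAFE_BY_RCU cache and immediately recreating one with the same name no longer collides with a stale sysfs entry still queued for RCU destruction. A toy user-space sketch of that ordering fix, with a single flag standing in for the sysfs directory entry (all names illustrative):

#include <stdio.h>
#include <stdbool.h>

static bool name_registered;	/* stands in for the cache's sysfs entry */

/* Analogue of sysfs_slab_unlink(): runs synchronously at destroy time. */
static void unlink_name(void)
{
	name_registered = false;
}

/* Analogue of the RCU work item: before the fix, the name was only
 * dropped here, long after kmem_cache_destroy() returned. */
static void deferred_release(void)
{
}

/* Fails on a duplicate name, as sysfs_create_dir() would. */
static int create_cache(const char *name)
{
	if (name_registered) {
		printf("create %s failed: duplicate name\n", name);
		return -1;
	}
	name_registered = true;
	printf("create %s ok\n", name);
	return 0;
}

int main(void)
{
	create_cache("foo");
	unlink_name();		/* destroy: name unlinked immediately */
	create_cache("foo");	/* immediate re-create now succeeds */
	deferred_release();	/* the rest of teardown stays deferred */
	return 0;
}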

mm/vmstat.c

@@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w)
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
-		preempt_disable();
 		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
 				this_cpu_ptr(&vmstat_work),
 				round_jiffies_relative(sysctl_stat_interval));
-		preempt_enable();
 	}
 }