Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: silently skip readahead for DAX inodes
  dax: fix device-dax region base
  fs/seq_file: fix out-of-bounds read
  mm: memcontrol: avoid unused function warning
  mm: clarify COMPACTION Kconfig text
  treewide: replace config_enabled() with IS_ENABLED() (2nd round)
  printk: fix parsing of "brl=" option
  soft_dirty: fix soft_dirty during THP split
  sysctl: handle error writing UINT_MAX to u32 fields
  get_maintainer: quiet noisy implicit -f vcs_file_exists checking
  byteswap: don't use __builtin_bswap*() with sparse
Linus Torvalds 2016-08-26 23:12:12 -07:00
commit 5e608a0270
14 changed files with 108 additions and 35 deletions

arch/mips/include/asm/page.h

@@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  */
 static inline unsigned long ___pa(unsigned long x)
 {
-	if (config_enabled(CONFIG_64BIT)) {
+	if (IS_ENABLED(CONFIG_64BIT)) {
 		/*
 		 * For MIPS64 the virtual address may either be in one of
 		 * the compatibility segements ckseg0 or ckseg1, or it may
@@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
 		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
 	}
 
-	if (!config_enabled(CONFIG_EVA)) {
+	if (!IS_ENABLED(CONFIG_EVA)) {
 		/*
 		 * We're using the standard MIPS32 legacy memory map, ie.
 		 * the address x is going to be in kseg0 or kseg1. We can
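
Note on the replacement pattern: unlike the removed config_enabled(), IS_ENABLED() is true for both built-in (=y) and modular (=m) options, which is why it is the preferred spelling. A minimal sketch of the preprocessor trick behind it, paraphrased and simplified from include/linux/kconfig.h (the real header spells the internals slightly differently):

    /*
     * Kconfig emits "#define CONFIG_FOO 1" for =y and
     * "#define CONFIG_FOO_MODULE 1" for =m.  The placeholder trick
     * turns "is this macro defined?" into a 0/1 constant expression.
     */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x)			___is_defined(x)
    #define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

    #define IS_BUILTIN(option)	__is_defined(option)
    #define IS_MODULE(option)	__is_defined(option##_MODULE)
    #define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))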

arch/s390/kernel/setup.c

@@ -204,11 +204,9 @@ static void __init conmode_default(void)
 #endif
 		}
 	} else if (MACHINE_IS_KVM) {
-		if (sclp.has_vt220 &&
-		    config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
 			SET_CONSOLE_VT220;
-		else if (sclp.has_linemode &&
-			 config_enabled(CONFIG_SCLP_CONSOLE))
+		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
 			SET_CONSOLE_SCLP;
 		else
 			SET_CONSOLE_HVC;

arch/x86/mm/kaslr.c

@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
  */
 static inline bool kaslr_memory_enabled(void)
 {
-	return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
 }
 
 /* Initialize base and padding for each memory region randomized with KASLR */

drivers/dax/pmem.c

@@ -116,6 +116,9 @@ static int dax_pmem_probe(struct device *dev)
 	if (rc)
 		return rc;
 
+	/* adjust the dax_region resource to the start of data */
+	res.start += le64_to_cpu(pfn_sb->dataoff);
+
 	nd_region = to_nd_region(dev->parent);
 	dax_region = alloc_dax_region(dev, nd_region->id, &res,
 			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
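
For context, the bug fixed here: alloc_dax_region() was handed the raw namespace resource, so the device-dax region started at the namespace base instead of at the data area behind the pfn metadata. A toy calculation with invented numbers (the real offset comes from the pfn superblock):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* hypothetical values, for illustration only */
    	uint64_t ns_base = 0x100000000ull;  /* raw namespace res.start */
    	uint64_t dataoff = 0x200000ull;     /* pfn_sb->dataoff: 2 MiB metadata */

    	/* the region base must skip the metadata, as the fix does */
    	printf("dax region base: %#llx\n",
    	       (unsigned long long)(ns_base + dataoff));
    	return 0;
    }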

fs/seq_file.c

@@ -223,8 +223,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
 		size -= n;
 		buf += n;
 		copied += n;
-		if (!m->count)
+		if (!m->count) {
+			m->from = 0;
 			m->index++;
+		}
 		if (!size)
 			goto Done;
 	}
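
The fix resets m->from when the internal buffer is fully drained, so the next pass cannot reuse a stale offset into a freshly refilled buffer. A hedged userspace sketch of the general trigger shape (small, odd-sized reads so a drain lands exactly on a record boundary; illustrative only, not the original reproducer):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	char buf[7];	/* deliberately awkward chunk size */
    	int fd = open("/proc/self/maps", O_RDONLY);
    	ssize_t n;

    	if (fd < 0)
    		return 1;
    	while ((n = read(fd, buf, sizeof(buf))) > 0)
    		fwrite(buf, 1, (size_t)n, stdout);
    	close(fd);
    	return 0;
    }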

include/linux/compiler-gcc.h

@@ -242,7 +242,11 @@
  */
 #define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
 
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+/*
+ * sparse (__CHECKER__) pretends to be gcc, but can't do constant
+ * folding in __builtin_bswap*() (yet), so don't set these for it.
+ */
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
 #if GCC_VERSION >= 40400
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
@@ -250,7 +254,7 @@
 #if GCC_VERSION >= 40800
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
 
 #if GCC_VERSION >= 50000
 #define KASAN_ABI_VERSION 4
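
The breakage class the __CHECKER__ guard avoids: with __HAVE_BUILTIN_BSWAP32__ defined, byte-order helpers expand to __builtin_bswap32(), and code that needs an integer constant expression relies on the compiler folding a constant argument. gcc folds it; sparse (at the time) did not. A standalone sketch, where cpu_to_be32 is a simplified stand-in for the kernel helper with the little-endian case assumed:

    #include <stdint.h>

    #define cpu_to_be32(x)	__builtin_bswap32(x)

    /* gcc folds this initializer to a literal; a checker that cannot
     * fold __builtin_bswap32() rejects it. */
    static const uint32_t magic = cpu_to_be32(0x1badb002);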

include/linux/sysctl.h

@@ -42,6 +42,8 @@ extern int proc_dostring(struct ctl_table *, int,
 			 void __user *, size_t *, loff_t *);
 extern int proc_dointvec(struct ctl_table *, int,
 			 void __user *, size_t *, loff_t *);
+extern int proc_douintvec(struct ctl_table *, int,
+			 void __user *, size_t *, loff_t *);
 extern int proc_dointvec_minmax(struct ctl_table *, int,
 				void __user *, size_t *, loff_t *);
 extern int proc_dointvec_jiffies(struct ctl_table *, int,

kernel/printk/braille.c

@@ -9,10 +9,10 @@
 
 char *_braille_console_setup(char **str, char **brl_options)
 {
-	if (!memcmp(*str, "brl,", 4)) {
+	if (!strncmp(*str, "brl,", 4)) {
 		*brl_options = "";
 		*str += 4;
-	} else if (!memcmp(str, "brl=", 4)) {
+	} else if (!strncmp(*str, "brl=", 4)) {
 		*brl_options = *str + 4;
 		*str = strchr(*brl_options, ',');
 		if (!*str)
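
Two distinct bugs in one line: memcmp(str, ...) compared the bytes of the char ** pointer variable itself rather than the option text, so "brl=" could never match, and memcmp() always reads the full length, so an option string shorter than four bytes could be read past its terminating NUL. strncmp(*str, ...) fixes both. A standalone illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char option[] = "brl=ttyS0,115200";
    	char *s = option;
    	char **str = &s;

    	/* compares the pointer variable's own bytes: effectively random */
    	printf("memcmp(str, \"brl=\", 4)   = %d\n", memcmp(str, "brl=", 4));
    	/* compares the option text and stops at a NUL: matches */
    	printf("strncmp(*str, \"brl=\", 4) = %d\n", strncmp(*str, "brl=", 4));
    	return 0;
    }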

kernel/sysctl.c

@@ -2140,6 +2140,21 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
 	return 0;
 }
 
+static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
+				 int *valp,
+				 int write, void *data)
+{
+	if (write) {
+		if (*negp)
+			return -EINVAL;
+		*valp = *lvalp;
+	} else {
+		unsigned int val = *valp;
+		*lvalp = (unsigned long)val;
+	}
+	return 0;
+}
+
 static const char proc_wspace_sep[] = { ' ', '\t', '\n' };
 
 static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
@@ -2259,8 +2274,27 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
 int proc_dointvec(struct ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-    return do_proc_dointvec(table,write,buffer,lenp,ppos,
-		    	    NULL,NULL);
+	return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL);
+}
+
+/**
+ * proc_douintvec - read a vector of unsigned integers
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer
+ * values from/to the user buffer, treated as an ASCII string.
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec(struct ctl_table *table, int write,
+		     void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return do_proc_dointvec(table, write, buffer, lenp, ppos,
+				do_proc_douintvec_conv, NULL);
 }
 
 /*
@@ -2858,6 +2892,12 @@ int proc_dointvec(struct ctl_table *table, int write,
 	return -ENOSYS;
 }
 
+int proc_douintvec(struct ctl_table *table, int write,
+		  void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return -ENOSYS;
+}
+
 int proc_dointvec_minmax(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -2903,6 +2943,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
  * exception granted :-)
  */
 EXPORT_SYMBOL(proc_dointvec);
+EXPORT_SYMBOL(proc_douintvec);
 EXPORT_SYMBOL(proc_dointvec_jiffies);
 EXPORT_SYMBOL(proc_dointvec_minmax);
 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
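
What the new handler is for, as a hedged usage sketch: a sysctl entry backed by an unsigned int. The my_* names below are invented; the ctl_table fields and proc_douintvec are the real interface added here. Previously, routing such an entry through proc_dointvec made a write of 4294967295 (UINT_MAX) fail the signed overflow check; proc_douintvec accepts the full unsigned range and rejects negative input with -EINVAL.

    #include <linux/sysctl.h>

    static unsigned int my_threshold = 100;	/* hypothetical tunable */

    static struct ctl_table my_table[] = {
    	{
    		.procname	= "my_threshold",
    		.data		= &my_threshold,
    		.maxlen		= sizeof(unsigned int),
    		.mode		= 0644,
    		.proc_handler	= proc_douintvec,
    	},
    	{ }
    };

Registered via register_sysctl() (say, under a hypothetical "dev/my" path), the entry then accepts the full 0..UINT_MAX range from userspace.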

mm/Kconfig

@@ -262,7 +262,14 @@ config COMPACTION
 	select MIGRATION
 	depends on MMU
 	help
-	  Allows the compaction of memory for the allocation of huge pages.
+	  Compaction is the only memory management component to form
+	  high order (larger physically contiguous) memory blocks
+	  reliably. The page allocator relies on compaction heavily and
+	  the lack of the feature can lead to unexpected OOM killer
+	  invocations for high order memory requests. You shouldn't
+	  disable this option unless there really is a strong reason for
+	  it and then we would be really interested to hear about that at
+	  linux-mm@kvack.org.
 
 #
 # support for page migration
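
With CONFIG_COMPACTION=y, compaction can also be triggered manually through procfs, which is a handy way to observe the behavior the new help text describes. A small sketch using the real /proc/sys/vm/compact_memory knob:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);

    	if (fd < 0)
    		return 1;
    	(void)write(fd, "1", 1);	/* writing any value compacts all zones */
    	close(fd);
    	return 0;
    }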

mm/huge_memory.c

@@ -1512,7 +1512,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *page;
 	pgtable_t pgtable;
 	pmd_t _pmd;
-	bool young, write, dirty;
+	bool young, write, dirty, soft_dirty;
 	unsigned long addr;
 	int i;
 
@@ -1546,6 +1546,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	write = pmd_write(*pmd);
 	young = pmd_young(*pmd);
 	dirty = pmd_dirty(*pmd);
+	soft_dirty = pmd_soft_dirty(*pmd);
 
 	pmdp_huge_split_prepare(vma, haddr, pmd);
 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1562,6 +1563,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			swp_entry_t swp_entry;
 			swp_entry = make_migration_entry(page + i, write);
 			entry = swp_entry_to_pte(swp_entry);
+			if (soft_dirty)
+				entry = pte_swp_mksoft_dirty(entry);
 		} else {
 			entry = mk_pte(page + i, vma->vm_page_prot);
 			entry = maybe_mkwrite(entry, vma);
@@ -1569,6 +1572,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 				entry = pte_wrprotect(entry);
 			if (!young)
 				entry = pte_mkold(entry);
+			if (soft_dirty)
+				entry = pte_mksoft_dirty(entry);
 		}
 		if (dirty)
 			SetPageDirty(page + i);
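
The user-visible contract being repaired: soft-dirty tracking (used by checkpoint/restore tools) is cleared by writing "4" to /proc/pid/clear_refs and read back as bit 55 of each /proc/pid/pagemap entry; before this fix, splitting a THP silently dropped the flag from the resulting PTEs. A hedged sketch of checking the bit for one address via the stable pagemap ABI, with error handling trimmed:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static int soft_dirty(const void *addr)
    {
    	uint64_t ent;
    	long pagesize = sysconf(_SC_PAGESIZE);
    	off_t off = (off_t)((uintptr_t)addr / (uintptr_t)pagesize) * sizeof(ent);
    	int fd = open("/proc/self/pagemap", O_RDONLY);

    	if (fd < 0)
    		return -1;
    	if (pread(fd, &ent, sizeof(ent), off) != sizeof(ent)) {
    		close(fd);
    		return -1;
    	}
    	close(fd);
    	return (int)((ent >> 55) & 1);	/* bit 55: pte is soft-dirty */
    }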

mm/memcontrol.c

@@ -4082,24 +4082,6 @@ static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 	atomic_add(n, &memcg->id.ref);
 }
 
-static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
-{
-	while (!atomic_inc_not_zero(&memcg->id.ref)) {
-		/*
-		 * The root cgroup cannot be destroyed, so it's refcount must
-		 * always be >= 1.
-		 */
-		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
-			VM_BUG_ON(1);
-			break;
-		}
-		memcg = parent_mem_cgroup(memcg);
-		if (!memcg)
-			memcg = root_mem_cgroup;
-	}
-	return memcg;
-}
-
 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
 {
 	if (atomic_sub_and_test(n, &memcg->id.ref)) {
@@ -5821,6 +5803,24 @@ static int __init mem_cgroup_init(void)
 subsys_initcall(mem_cgroup_init);
 
 #ifdef CONFIG_MEMCG_SWAP
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+{
+	while (!atomic_inc_not_zero(&memcg->id.ref)) {
+		/*
+		 * The root cgroup cannot be destroyed, so it's refcount must
+		 * always be >= 1.
+		 */
+		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+			VM_BUG_ON(1);
+			break;
+		}
+		memcg = parent_mem_cgroup(memcg);
+		if (!memcg)
+			memcg = root_mem_cgroup;
+	}
+	return memcg;
+}
+
 /**
  * mem_cgroup_swapout - transfer a memsw charge to swap
  * @page: page whose memsw charge to transfer
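
The warning this move silences: with CONFIG_MEMCG_SWAP disabled, mem_cgroup_id_get_online() had no remaining caller, and a static function with no caller trips -Wunused-function. Relocating the definition inside the #ifdef block that contains its only user is the standard cure. A generic sketch of the pattern, where FOO and the function names are invented for illustration:

    static int helper(void)	/* only used by code under #ifdef FOO */
    {
    	return 42;
    }

    #ifdef FOO
    int feature_entry(void)
    {
    	return helper();
    }
    #endif

    /* Built without FOO, gcc warns: 'helper' defined but not used.
     * Moving helper() inside the #ifdef keeps a -Werror build clean. */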

mm/readahead.c

@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dax.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/blkdev.h>
@@ -544,6 +545,14 @@ do_readahead(struct address_space *mapping, struct file *filp,
 	if (!mapping || !mapping->a_ops)
 		return -EINVAL;
 
+	/*
+	 * Readahead doesn't make sense for DAX inodes, but we don't want it
+	 * to report a failure either.  Instead, we just return success and
+	 * don't do any work.
+	 */
+	if (dax_mapping(mapping))
+		return 0;
+
 	return force_page_cache_readahead(mapping, filp, index, nr);
 }
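
From userspace the change is visible through the readahead(2) syscall, which do_readahead() backs: on a DAX inode the call now succeeds as a no-op instead of churning the page cache it bypasses anyway. A sketch; the mount point and file name are hypothetical:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/mnt/pmem/data.bin", O_RDONLY);	/* hypothetical DAX file */

    	if (fd < 0)
    		return 1;
    	if (readahead(fd, 0, 1 << 20) != 0)	/* returns 0 (no-op) on DAX */
    		perror("readahead");
    	close(fd);
    	return 0;
    }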

scripts/get_maintainer.pl

@@ -2136,9 +2136,11 @@ sub vcs_file_exists {
 
     my $cmd = $VCS_cmds{"file_exists_cmd"};
     $cmd =~ s/(\$\w+)/$1/eeg;		# interpolate $cmd
+    $cmd .= " 2>&1";
 
     $exists = &{$VCS_cmds{"execute_cmd"}}($cmd);
+    return 0 if ($? != 0);
 
     return $exists;
 }
}