1
0
Fork 0

mm: update callers to use HASH_ZERO flag

Update dcache, inode, pid, mountpoint, and mount hash tables to use
HASH_ZERO, and remove initialization after allocations.  In places
where HASH_EARLY was used, such as in __pv_init_lock_hash, the hash
table was already assumed to be zeroed, because memblock zeroes the
memory.

CPU: SPARC M6, Memory: 7T
Before fix:
  Dentry cache hash table entries: 1073741824
  Inode-cache hash table entries: 536870912
  Mount-cache hash table entries: 16777216
  Mountpoint-cache hash table entries: 16777216
  ftrace: allocating 20414 entries in 40 pages
  Total time: 11.798s

After fix:
  Dentry cache hash table entries: 1073741824
  Inode-cache hash table entries: 536870912
  Mount-cache hash table entries: 16777216
  Mountpoint-cache hash table entries: 16777216
  ftrace: allocating 20414 entries in 40 pages
  Total time: 3.198s

CPU: Intel Xeon E5-2630, Memory: 2.2T:
Before fix:
  Dentry cache hash table entries: 536870912
  Inode-cache hash table entries: 268435456
  Mount-cache hash table entries: 8388608
  Mountpoint-cache hash table entries: 8388608
  CPU: Physical Processor ID: 0
  Total time: 3.245s

After fix:
  Dentry cache hash table entries: 536870912
  Inode-cache hash table entries: 268435456
  Mount-cache hash table entries: 8388608
  Mountpoint-cache hash table entries: 8388608
  CPU: Physical Processor ID: 0
  Total time: 3.244s

Link: http://lkml.kernel.org/r/1488432825-92126-4-git-send-email-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Babu Moger <babu.moger@oracle.com>
Cc: David Miller <davem@davemloft.net>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
Pavel Tatashin 2017-07-06 15:39:11 -07:00 committed by Linus Torvalds
parent 3749a8f008
commit 3d375d7859
5 changed files with 12 additions and 40 deletions

View File

@@ -3546,8 +3546,6 @@ __setup("dhash_entries=", set_dhash_entries);
static void __init dcache_init_early(void) static void __init dcache_init_early(void)
{ {
unsigned int loop;
/* If hashes are distributed across NUMA nodes, defer /* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available. * hash allocation until vmalloc space is available.
*/ */
@@ -3559,24 +3557,19 @@ static void __init dcache_init_early(void)
sizeof(struct hlist_bl_head), sizeof(struct hlist_bl_head),
dhash_entries, dhash_entries,
13, 13,
HASH_EARLY, HASH_EARLY | HASH_ZERO,
&d_hash_shift, &d_hash_shift,
&d_hash_mask, &d_hash_mask,
0, 0,
0); 0);
for (loop = 0; loop < (1U << d_hash_shift); loop++)
INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
} }
static void __init dcache_init(void) static void __init dcache_init(void)
{ {
unsigned int loop; /*
/*
* A constructor could be added for stable state like the lists, * A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature * but it is probably not worth it because of the cache nature
* of the dcache. * of the dcache.
*/ */
dentry_cache = KMEM_CACHE(dentry, dentry_cache = KMEM_CACHE(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT); SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
@@ -3590,14 +3583,11 @@ static void __init dcache_init(void)
sizeof(struct hlist_bl_head), sizeof(struct hlist_bl_head),
dhash_entries, dhash_entries,
13, 13,
0, HASH_ZERO,
&d_hash_shift, &d_hash_shift,
&d_hash_mask, &d_hash_mask,
0, 0,
0); 0);
for (loop = 0; loop < (1U << d_hash_shift); loop++)
INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
} }
/* SLAB cache for __getname() consumers */ /* SLAB cache for __getname() consumers */

View File

@@ -1915,8 +1915,6 @@ __setup("ihash_entries=", set_ihash_entries);
*/ */
void __init inode_init_early(void) void __init inode_init_early(void)
{ {
unsigned int loop;
/* If hashes are distributed across NUMA nodes, defer /* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available. * hash allocation until vmalloc space is available.
*/ */
@@ -1928,20 +1926,15 @@ void __init inode_init_early(void)
sizeof(struct hlist_head), sizeof(struct hlist_head),
ihash_entries, ihash_entries,
14, 14,
HASH_EARLY, HASH_EARLY | HASH_ZERO,
&i_hash_shift, &i_hash_shift,
&i_hash_mask, &i_hash_mask,
0, 0,
0); 0);
for (loop = 0; loop < (1U << i_hash_shift); loop++)
INIT_HLIST_HEAD(&inode_hashtable[loop]);
} }
void __init inode_init(void) void __init inode_init(void)
{ {
unsigned int loop;
/* inode slab cache */ /* inode slab cache */
inode_cachep = kmem_cache_create("inode_cache", inode_cachep = kmem_cache_create("inode_cache",
sizeof(struct inode), sizeof(struct inode),
@@ -1959,14 +1952,11 @@ void __init inode_init(void)
sizeof(struct hlist_head), sizeof(struct hlist_head),
ihash_entries, ihash_entries,
14, 14,
0, HASH_ZERO,
&i_hash_shift, &i_hash_shift,
&i_hash_mask, &i_hash_mask,
0, 0,
0); 0);
for (loop = 0; loop < (1U << i_hash_shift); loop++)
INIT_HLIST_HEAD(&inode_hashtable[loop]);
} }
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev) void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)

View File

@@ -3239,7 +3239,6 @@ static void __init init_mount_tree(void)
void __init mnt_init(void) void __init mnt_init(void)
{ {
unsigned u;
int err; int err;
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
@@ -3248,22 +3247,17 @@ void __init mnt_init(void)
mount_hashtable = alloc_large_system_hash("Mount-cache", mount_hashtable = alloc_large_system_hash("Mount-cache",
sizeof(struct hlist_head), sizeof(struct hlist_head),
mhash_entries, 19, mhash_entries, 19,
0, HASH_ZERO,
&m_hash_shift, &m_hash_mask, 0, 0); &m_hash_shift, &m_hash_mask, 0, 0);
mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
sizeof(struct hlist_head), sizeof(struct hlist_head),
mphash_entries, 19, mphash_entries, 19,
0, HASH_ZERO,
&mp_hash_shift, &mp_hash_mask, 0, 0); &mp_hash_shift, &mp_hash_mask, 0, 0);
if (!mount_hashtable || !mountpoint_hashtable) if (!mount_hashtable || !mountpoint_hashtable)
panic("Failed to allocate mount hash table\n"); panic("Failed to allocate mount hash table\n");
for (u = 0; u <= m_hash_mask; u++)
INIT_HLIST_HEAD(&mount_hashtable[u]);
for (u = 0; u <= mp_hash_mask; u++)
INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
kernfs_init(); kernfs_init();
err = sysfs_init(); err = sysfs_init();

View File

@@ -193,7 +193,8 @@ void __init __pv_init_lock_hash(void)
*/ */
pv_lock_hash = alloc_large_system_hash("PV qspinlock", pv_lock_hash = alloc_large_system_hash("PV qspinlock",
sizeof(struct pv_hash_entry), sizeof(struct pv_hash_entry),
pv_hash_size, 0, HASH_EARLY, pv_hash_size, 0,
HASH_EARLY | HASH_ZERO,
&pv_lock_hash_bits, NULL, &pv_lock_hash_bits, NULL,
pv_hash_size, pv_hash_size); pv_hash_size, pv_hash_size);
} }

View File

@@ -575,16 +575,13 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
*/ */
void __init pidhash_init(void) void __init pidhash_init(void)
{ {
unsigned int i, pidhash_size; unsigned int pidhash_size;
pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18, pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
HASH_EARLY | HASH_SMALL, HASH_EARLY | HASH_SMALL | HASH_ZERO,
&pidhash_shift, NULL, &pidhash_shift, NULL,
0, 4096); 0, 4096);
pidhash_size = 1U << pidhash_shift; pidhash_size = 1U << pidhash_shift;
for (i = 0; i < pidhash_size; i++)
INIT_HLIST_HEAD(&pid_hash[i]);
} }
void __init pidmap_init(void) void __init pidmap_init(void)