
list_lru: get rid of ->active_nodes

The active_nodes mask allows us to skip empty nodes when walking over
list_lru items from all nodes in list_lru_count/walk.  However, these
functions are never called from hot paths, so the optimization hardly
seems worth the extra bookkeeping.  On the other hand, removing the mask
will make it easier to make list_lru per-memcg.
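
For illustration, the affected loops simply go from the mask-driven form
to a plain walk over every node with normal memory (this mirrors the
list_lru_count() hunk below; list_lru_walk() gets the same treatment):

	/* before: skip nodes whose lists are empty, as tracked by the mask */
	for_each_node_mask(nid, lru->active_nodes)
		count += list_lru_count_node(lru, nid);

	/* after: visit every node that has normal memory; no mask to keep in sync */
	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);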

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Vladimir Davydov 2015-02-12 14:59:04 -08:00 committed by Linus Torvalds
parent 05257a1a3d
commit ff0b67ef5b
2 changed files with 5 additions and 10 deletions

include/linux/list_lru.h

@@ -31,7 +31,6 @@ struct list_lru_node {
 
 struct list_lru {
 	struct list_lru_node	*node;
-	nodemask_t		active_nodes;
 };
 
 void list_lru_destroy(struct list_lru *lru);
@@ -94,7 +93,7 @@ static inline unsigned long list_lru_count(struct list_lru *lru)
 	long count = 0;
 	int nid;
 
-	for_each_node_mask(nid, lru->active_nodes)
+	for_each_node_state(nid, N_NORMAL_MEMORY)
 		count += list_lru_count_node(lru, nid);
 
 	return count;
@@ -142,7 +141,7 @@ list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 	long isolated = 0;
 	int nid;
 
-	for_each_node_mask(nid, lru->active_nodes) {
+	for_each_node_state(nid, N_NORMAL_MEMORY) {
 		isolated += list_lru_walk_node(lru, nid, isolate,
 					       cb_arg, &nr_to_walk);
 		if (nr_to_walk <= 0)

mm/list_lru.c

@@ -19,8 +19,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 	WARN_ON_ONCE(nlru->nr_items < 0);
 	if (list_empty(item)) {
 		list_add_tail(item, &nlru->list);
-		if (nlru->nr_items++ == 0)
-			node_set(nid, lru->active_nodes);
+		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -37,8 +36,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
 	spin_lock(&nlru->lock);
 	if (!list_empty(item)) {
 		list_del_init(item);
-		if (--nlru->nr_items == 0)
-			node_clear(nid, lru->active_nodes);
+		nlru->nr_items--;
 		WARN_ON_ONCE(nlru->nr_items < 0);
 		spin_unlock(&nlru->lock);
 		return true;
@@ -90,8 +88,7 @@ restart:
 		case LRU_REMOVED_RETRY:
 			assert_spin_locked(&nlru->lock);
 		case LRU_REMOVED:
-			if (--nlru->nr_items == 0)
-				node_clear(nid, lru->active_nodes);
+			nlru->nr_items--;
 			WARN_ON_ONCE(nlru->nr_items < 0);
 			isolated++;
 			/*
@@ -133,7 +130,6 @@ int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
 	if (!lru->node)
 		return -ENOMEM;
 
-	nodes_clear(lru->active_nodes);
 	for (i = 0; i < nr_node_ids; i++) {
 		spin_lock_init(&lru->node[i].lock);
 		if (key)