1
0
Fork 0

mm: memcontrol: use int for event/state parameter in several functions

Several functions use an enum type as parameter for an event/state, but
are called in some locations with an argument of a different enum type.
Adjust the interface of these functions to reality by changing the
parameter to int.

This fixes a ton of enum-conversion warnings that are generated when
building the kernel with clang.

[mka@chromium.org: also change parameter type of inc/dec/mod_memcg_page_state()]
  Link: http://lkml.kernel.org/r/20170728213442.93823-1-mka@chromium.org
Link: http://lkml.kernel.org/r/20170727211004.34435-1-mka@chromium.org
Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Doug Anderson <dianders@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
zero-colors
Matthias Kaehlcke 2017-09-06 16:22:09 -07:00 committed by Linus Torvalds
parent 67e5ed9699
commit 04fecbf51b
2 changed files with 35 additions and 21 deletions

View File

@@ -488,8 +488,9 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg); void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page); void unlock_page_memcg(struct page *page);
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx) int idx)
{ {
long val = 0; long val = 0;
int cpu; int cpu;
@@ -503,15 +504,17 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
return val; return val;
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg, static inline void __mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx, int val) int idx, int val)
{ {
if (!mem_cgroup_disabled()) if (!mem_cgroup_disabled())
__this_cpu_add(memcg->stat->count[idx], val); __this_cpu_add(memcg->stat->count[idx], val);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg, static inline void mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx, int val) int idx, int val)
{ {
if (!mem_cgroup_disabled()) if (!mem_cgroup_disabled())
this_cpu_add(memcg->stat->count[idx], val); this_cpu_add(memcg->stat->count[idx], val);
@@ -535,14 +538,14 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
* Kernel pages are an exception to this, since they'll never move. * Kernel pages are an exception to this, since they'll never move.
*/ */
static inline void __mod_memcg_page_state(struct page *page, static inline void __mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int val) int idx, int val)
{ {
if (page->mem_cgroup) if (page->mem_cgroup)
__mod_memcg_state(page->mem_cgroup, idx, val); __mod_memcg_state(page->mem_cgroup, idx, val);
} }
static inline void mod_memcg_page_state(struct page *page, static inline void mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int val) int idx, int val)
{ {
if (page->mem_cgroup) if (page->mem_cgroup)
mod_memcg_state(page->mem_cgroup, idx, val); mod_memcg_state(page->mem_cgroup, idx, val);
@@ -632,8 +635,9 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
this_cpu_add(memcg->stat->events[idx], count); this_cpu_add(memcg->stat->events[idx], count);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void count_memcg_page_event(struct page *page, static inline void count_memcg_page_event(struct page *page,
enum memcg_stat_item idx) int idx)
{ {
if (page->mem_cgroup) if (page->mem_cgroup)
count_memcg_events(page->mem_cgroup, idx, 1); count_memcg_events(page->mem_cgroup, idx, 1);
@@ -846,31 +850,31 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
} }
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx) int idx)
{ {
return 0; return 0;
} }
static inline void __mod_memcg_state(struct mem_cgroup *memcg, static inline void __mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx, int idx,
int nr) int nr)
{ {
} }
static inline void mod_memcg_state(struct mem_cgroup *memcg, static inline void mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx, int idx,
int nr) int nr)
{ {
} }
static inline void __mod_memcg_page_state(struct page *page, static inline void __mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int idx,
int nr) int nr)
{ {
} }
static inline void mod_memcg_page_state(struct page *page, static inline void mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int idx,
int nr) int nr)
{ {
} }
@@ -924,7 +928,7 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
} }
static inline void count_memcg_page_event(struct page *page, static inline void count_memcg_page_event(struct page *page,
enum memcg_stat_item idx) int idx)
{ {
} }
@@ -934,26 +938,30 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
} }
#endif /* CONFIG_MEMCG */ #endif /* CONFIG_MEMCG */
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg, static inline void __inc_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx) int idx)
{ {
__mod_memcg_state(memcg, idx, 1); __mod_memcg_state(memcg, idx, 1);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg, static inline void __dec_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx) int idx)
{ {
__mod_memcg_state(memcg, idx, -1); __mod_memcg_state(memcg, idx, -1);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page, static inline void __inc_memcg_page_state(struct page *page,
enum memcg_stat_item idx) int idx)
{ {
__mod_memcg_page_state(page, idx, 1); __mod_memcg_page_state(page, idx, 1);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page, static inline void __dec_memcg_page_state(struct page *page,
enum memcg_stat_item idx) int idx)
{ {
__mod_memcg_page_state(page, idx, -1); __mod_memcg_page_state(page, idx, -1);
} }
@@ -982,26 +990,30 @@ static inline void __dec_lruvec_page_state(struct page *page,
__mod_lruvec_page_state(page, idx, -1); __mod_lruvec_page_state(page, idx, -1);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg, static inline void inc_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx) int idx)
{ {
mod_memcg_state(memcg, idx, 1); mod_memcg_state(memcg, idx, 1);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg, static inline void dec_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx) int idx)
{ {
mod_memcg_state(memcg, idx, -1); mod_memcg_state(memcg, idx, -1);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page, static inline void inc_memcg_page_state(struct page *page,
enum memcg_stat_item idx) int idx)
{ {
mod_memcg_page_state(page, idx, 1); mod_memcg_page_state(page, idx, 1);
} }
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page, static inline void dec_memcg_page_state(struct page *page,
enum memcg_stat_item idx) int idx)
{ {
mod_memcg_page_state(page, idx, -1); mod_memcg_page_state(page, idx, -1);
} }

View File

@@ -550,10 +550,12 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
* value, and reading all cpu value can be performance bottleneck in some * value, and reading all cpu value can be performance bottleneck in some
* common workload, threshold and synchronization as vmstat[] should be * common workload, threshold and synchronization as vmstat[] should be
* implemented. * implemented.
*
* The parameter idx can be of type enum memcg_event_item or vm_event_item.
*/ */
static unsigned long memcg_sum_events(struct mem_cgroup *memcg, static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
enum memcg_event_item event) int event)
{ {
unsigned long val = 0; unsigned long val = 0;
int cpu; int cpu;