
mm/free_pcppages_bulk: update pcp->count inside

Matthew Wilcox found that all callers of free_pcppages_bulk() currently
update pcp->count immediately afterwards, so it's natural to do the
update inside free_pcppages_bulk() itself.

No functionality or performance change is expected from this patch.

Link: http://lkml.kernel.org/r/20180301062845.26038-2-aaron.lu@intel.com
Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kemi Wang <kemi.wang@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Aaron Lu 2018-04-05 16:24:06 -07:00 committed by Linus Torvalds
parent bc3106b26c
commit 77ba9062e4
1 changed file with 3 additions and 7 deletions


@@ -1112,6 +1112,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			page = list_last_entry(list, struct page, lru);
 			/* must delete as __free_one_page list manipulates */
 			list_del(&page->lru);
+			pcp->count--;
 
 			mt = get_pcppage_migratetype(page);
 			/* MIGRATE_ISOLATE page should not go to pcplists */
@@ -2495,10 +2496,8 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	local_irq_save(flags);
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
-	if (to_drain > 0) {
+	if (to_drain > 0)
 		free_pcppages_bulk(zone, to_drain, pcp);
-		pcp->count -= to_drain;
-	}
 	local_irq_restore(flags);
 }
 #endif
@@ -2520,10 +2519,8 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
-	if (pcp->count) {
+	if (pcp->count)
 		free_pcppages_bulk(zone, pcp->count, pcp);
-		pcp->count = 0;
-	}
 	local_irq_restore(flags);
 }
@@ -2747,7 +2744,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
 		free_pcppages_bulk(zone, batch, pcp);
-		pcp->count -= batch;
 	}
 }
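
For readers skimming the change, a minimal, self-contained userspace sketch
of the calling convention this patch establishes is given below. It is not
kernel code: struct pcp_model, free_pcppages_bulk_model() and drain_model()
are simplified stand-ins for per_cpu_pages, free_pcppages_bulk() and
drain_pages_zone(), invented here only to illustrate that the count update
now lives inside the bulk-free helper rather than in each caller.

/* Illustrative userspace model only; names loosely mirror the kernel's. */
#include <stdio.h>

struct pcp_model {
	int count;			/* pages sitting on the per-cpu list */
};

/* After the patch: the helper itself decrements count as pages are freed. */
static void free_pcppages_bulk_model(struct pcp_model *pcp, int todo)
{
	while (todo-- > 0) {
		/* ...hand one page back to the buddy allocator... */
		pcp->count--;	/* the update callers used to do by hand */
	}
}

/* A caller in the style of drain_pages_zone(): no count bookkeeping left. */
static void drain_model(struct pcp_model *pcp)
{
	if (pcp->count)
		free_pcppages_bulk_model(pcp, pcp->count);
}

int main(void)
{
	struct pcp_model pcp = { .count = 8 };

	drain_model(&pcp);
	printf("count after drain: %d\n", pcp.count);	/* prints 0 */
	return 0;
}

The point of the refactoring is visible in drain_model(): before the patch,
every such caller had to follow the bulk free with its own fixup such as
pcp->count -= to_drain or pcp->count = 0, and keeping those fixups in sync
with the helper was easy to get wrong.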