
mm: compaction: add trace events for memory compaction activity

In preparation for patches promoting the use of memory compaction over
lumpy reclaim, this patch adds trace points for memory compaction
activity.  Using them, we can monitor the scanning activity of the
migration and free page scanners as well as the number and success rates
of pages passed to page migration.
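As a rough illustration of how these events can be consumed (not part of the patch itself): once a kernel with this change is running, the events can be switched on through the tracing debugfs directory and their formatted output read back from trace_pipe. The sketch below is a minimal userspace reader, assuming the conventional /sys/kernel/debug/tracing mount point; the event names are taken from the declarations introduced by this patch, and error handling is kept deliberately thin.

/*
 * Minimal sketch of a userspace consumer for the new compaction
 * tracepoints.  Assumes the conventional tracing directory
 * /sys/kernel/debug/tracing; adjust TRACING_DIR if it is mounted
 * elsewhere on your system.
 */
#include <stdio.h>

#define TRACING_DIR "/sys/kernel/debug/tracing"

/* Write "1" to the per-event enable file under the compaction group. */
static int enable_event(const char *event)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/events/compaction/%s/enable",
		 TRACING_DIR, event);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[512];
	FILE *pipe;

	/* Event names match the DEFINE_EVENT/TRACE_EVENT declarations. */
	enable_event("mm_compaction_isolate_migratepages");
	enable_event("mm_compaction_isolate_freepages");
	enable_event("mm_compaction_migratepages");

	/* trace_pipe blocks until events arrive and consumes them as read. */
	pipe = fopen(TRACING_DIR "/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);	/* lines carry nr_scanned/nr_taken etc. */
	fclose(pipe);
	return 0;
}

Each printed line carries the nr_scanned/nr_taken or nr_migrated/nr_failed pairs defined by the TP_printk formats in the new header, which is enough to watch scanner activity and the migration success rate while compaction runs.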

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Mel Gorman 2011-01-13 15:45:54 -08:00 committed by Linus Torvalds
parent 2d90508f63
commit b7aba6984d
2 changed files with 87 additions and 1 deletion

include/trace/events/compaction.h (new file)

@@ -0,0 +1,74 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM compaction
+
+#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COMPACTION_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include "gfpflags.h"
+
+DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
+
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr_scanned)
+		__field(unsigned long, nr_taken)
+	),
+
+	TP_fast_assign(
+		__entry->nr_scanned = nr_scanned;
+		__entry->nr_taken = nr_taken;
+	),
+
+	TP_printk("nr_scanned=%lu nr_taken=%lu",
+		__entry->nr_scanned,
+		__entry->nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
+
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
+
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken)
+);
+
+TRACE_EVENT(mm_compaction_migratepages,
+
+	TP_PROTO(unsigned long nr_migrated,
+		unsigned long nr_failed),
+
+	TP_ARGS(nr_migrated, nr_failed),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr_migrated)
+		__field(unsigned long, nr_failed)
+	),
+
+	TP_fast_assign(
+		__entry->nr_migrated = nr_migrated;
+		__entry->nr_failed = nr_failed;
+	),
+
+	TP_printk("nr_migrated=%lu nr_failed=%lu",
+		__entry->nr_migrated,
+		__entry->nr_failed)
+);
+
+#endif /* _TRACE_COMPACTION_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
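Note that the two isolation events above are instances of the shared mm_compaction_isolate_template class, so they inherit its entry layout, assignment and print format. As a purely hypothetical sketch (the event name below is invented and not part of this patch), a further scanner could be traced by adding one more DEFINE_EVENT against the same template:

/* Hypothetical example only: a third event reusing the isolate template.
 * Just the name differs; TP_STRUCT__entry, TP_fast_assign and TP_printk
 * all come from the DECLARE_EVENT_CLASS above. */
DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_example,

	TP_PROTO(unsigned long nr_scanned,
		unsigned long nr_taken),

	TP_ARGS(nr_scanned, nr_taken)
);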

mm/compaction.c

@@ -16,6 +16,9 @@
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/compaction.h>
+
 /*
  * compact_control is used to track pages being migrated and the free pages
  * they are being migrated to during memory compaction.  The free_pfn starts
@@ -60,7 +63,7 @@ static unsigned long isolate_freepages_block(struct zone *zone,
 				struct list_head *freelist)
 {
 	unsigned long zone_end_pfn, end_pfn;
-	int total_isolated = 0;
+	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
 
 	/* Get the last PFN we should scan for free pages at */
@@ -81,6 +84,7 @@ static unsigned long isolate_freepages_block(struct zone *zone,
 		if (!pfn_valid_within(blockpfn))
 			continue;
+		nr_scanned++;
 		if (!PageBuddy(page))
 			continue;
@@ -100,6 +104,7 @@ static unsigned long isolate_freepages_block(struct zone *zone,
 		}
 	}
 
+	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
 	return total_isolated;
 }
@@ -234,6 +239,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
 					struct compact_control *cc)
 {
 	unsigned long low_pfn, end_pfn;
+	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 
 	/* Do not scan outside zone boundaries */
@@ -266,6 +272,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
 		struct page *page;
 		if (!pfn_valid_within(low_pfn))
 			continue;
+		nr_scanned++;
 
 		/* Get the page and skip if free */
 		page = pfn_to_page(low_pfn);
@@ -280,6 +287,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
 		del_page_from_lru_list(zone, page, page_lru(page));
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
+		nr_isolated++;
 
 		/* Avoid isolating too much */
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
@@ -291,6 +299,8 @@ static unsigned long isolate_migratepages(struct zone *zone,
 	spin_unlock_irq(&zone->lru_lock);
 	cc->migrate_pfn = low_pfn;
 
+	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+
 	return cc->nr_migratepages;
 }
@@ -401,6 +411,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
 		if (nr_remaining)
 			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
+		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
+						nr_remaining);
 
 		/* Release LRU pages not migrated */
 		if (!list_empty(&cc->migratepages)) {