slub: min order when debug_guardpage_minorder > 0
Disable slub debug facilities and allocate slabs at minimal order when debug_guardpage_minorder > 0 to increase the probability of catching random memory corruption by CPU exception.

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Branch: hifive-unleashed-5.1
parent: c6968e73b9
commit: fc8d8620d3
|
@@ -3654,6 +3654,9 @@ void __init kmem_cache_init(void)
 	struct kmem_cache *temp_kmem_cache_node;
 	unsigned long kmalloc_size;

+	if (debug_guardpage_minorder())
+		slub_max_order = 0;
+
 	kmem_size = offsetof(struct kmem_cache, node) +
 			nr_node_ids * sizeof(struct kmem_cache_node *);

Loading…
Reference in New Issue