
[PATCH] mm: use __GFP_NOMEMALLOC

Use the new __GFP_NOMEMALLOC to simplify the previous handling of
PF_MEMALLOC.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Nick Piggin 2005-05-01 08:58:37 -07:00 committed by Linus Torvalds
parent 20a77776c2
commit bd53b714d3
2 changed files with 13 additions and 33 deletions
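
The whole change boils down to replacing a save/clear/restore dance on current->flags with one extra bit in the gfp mask. A minimal standalone sketch of the two idioms, using userspace stubs with illustrative constants and invented names (task, do_alloc), not the kernel's actual definitions:

#include <stdio.h>

/* Illustrative stand-ins; the real definitions live in
 * <linux/sched.h> and <linux/gfp.h>. */
#define PF_MEMALLOC      0x0800u  /* task may dip into emergency reserves */
#define GFP_NOIO         0x0010u
#define __GFP_NOMEMALLOC 0x10000u /* never touch emergency reserves */

static struct { unsigned long flags; } task = { PF_MEMALLOC };

static void *do_alloc(unsigned int gfp_mask)
{
	static char page[64];	/* stand-in allocator */
	printf("alloc with gfp 0x%x, task flags 0x%lx\n", gfp_mask, task.flags);
	return page;
}

/* Old idiom: save, clear, and later restore PF_MEMALLOC by hand. */
static void *alloc_old_way(void)
{
	unsigned long flags = task.flags;
	void *p;

	task.flags &= ~PF_MEMALLOC;	/* drop reserve access */
	p = do_alloc(GFP_NOIO);
	if (flags & PF_MEMALLOC)	/* restore only if it was set */
		task.flags |= PF_MEMALLOC;
	return p;
}

/* New idiom: one extra gfp flag, no task-flag bookkeeping,
 * and nothing to restore on early-return paths. */
static void *alloc_new_way(void)
{
	return do_alloc(GFP_NOIO | __GFP_NOMEMALLOC);
}

int main(void)
{
	alloc_old_way();
	alloc_new_way();
	return 0;
}

The new form also removes the bracketing that every early return had to remember, which is exactly what the dm-crypt hunk below deletes.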

drivers/md/dm-crypt.c

@@ -331,25 +331,19 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 	struct bio *bio;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned long flags = current->flags;
 	unsigned int i;
 
 	/*
-	 * Tell VM to act less aggressively and fail earlier.
-	 * This is not necessary but increases throughput.
+	 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
+	 * to fail earlier. This is not necessary but increases throughput.
 	 * FIXME: Is this really intelligent?
 	 */
-	current->flags &= ~PF_MEMALLOC;
-
 	if (base_bio)
-		bio = bio_clone(base_bio, GFP_NOIO);
+		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
 	else
-		bio = bio_alloc(GFP_NOIO, nr_iovecs);
-	if (!bio) {
-		if (flags & PF_MEMALLOC)
-			current->flags |= PF_MEMALLOC;
+		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
+	if (!bio)
 		return NULL;
-	}
 
 	/* if the last bio was not complete, continue where that one ended */
 	bio->bi_idx = *bio_vec_idx;
@@ -386,9 +380,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		size -= bv->bv_len;
 	}
 
-	if (flags & PF_MEMALLOC)
-		current->flags |= PF_MEMALLOC;
-
 	if (!bio->bi_size) {
 		bio_put(bio);
 		return NULL;

mm/swap_state.c

@@ -143,7 +143,6 @@ void __delete_from_swap_cache(struct page *page)
 int add_to_swap(struct page * page)
 {
 	swp_entry_t entry;
-	int pf_flags;
 	int err;
 
 	if (!PageLocked(page))
@@ -154,29 +153,19 @@ int add_to_swap(struct page * page)
 	if (!entry.val)
 		return 0;
 
-	/* Radix-tree node allocations are performing
-	 * GFP_ATOMIC allocations under PF_MEMALLOC.
-	 * They can completely exhaust the page allocator.
-	 *
-	 * So PF_MEMALLOC is dropped here. This causes the slab
-	 * allocations to fail earlier, so radix-tree nodes will
-	 * then be allocated from the mempool reserves.
-	 *
-	 * We're still using __GFP_HIGH for radix-tree node
-	 * allocations, so some of the emergency pools are available,
-	 * just not all of them.
+	/*
+	 * Radix-tree node allocations from PF_MEMALLOC contexts could
+	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
+	 * stops emergency reserves from being allocated.
+	 *
+	 * TODO: this could cause a theoretical memory reclaim
+	 * deadlock in the swap out path.
 	 */
-	pf_flags = current->flags;
-	current->flags &= ~PF_MEMALLOC;
-
 	/*
 	 * Add it to the swap cache and mark it dirty
 	 */
-	err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);
-
-	if (pf_flags & PF_MEMALLOC)
-		current->flags |= PF_MEMALLOC;
+	err = __add_to_swap_cache(page, entry,
+			GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
 
 	switch (err) {
 	case 0:				/* Success */
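
For context on why passing the flag is enough: in the page allocator of this era, a PF_MEMALLOC task is allowed to ignore the zone watermarks and drain the emergency reserves, and __GFP_NOMEMALLOC vetoes exactly that. A toy model of the decision, standalone and with made-up names (may_use_reserves); the real check sits in __alloc_pages() in mm/page_alloc.c:

#include <stdbool.h>
#include <stdio.h>

#define PF_MEMALLOC      0x0800u  /* illustrative values, not the */
#define __GFP_NOMEMALLOC 0x10000u /* kernel's actual constants    */

/* Toy model: may this allocation dip into the emergency reserves? */
static bool may_use_reserves(unsigned long task_flags, unsigned int gfp_mask)
{
	if (!(task_flags & PF_MEMALLOC))
		return false;	/* ordinary task: reserves are off limits */
	if (gfp_mask & __GFP_NOMEMALLOC)
		return false;	/* caller opted out: fail early instead */
	return true;		/* reclaim context: last-resort access */
}

int main(void)
{
	/* The add_to_swap() case above: a PF_MEMALLOC task whose
	 * radix-tree node allocation now carries __GFP_NOMEMALLOC. */
	printf("%d\n", may_use_reserves(PF_MEMALLOC, __GFP_NOMEMALLOC)); /* 0 */
	printf("%d\n", may_use_reserves(PF_MEMALLOC, 0));                /* 1 */
	return 0;
}

Hence the TODO in the new comment: if the swap-out path itself is the reclaimer and its now-failable allocation has no fallback, reclaim can in theory wedge.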