ext4 crypto: fix memory leaks in ext4_encrypted_zeroout

ext4_encrypted_zeroout() could end up leaking a bio and bounce page.
Fortunately it's not used much.  While we're fixing things up,
refactor out common code into the static function alloc_bounce_page()
and fix up error handling if mempool_alloc() fails.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
commit 95ea68b4c7
parent c936e1ec28
Author: Theodore Ts'o <tytso@mit.edu>
Date:   2015-05-31 13:34:24 -04:00

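Before the diff, a quick orientation on the bio half of the leak: submit_bio_wait() does not drop the caller's reference, so each iteration of the zeroout loop leaked one bio. A minimal sketch of the corrected submit-and-release pattern, using the block-layer API of this era (submit_bio_wait() still takes the rw flag here; the GFP flags and the errout convention are borrowed from the surrounding function and are illustrative):

	bio = bio_alloc(GFP_NOFS, 1);	/* illustrative GFP flags */
	if (!bio) {
		err = -EIO;
		goto errout;
	}
	/* ... point the bio at the encrypted bounce page ... */
	err = submit_bio_wait(WRITE, bio);
	bio_put(bio);	/* submit_bio_wait() leaves the reference with us */
	if (err)
		goto errout;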

@@ -314,6 +314,26 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
 	return 0;
 }
 
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+{
+	struct page *ciphertext_page = alloc_page(GFP_NOFS);
+
+	if (!ciphertext_page) {
+		/* This is a potential bottleneck, but at least we'll have
+		 * forward progress. */
+		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+						GFP_NOFS);
+		if (ciphertext_page == NULL)
+			return ERR_PTR(-ENOMEM);
+		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	} else {
+		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	}
+	ctx->flags |= EXT4_WRITE_PATH_FL;
+	ctx->w.bounce_page = ciphertext_page;
+	return ciphertext_page;
+}
+
 /**
  * ext4_encrypt() - Encrypts a page
  * @inode:          The inode for which the encryption should take place
@@ -343,28 +363,17 @@ struct page *ext4_encrypt(struct inode *inode,
 		return (struct page *) ctx;
 
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_page(GFP_NOFS);
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page)) {
-			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		}
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	}
-	ctx->flags |= EXT4_WRITE_PATH_FL;
-	ctx->w.bounce_page = ciphertext_page;
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page))
+		goto errout;
 	ctx->w.control_page = plaintext_page;
 	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
 			       plaintext_page, ciphertext_page);
 	if (err) {
+		ciphertext_page = ERR_PTR(err);
+	errout:
 		ext4_release_crypto_ctx(ctx);
-		return ERR_PTR(err);
+		return ciphertext_page;
 	}
 	SetPagePrivate(ciphertext_page);
 	set_page_private(ciphertext_page, (unsigned long)ctx);
@@ -424,21 +433,11 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ciphertext_page = alloc_page(GFP_NOFS);
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page)) {
-			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		}
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page)) {
+		err = PTR_ERR(ciphertext_page);
+		goto errout;
 	}
-	ctx->w.bounce_page = ciphertext_page;
 
 	while (len--) {
 		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
@@ -460,6 +459,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 			goto errout;
 		}
 		err = submit_bio_wait(WRITE, bio);
+		bio_put(bio);
 		if (err)
 			goto errout;
 	}