
Merge tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt

Pull fsverity updates from Eric Biggers:

 - Optimize fs-verity sequential read performance by implementing
   readahead of Merkle tree pages. This allows the Merkle tree to be
   read in larger chunks.

 - Optimize FS_IOC_ENABLE_VERITY performance in the uncached case by
   implementing readahead of data pages.

 - Allocate the hash requests from a mempool in order to eliminate the
   possibility of allocation failures during I/O.

* tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt:
  fs-verity: use u64_to_user_ptr()
  fs-verity: use mempool for hash requests
  fs-verity: implement readahead of Merkle tree pages
  fs-verity: implement readahead for FS_IOC_ENABLE_VERITY
Linus Torvalds 2020-01-28 15:31:03 -08:00
commit c8994374d9
10 changed files with 274 additions and 68 deletions
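For context, FS_IOC_ENABLE_VERITY (whose uncached-case performance the second bullet above addresses) is driven from userspace roughly as follows. This is a minimal illustrative sketch, not part of the diff; the path "/mnt/file" is a placeholder, and the file must be on a filesystem with verity support enabled:

	#include <fcntl.h>
	#include <linux/fsverity.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>

	int main(void)
	{
		struct fsverity_enable_arg arg;
		int fd = open("/mnt/file", O_RDONLY);	/* placeholder path */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&arg, 0, sizeof(arg));
		arg.version = 1;
		arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
		arg.block_size = 4096;
		/* no salt and no signature in this sketch */
		if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) != 0) {
			perror("FS_IOC_ENABLE_VERITY");
			return 1;
		}
		return 0;
	}

Enabling verity reads and hashes every data block of the file, which is why the data-page readahead added below matters for large files.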

fs/ext4/verity.c

@@ -342,12 +342,55 @@ static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
 	return desc_size;
 }
 
-static struct page *ext4_read_merkle_tree_page(struct inode *inode,
-					       pgoff_t index)
+/*
+ * Prefetch some pages from the file's Merkle tree.
+ *
+ * This is basically a stripped-down version of __do_page_cache_readahead()
+ * which works on pages past i_size.
+ */
+static void ext4_merkle_tree_readahead(struct address_space *mapping,
+				       pgoff_t start_index, unsigned long count)
+{
+	LIST_HEAD(pages);
+	unsigned int nr_pages = 0;
+	struct page *page;
+	pgoff_t index;
+	struct blk_plug plug;
+
+	for (index = start_index; index < start_index + count; index++) {
+		page = xa_load(&mapping->i_pages, index);
+		if (!page || xa_is_value(page)) {
+			page = __page_cache_alloc(readahead_gfp_mask(mapping));
+			if (!page)
+				break;
+			page->index = index;
+			list_add(&page->lru, &pages);
+			nr_pages++;
+		}
+	}
+	blk_start_plug(&plug);
+	ext4_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
+	blk_finish_plug(&plug);
+}
+
+static struct page *ext4_read_merkle_tree_page(struct inode *inode,
+					       pgoff_t index,
+					       unsigned long num_ra_pages)
 {
+	struct page *page;
+
 	index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
 
-	return read_mapping_page(inode->i_mapping, index, NULL);
+	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
+	if (!page || !PageUptodate(page)) {
+		if (page)
+			put_page(page);
+		else if (num_ra_pages > 1)
+			ext4_merkle_tree_readahead(inode->i_mapping, index,
+						   num_ra_pages);
+		page = read_mapping_page(inode->i_mapping, index, NULL);
+	}
+	return page;
 }
 
 static int ext4_write_merkle_tree_block(struct inode *inode, const void *buf,
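The custom readahead helper above is needed because ext4 keeps the Merkle tree in the file's own page cache past i_size, where the normal readahead paths refuse to operate. As a hedged illustration (kernel-context sketch, not code from this diff), the index arithmetic it relies on looks like:

	/* Illustrative only: mirrors what ext4_verity_metadata_pos() implies.
	 * ext4 places verity metadata past EOF, at i_size rounded up to a
	 * 64 KiB boundary, so Merkle tree page N sits at page cache index
	 * (round_up(i_size, 65536) >> PAGE_SHIFT) + N.
	 */
	static pgoff_t merkle_page_index(loff_t i_size, pgoff_t tree_index)
	{
		return (round_up(i_size, 65536) >> PAGE_SHIFT) + tree_index;
	}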

fs/f2fs/data.c

@@ -1881,7 +1881,7 @@ out:
  * use ->readpage() or do the necessary surgery to decouple ->readpages()
  * from read-ahead.
  */
-static int f2fs_mpage_readpages(struct address_space *mapping,
+int f2fs_mpage_readpages(struct address_space *mapping,
 			struct list_head *pages, struct page *page,
 			unsigned nr_pages, bool is_readahead)
 {

fs/f2fs/f2fs.h

@@ -3229,6 +3229,9 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn);
 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
+int f2fs_mpage_readpages(struct address_space *mapping,
+			struct list_head *pages, struct page *page,
+			unsigned nr_pages, bool is_readahead);
 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
 			int op_flags, bool for_write);
 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);

fs/f2fs/verity.c

@@ -222,12 +222,55 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
 	return size;
 }
 
-static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
-					       pgoff_t index)
+/*
+ * Prefetch some pages from the file's Merkle tree.
+ *
+ * This is basically a stripped-down version of __do_page_cache_readahead()
+ * which works on pages past i_size.
+ */
+static void f2fs_merkle_tree_readahead(struct address_space *mapping,
+				       pgoff_t start_index, unsigned long count)
+{
+	LIST_HEAD(pages);
+	unsigned int nr_pages = 0;
+	struct page *page;
+	pgoff_t index;
+	struct blk_plug plug;
+
+	for (index = start_index; index < start_index + count; index++) {
+		page = xa_load(&mapping->i_pages, index);
+		if (!page || xa_is_value(page)) {
+			page = __page_cache_alloc(readahead_gfp_mask(mapping));
+			if (!page)
+				break;
+			page->index = index;
+			list_add(&page->lru, &pages);
+			nr_pages++;
+		}
+	}
+	blk_start_plug(&plug);
+	f2fs_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
+	blk_finish_plug(&plug);
+}
+
+static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
+					       pgoff_t index,
+					       unsigned long num_ra_pages)
 {
+	struct page *page;
+
 	index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
 
-	return read_mapping_page(inode->i_mapping, index, NULL);
+	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
+	if (!page || !PageUptodate(page)) {
+		if (page)
+			put_page(page);
+		else if (num_ra_pages > 1)
+			f2fs_merkle_tree_readahead(inode->i_mapping, index,
+						   num_ra_pages);
+		page = read_mapping_page(inode->i_mapping, index, NULL);
+	}
+	return page;
 }
 
 static int f2fs_write_merkle_tree_block(struct inode *inode, const void *buf,

fs/verity/enable.c

@@ -8,18 +8,48 @@
 #include "fsverity_private.h"
 
 #include <crypto/hash.h>
+#include <linux/backing-dev.h>
 #include <linux/mount.h>
 #include <linux/pagemap.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
 
-static int build_merkle_tree_level(struct inode *inode, unsigned int level,
+/*
+ * Read a file data page for Merkle tree construction.  Do aggressive readahead,
+ * since we're sequentially reading the entire file.
+ */
+static struct page *read_file_data_page(struct file *filp, pgoff_t index,
+					struct file_ra_state *ra,
+					unsigned long remaining_pages)
+{
+	struct page *page;
+
+	page = find_get_page_flags(filp->f_mapping, index, FGP_ACCESSED);
+	if (!page || !PageUptodate(page)) {
+		if (page)
+			put_page(page);
+		else
+			page_cache_sync_readahead(filp->f_mapping, ra, filp,
+						  index, remaining_pages);
+		page = read_mapping_page(filp->f_mapping, index, NULL);
+		if (IS_ERR(page))
+			return page;
+	}
+	if (PageReadahead(page))
+		page_cache_async_readahead(filp->f_mapping, ra, filp, page,
+					   index, remaining_pages);
+	return page;
+}
+
+static int build_merkle_tree_level(struct file *filp, unsigned int level,
 				   u64 num_blocks_to_hash,
 				   const struct merkle_tree_params *params,
 				   u8 *pending_hashes,
 				   struct ahash_request *req)
 {
+	struct inode *inode = file_inode(filp);
 	const struct fsverity_operations *vops = inode->i_sb->s_vop;
+	struct file_ra_state ra = { 0 };
 	unsigned int pending_size = 0;
 	u64 dst_block_num;
 	u64 i;
@@ -36,6 +66,8 @@ static int build_merkle_tree_level(struct inode *inode, unsigned int level,
 		dst_block_num = 0; /* unused */
 	}
 
+	file_ra_state_init(&ra, filp->f_mapping);
+
 	for (i = 0; i < num_blocks_to_hash; i++) {
 		struct page *src_page;
 
@@ -45,7 +77,8 @@ static int build_merkle_tree_level(struct inode *inode, unsigned int level,
 		if (level == 0) {
 			/* Leaf: hashing a data block */
-			src_page = read_mapping_page(inode->i_mapping, i, NULL);
+			src_page = read_file_data_page(filp, i, &ra,
+						       num_blocks_to_hash - i);
 			if (IS_ERR(src_page)) {
 				err = PTR_ERR(src_page);
 				fsverity_err(inode,
@@ -54,9 +87,14 @@ static int build_merkle_tree_level(struct inode *inode, unsigned int level,
 				return err;
 			}
 		} else {
+			unsigned long num_ra_pages =
+				min_t(unsigned long, num_blocks_to_hash - i,
+				      inode->i_sb->s_bdi->io_pages);
+
 			/* Non-leaf: hashing hash block from level below */
 			src_page = vops->read_merkle_tree_page(inode,
-					params->level_start[level - 1] + i);
+					params->level_start[level - 1] + i,
+					num_ra_pages);
 			if (IS_ERR(src_page)) {
 				err = PTR_ERR(src_page);
 				fsverity_err(inode,
@@ -103,17 +141,18 @@ static int build_merkle_tree_level(struct inode *inode, unsigned int level,
 }
 
 /*
- * Build the Merkle tree for the given inode using the given parameters, and
+ * Build the Merkle tree for the given file using the given parameters, and
  * return the root hash in @root_hash.
  *
  * The tree is written to a filesystem-specific location as determined by the
  * ->write_merkle_tree_block() method.  However, the blocks that comprise the
  * tree are the same for all filesystems.
  */
-static int build_merkle_tree(struct inode *inode,
+static int build_merkle_tree(struct file *filp,
 			     const struct merkle_tree_params *params,
 			     u8 *root_hash)
 {
+	struct inode *inode = file_inode(filp);
 	u8 *pending_hashes;
 	struct ahash_request *req;
 	u64 blocks;
@@ -126,9 +165,11 @@ static int build_merkle_tree(struct inode *inode,
 		return 0;
 	}
 
+	/* This allocation never fails, since it's mempool-backed. */
+	req = fsverity_alloc_hash_request(params->hash_alg, GFP_KERNEL);
+
 	pending_hashes = kmalloc(params->block_size, GFP_KERNEL);
-	req = ahash_request_alloc(params->hash_alg->tfm, GFP_KERNEL);
-	if (!pending_hashes || !req)
+	if (!pending_hashes)
 		goto out;
 
 	/*
@@ -139,7 +180,7 @@ static int build_merkle_tree(struct inode *inode,
 	blocks = (inode->i_size + params->block_size - 1) >>
 		 params->log_blocksize;
 	for (level = 0; level <= params->num_levels; level++) {
-		err = build_merkle_tree_level(inode, level, blocks, params,
+		err = build_merkle_tree_level(filp, level, blocks, params,
 					      pending_hashes, req);
 		if (err)
 			goto out;
@@ -150,7 +191,7 @@ static int build_merkle_tree(struct inode *inode,
 	err = 0;
 out:
 	kfree(pending_hashes);
-	ahash_request_free(req);
+	fsverity_free_hash_request(params->hash_alg, req);
 	return err;
 }
@@ -175,8 +216,7 @@ static int enable_verity(struct file *filp,
 
 	/* Get the salt if the user provided one */
 	if (arg->salt_size &&
-	    copy_from_user(desc->salt,
-			   (const u8 __user *)(uintptr_t)arg->salt_ptr,
+	    copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr),
 			   arg->salt_size)) {
 		err = -EFAULT;
 		goto out;
@@ -185,8 +225,7 @@ static int enable_verity(struct file *filp,
 
 	/* Get the signature if the user provided one */
 	if (arg->sig_size &&
-	    copy_from_user(desc->signature,
-			   (const u8 __user *)(uintptr_t)arg->sig_ptr,
+	    copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr),
 			   arg->sig_size)) {
 		err = -EFAULT;
 		goto out;
@@ -227,7 +266,7 @@ static int enable_verity(struct file *filp,
 	 */
 	pr_debug("Building Merkle tree...\n");
 	BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
-	err = build_merkle_tree(inode, &params, desc->root_hash);
+	err = build_merkle_tree(filp, &params, desc->root_hash);
 	if (err) {
 		fsverity_err(inode, "Error %d building Merkle tree", err);
 		goto rollback;

fs/verity/fsverity_private.h

@@ -16,6 +16,7 @@
 
 #include <crypto/sha.h>
 #include <linux/fsverity.h>
+#include <linux/mempool.h>
 
 struct ahash_request;
 
@@ -37,11 +38,12 @@ struct fsverity_hash_alg {
 	const char *name;	  /* crypto API name, e.g. sha256 */
 	unsigned int digest_size; /* digest size in bytes, e.g. 32 for SHA-256 */
 	unsigned int block_size;  /* block size in bytes, e.g. 64 for SHA-256 */
+	mempool_t req_pool;	  /* mempool with a preallocated hash request */
 };
 
 /* Merkle tree parameters: hash algorithm, initial hash state, and topology */
 struct merkle_tree_params {
-	const struct fsverity_hash_alg *hash_alg; /* the hash algorithm */
+	struct fsverity_hash_alg *hash_alg; /* the hash algorithm */
 	const u8 *hashstate;	  /* initial hash state or NULL */
 	unsigned int digest_size; /* same as hash_alg->digest_size */
 	unsigned int block_size;  /* size of data and tree blocks */
@@ -50,6 +52,7 @@ struct merkle_tree_params {
 	unsigned int log_arity;	  /* log2(hashes_per_block) */
 	unsigned int num_levels;  /* number of levels in Merkle tree */
 	u64 tree_size;		  /* Merkle tree size in bytes */
+	unsigned long level0_blocks; /* number of blocks in tree level 0 */
 
 	/*
 	 * Starting block index for each tree level, ordered from leaf level (0)
@@ -114,14 +117,18 @@ struct fsverity_signed_digest {
 extern struct fsverity_hash_alg fsverity_hash_algs[];
 
-const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
-						      unsigned int num);
-const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
+struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
+						unsigned int num);
+struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
+						  gfp_t gfp_flags);
+void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
+				struct ahash_request *req);
+const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
 				      const u8 *salt, size_t salt_size);
 int fsverity_hash_page(const struct merkle_tree_params *params,
 		       const struct inode *inode,
 		       struct ahash_request *req, struct page *page, u8 *out);
-int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
+int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
 			 const void *data, size_t size, u8 *out);
 void __init fsverity_check_hash_algs(void);

fs/verity/hash_algs.c

@@ -24,6 +24,8 @@ struct fsverity_hash_alg fsverity_hash_algs[] = {
 	},
 };
 
+static DEFINE_MUTEX(fsverity_hash_alg_init_mutex);
+
 /**
  * fsverity_get_hash_alg() - validate and prepare a hash algorithm
  * @inode: optional inode for logging purposes
@@ -36,8 +38,8 @@ struct fsverity_hash_alg fsverity_hash_algs[] = {
  *
  * Return: pointer to the hash alg on success, else an ERR_PTR()
  */
-const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
-						      unsigned int num)
+struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
+						unsigned int num)
 {
 	struct fsverity_hash_alg *alg;
 	struct crypto_ahash *tfm;
@@ -50,10 +52,15 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
 	}
 	alg = &fsverity_hash_algs[num];
 
-	/* pairs with cmpxchg() below */
-	tfm = READ_ONCE(alg->tfm);
-	if (likely(tfm != NULL))
+	/* pairs with smp_store_release() below */
+	if (likely(smp_load_acquire(&alg->tfm) != NULL))
 		return alg;
 
+	mutex_lock(&fsverity_hash_alg_init_mutex);
+
+	if (alg->tfm != NULL)
+		goto out_unlock;
+
 	/*
 	 * Using the shash API would make things a bit simpler, but the ahash
 	 * API is preferable as it allows the use of crypto accelerators.
@@ -64,12 +71,14 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
 			fsverity_warn(inode,
 				      "Missing crypto API support for hash algorithm \"%s\"",
 				      alg->name);
-			return ERR_PTR(-ENOPKG);
+			alg = ERR_PTR(-ENOPKG);
+			goto out_unlock;
 		}
 		fsverity_err(inode,
 			     "Error allocating hash algorithm \"%s\": %ld",
 			     alg->name, PTR_ERR(tfm));
-		return ERR_CAST(tfm);
+		alg = ERR_CAST(tfm);
+		goto out_unlock;
 	}
 
 	err = -EINVAL;
@@ -78,18 +87,61 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
 	if (WARN_ON(alg->block_size != crypto_ahash_blocksize(tfm)))
 		goto err_free_tfm;
 
+	err = mempool_init_kmalloc_pool(&alg->req_pool, 1,
+					sizeof(struct ahash_request) +
+					crypto_ahash_reqsize(tfm));
+	if (err)
+		goto err_free_tfm;
+
 	pr_info("%s using implementation \"%s\"\n",
 		alg->name, crypto_ahash_driver_name(tfm));
 
-	/* pairs with READ_ONCE() above */
-	if (cmpxchg(&alg->tfm, NULL, tfm) != NULL)
-		crypto_free_ahash(tfm);
-	return alg;
+	/* pairs with smp_load_acquire() above */
+	smp_store_release(&alg->tfm, tfm);
+	goto out_unlock;
 
 err_free_tfm:
 	crypto_free_ahash(tfm);
-	return ERR_PTR(err);
+	alg = ERR_PTR(err);
+out_unlock:
+	mutex_unlock(&fsverity_hash_alg_init_mutex);
+	return alg;
 }
 
+/**
+ * fsverity_alloc_hash_request() - allocate a hash request object
+ * @alg: the hash algorithm for which to allocate the request
+ * @gfp_flags: memory allocation flags
+ *
+ * This is mempool-backed, so this never fails if __GFP_DIRECT_RECLAIM is set in
+ * @gfp_flags.  However, in that case this might need to wait for all
+ * previously-allocated requests to be freed.  So to avoid deadlocks, callers
+ * must never need multiple requests at a time to make forward progress.
+ *
+ * Return: the request object on success; NULL on failure (but see above)
+ */
+struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
+						  gfp_t gfp_flags)
+{
+	struct ahash_request *req = mempool_alloc(&alg->req_pool, gfp_flags);
+
+	if (req)
+		ahash_request_set_tfm(req, alg->tfm);
+	return req;
+}
+
+/**
+ * fsverity_free_hash_request() - free a hash request object
+ * @alg: the hash algorithm
+ * @req: the hash request object to free
+ */
+void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
+				struct ahash_request *req)
+{
+	if (req) {
+		ahash_request_zero(req);
+		mempool_free(req, &alg->req_pool);
+	}
+}
 
@@ -101,7 +153,7 @@ err_free_tfm:
  * Return: NULL if the salt is empty, otherwise the kmalloc()'ed precomputed
  * initial hash state on success or an ERR_PTR() on failure.
  */
-const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
+const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
 				      const u8 *salt, size_t salt_size)
 {
 	u8 *hashstate = NULL;
@@ -119,11 +171,8 @@ const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
 	if (!hashstate)
 		return ERR_PTR(-ENOMEM);
 
-	req = ahash_request_alloc(alg->tfm, GFP_KERNEL);
-	if (!req) {
-		err = -ENOMEM;
-		goto err_free;
-	}
+	/* This allocation never fails, since it's mempool-backed. */
+	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
 
 	/*
 	 * Zero-pad the salt to the next multiple of the input size of the hash
@@ -158,7 +207,7 @@ const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
 	if (err)
 		goto err_free;
 out:
-	ahash_request_free(req);
+	fsverity_free_hash_request(alg, req);
 	kfree(padded_salt);
 	return hashstate;
 
@@ -229,7 +278,7 @@ int fsverity_hash_page(const struct merkle_tree_params *params,
 *
 * Return: 0 on success, -errno on failure
 */
-int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
+int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
 			 const void *data, size_t size, u8 *out)
 {
 	struct ahash_request *req;
@@ -237,9 +286,8 @@ int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
 	DECLARE_CRYPTO_WAIT(wait);
 	int err;
 
-	req = ahash_request_alloc(alg->tfm, GFP_KERNEL);
-	if (!req)
-		return -ENOMEM;
+	/* This allocation never fails, since it's mempool-backed. */
+	req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
 
 	sg_init_one(&sg, data, size);
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
@@ -249,7 +297,7 @@ int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
 	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 
-	ahash_request_free(req);
+	fsverity_free_hash_request(alg, req);
 	return err;
 }
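The req_pool introduced above reserves exactly one preallocated request per algorithm, which is what makes the "never fails" comments at the call sites hold: with __GFP_DIRECT_RECLAIM set, mempool_alloc() waits for the reserved object instead of returning NULL. A self-contained sketch of the same kernel mempool pattern, with illustrative names not taken from the diff:

	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct my_req { char payload[128]; };	/* illustrative object */

	static mempool_t my_pool;

	static int my_pool_init(void)
	{
		/* Keep 1 object in reserve, kmalloc'ed on demand otherwise;
		 * mirrors mempool_init_kmalloc_pool(&alg->req_pool, 1, ...). */
		return mempool_init_kmalloc_pool(&my_pool, 1,
						 sizeof(struct my_req));
	}

	static void my_io_path(void)
	{
		/* Cannot fail: GFP_NOFS includes __GFP_DIRECT_RECLAIM, so this
		 * blocks until the reserved object is available. */
		struct my_req *req = mempool_alloc(&my_pool, GFP_NOFS);

		/* ... do work with req ... */

		mempool_free(req, &my_pool);
	}

This is also why fsverity_alloc_hash_request()'s deadlock caveat exists: with a single reserved object, a caller holding one request while waiting for a second could wait forever.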

fs/verity/open.c

@@ -31,7 +31,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
 				     unsigned int log_blocksize,
 				     const u8 *salt, size_t salt_size)
 {
-	const struct fsverity_hash_alg *hash_alg;
+	struct fsverity_hash_alg *hash_alg;
 	int err;
 	u64 blocks;
 	u64 offset;
@@ -102,6 +102,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
 		/* temporarily using level_start[] to store blocks in level */
 		params->level_start[params->num_levels++] = blocks;
 	}
+	params->level0_blocks = params->level_start[0];
 
 	/* Compute the starting block of each level */
 	offset = 0;
@@ -126,7 +127,7 @@ out_err:
 * Compute the file measurement by hashing the fsverity_descriptor excluding the
 * signature and with the sig_size field set to 0.
 */
-static int compute_file_measurement(const struct fsverity_hash_alg *hash_alg,
+static int compute_file_measurement(struct fsverity_hash_alg *hash_alg,
 				    struct fsverity_descriptor *desc,
 				    u8 *measurement)
 {

fs/verity/verify.c

@@ -84,7 +84,8 @@ static inline int cmp_hashes(const struct fsverity_info *vi,
 * Return: true if the page is valid, else false.
 */
 static bool verify_page(struct inode *inode, const struct fsverity_info *vi,
-			struct ahash_request *req, struct page *data_page)
+			struct ahash_request *req, struct page *data_page,
+			unsigned long level0_ra_pages)
 {
 	const struct merkle_tree_params *params = &vi->tree_params;
 	const unsigned int hsize = params->digest_size;
@@ -117,8 +118,8 @@ static bool verify_page(struct inode *inode, const struct fsverity_info *vi,
 		pr_debug_ratelimited("Level %d: hindex=%lu, hoffset=%u\n",
 				     level, hindex, hoffset);
 
-		hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
-								  hindex);
+		hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode, hindex,
+				level == 0 ? level0_ra_pages : 0);
 		if (IS_ERR(hpage)) {
 			err = PTR_ERR(hpage);
 			fsverity_err(inode,
@@ -191,13 +192,12 @@ bool fsverity_verify_page(struct page *page)
 	struct ahash_request *req;
 	bool valid;
 
-	req = ahash_request_alloc(vi->tree_params.hash_alg->tfm, GFP_NOFS);
-	if (unlikely(!req))
-		return false;
+	/* This allocation never fails, since it's mempool-backed. */
+	req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);
 
-	valid = verify_page(inode, vi, req, page);
+	valid = verify_page(inode, vi, req, page, 0);
 
-	ahash_request_free(req);
+	fsverity_free_hash_request(vi->tree_params.hash_alg, req);
 
 	return valid;
 }
@@ -222,25 +222,42 @@
 {
 	struct inode *inode = bio_first_page_all(bio)->mapping->host;
 	const struct fsverity_info *vi = inode->i_verity_info;
+	const struct merkle_tree_params *params = &vi->tree_params;
 	struct ahash_request *req;
 	struct bio_vec *bv;
 	struct bvec_iter_all iter_all;
+	unsigned long max_ra_pages = 0;
 
-	req = ahash_request_alloc(vi->tree_params.hash_alg->tfm, GFP_NOFS);
-	if (unlikely(!req)) {
+	/* This allocation never fails, since it's mempool-backed. */
+	req = fsverity_alloc_hash_request(params->hash_alg, GFP_NOFS);
+
+	if (bio->bi_opf & REQ_RAHEAD) {
+		/*
+		 * If this bio is for data readahead, then we also do readahead
+		 * of the first (largest) level of the Merkle tree.  Namely,
+		 * when a Merkle tree page is read, we also try to piggy-back on
+		 * some additional pages -- up to 1/4 the number of data pages.
+		 *
+		 * This improves sequential read performance, as it greatly
+		 * reduces the number of I/O requests made to the Merkle tree.
+		 */
 		bio_for_each_segment_all(bv, bio, iter_all)
-			SetPageError(bv->bv_page);
-		return;
+			max_ra_pages++;
+		max_ra_pages /= 4;
 	}
 
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
+		unsigned long level0_index = page->index >> params->log_arity;
+		unsigned long level0_ra_pages =
+			min(max_ra_pages, params->level0_blocks - level0_index);
 
-		if (!PageError(page) && !verify_page(inode, vi, req, page))
+		if (!PageError(page) &&
+		    !verify_page(inode, vi, req, page, level0_ra_pages))
 			SetPageError(page);
 	}
 
-	ahash_request_free(req);
+	fsverity_free_hash_request(params->hash_alg, req);
 }
 EXPORT_SYMBOL_GPL(fsverity_verify_bio);
 
 #endif /* CONFIG_BLOCK */
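To make the sizing above concrete, a worked example with assumed numbers (SHA-256 and 4 KiB blocks, so 4096/32 = 128 hashes per block and params->log_arity = 7): a 1 MiB readahead bio covers 256 data pages, so max_ra_pages = 256 / 4 = 64. A data page at index 4096 is covered by level-0 tree block 4096 >> 7 = 32, giving level0_ra_pages = min(64, params->level0_blocks - 32). Since each tree page holds hashes for 128 data pages, one 64-page tree read can cover 8192 data pages of subsequent sequential I/O, which is where the reduction in Merkle tree I/O requests comes from.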

include/linux/fsverity.h

@@ -77,6 +77,10 @@
 	 *
 	 * @inode: the inode
 	 * @index: 0-based index of the page within the Merkle tree
+	 * @num_ra_pages: The number of Merkle tree pages that should be
+	 *		  prefetched starting at @index if the page at @index
+	 *		  isn't already cached.  Implementations may ignore this
+	 *		  argument; it's only a performance optimization.
 	 *
 	 * This can be called at any time on an open verity file, as well as
 	 * between ->begin_enable_verity() and ->end_enable_verity().  It may be
@@ -87,7 +91,8 @@
 	 * Return: the page on success, ERR_PTR() on failure
 	 */
 	struct page *(*read_merkle_tree_page)(struct inode *inode,
-					      pgoff_t index);
+					      pgoff_t index,
+					      unsigned long num_ra_pages);
 
 	/**
 	 * Write a Merkle tree block to the given inode.
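Since the documentation above states that implementations may ignore num_ra_pages, a filesystem that stores its Merkle tree in a separate internal inode could start from a minimal sketch like the following (the myfs_merkle_inode() helper is hypothetical) and layer readahead on later:

	static struct page *myfs_read_merkle_tree_page(struct inode *inode,
						       pgoff_t index,
						       unsigned long num_ra_pages)
	{
		/* Hypothetical helper: returns the inode that backs this
		 * file's Merkle tree.  num_ra_pages is deliberately ignored,
		 * which the interface permits; it only costs performance. */
		struct inode *tree_inode = myfs_merkle_inode(inode);

		return read_mapping_page(tree_inode->i_mapping, index, NULL);
	}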