
Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 bugfixes from Ted Ts'o:
 "These changes contains a fix for overlayfs interacting with some
  (badly behaved) dentry code in various file systems.  These have been
  reviewed by Al and the respective file system mtinainers and are going
  through the ext4 tree for convenience.

  This also has a few ext4 encryption bug fixes that were discovered in
  Android testing (yes, we will need to get these sync'ed up with the
  fs/crypto code; I'll take care of that).  It also has some bug fixes
  and a change to ignore the legacy quota options to allow for xfstests
  regression testing of ext4's internal quota feature and to be more
  consistent with how xfs handles this case"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: ignore quota mount options if the quota feature is enabled
  ext4 crypto: fix some error handling
  ext4: avoid calling dquot_get_next_id() if quota is not enabled
  ext4: retry block allocation for failed DIO and DAX writes
  ext4: add lockdep annotations for i_data_sem
  ext4: allow readdir()'s of large empty directories to be interrupted
  btrfs: fix crash/invalid memory access on fsync when using overlayfs
  ext4 crypto: use dget_parent() in ext4_d_revalidate()
  ext4: use file_dentry()
  ext4: use dget_parent() in ext4_file_open()
  nfs: use file_dentry()
  fs: add file_dentry()
  ext4 crypto: don't let data integrity writebacks fail with ENOMEM
  ext4: check if in-inode xattr is corrupted in ext4_expand_extra_isize_ea()
Linus Torvalds 2016-04-07 17:22:20 -07:00
commit 93061f390f
19 changed files with 264 additions and 86 deletions
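The common thread in the btrfs, ext4 and nfs hunks below is replacing file->f_path.dentry with file_dentry(file), so that a file opened through overlayfs is handled via the dentry of the filesystem that actually owns the inode. A minimal caller-side sketch of that pattern, assuming a hypothetical filesystem method myfs_fsync (not part of this series):

#include <linux/fs.h>
#include <linux/dcache.h>

/* Hypothetical ->fsync method showing the caller-side conversion. */
static int myfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	/*
	 * file->f_path.dentry may belong to overlayfs; file_dentry() (added
	 * to include/linux/fs.h at the end of this series) returns the
	 * dentry belonging to the filesystem that owns file_inode(file).
	 */
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);

	/* ... filesystem-specific sync work against inode ... */
	return 0;
}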

fs/btrfs/file.c

@ -1905,7 +1905,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;

fs/dcache.c

@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
DCACHE_OP_REVALIDATE |
DCACHE_OP_WEAK_REVALIDATE |
DCACHE_OP_DELETE |
DCACHE_OP_SELECT_INODE));
DCACHE_OP_SELECT_INODE |
DCACHE_OP_REAL));
dentry->d_op = op;
if (!op)
return;
@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
dentry->d_flags |= DCACHE_OP_PRUNE;
if (op->d_select_inode)
dentry->d_flags |= DCACHE_OP_SELECT_INODE;
if (op->d_real)
dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);

fs/ext4/crypto.c

@ -91,7 +91,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
* Return: An allocated and initialized encryption context on success; error
* value or NULL otherwise.
*/
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
gfp_t gfp_flags)
{
struct ext4_crypto_ctx *ctx = NULL;
int res = 0;
@ -118,7 +119,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
list_del(&ctx->free_list);
spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
if (!ctx) {
ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
if (!ctx) {
res = -ENOMEM;
goto out;
@ -255,7 +256,8 @@ static int ext4_page_crypto(struct inode *inode,
ext4_direction_t rw,
pgoff_t index,
struct page *src_page,
struct page *dest_page)
struct page *dest_page,
gfp_t gfp_flags)
{
u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@ -266,7 +268,7 @@ static int ext4_page_crypto(struct inode *inode,
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
req = skcipher_request_alloc(tfm, GFP_NOFS);
req = skcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
"%s: crypto_request_alloc() failed\n",
@ -307,9 +309,10 @@ static int ext4_page_crypto(struct inode *inode,
return 0;
}
static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
gfp_t gfp_flags)
{
ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
if (ctx->w.bounce_page == NULL)
return ERR_PTR(-ENOMEM);
ctx->flags |= EXT4_WRITE_PATH_FL;
@ -332,7 +335,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
* error value or NULL.
*/
struct page *ext4_encrypt(struct inode *inode,
struct page *plaintext_page)
struct page *plaintext_page,
gfp_t gfp_flags)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
@ -340,17 +344,17 @@ struct page *ext4_encrypt(struct inode *inode,
BUG_ON(!PageLocked(plaintext_page));
ctx = ext4_get_crypto_ctx(inode);
ctx = ext4_get_crypto_ctx(inode, gfp_flags);
if (IS_ERR(ctx))
return (struct page *) ctx;
/* The encryption operation will require a bounce page. */
ciphertext_page = alloc_bounce_page(ctx);
ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
if (IS_ERR(ciphertext_page))
goto errout;
ctx->w.control_page = plaintext_page;
err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
plaintext_page, ciphertext_page);
plaintext_page, ciphertext_page, gfp_flags);
if (err) {
ciphertext_page = ERR_PTR(err);
errout:
@ -378,8 +382,8 @@ int ext4_decrypt(struct page *page)
{
BUG_ON(!PageLocked(page));
return ext4_page_crypto(page->mapping->host,
EXT4_DECRYPT, page->index, page, page);
return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
page->index, page, page, GFP_NOFS);
}
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
@ -398,11 +402,11 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
ctx = ext4_get_crypto_ctx(inode);
ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ciphertext_page = alloc_bounce_page(ctx);
ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
if (IS_ERR(ciphertext_page)) {
err = PTR_ERR(ciphertext_page);
goto errout;
@ -410,11 +414,12 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
while (len--) {
err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page);
ZERO_PAGE(0), ciphertext_page,
GFP_NOFS);
if (err)
goto errout;
bio = bio_alloc(GFP_KERNEL, 1);
bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
err = -ENOMEM;
goto errout;
@ -473,13 +478,16 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
*/
static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct inode *dir = d_inode(dentry->d_parent);
struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
struct dentry *dir;
struct ext4_crypt_info *ci;
int dir_has_key, cached_with_key;
if (!ext4_encrypted_inode(dir))
dir = dget_parent(dentry);
if (!ext4_encrypted_inode(d_inode(dir))) {
dput(dir);
return 0;
}
ci = EXT4_I(d_inode(dir))->i_crypt_info;
if (ci && ci->ci_keyring_key &&
(ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED) |
@ -489,6 +497,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
/* this should eventually be an flag in d_flags */
cached_with_key = dentry->d_fsdata != NULL;
dir_has_key = (ci != NULL);
dput(dir);
/*
* If the dentry was cached without the key, and it is a

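The ext4_d_revalidate() change above (and the ext4_file_open() change below) replaces a bare dentry->d_parent dereference with a dget_parent()/dput() pair, so the parent cannot be freed or swapped by a concurrent rename while it is being examined. A minimal sketch of the pattern, using a hypothetical helper name:

#include <linux/dcache.h>
#include <linux/fs.h>

/* Hypothetical d_op helper that needs to look at the parent's inode. */
static int myfs_parent_needs_key(struct dentry *dentry)
{
	struct dentry *dir = dget_parent(dentry);	/* pins a stable parent */
	struct inode *dir_inode = d_inode(dir);
	int ret = 0;

	/* ... inspect dir_inode (e.g. its encryption context) ... */

	dput(dir);					/* drop the reference */
	return ret;
}
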
fs/ext4/dir.c

@ -150,6 +150,11 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
while (ctx->pos < inode->i_size) {
struct ext4_map_blocks map;
if (fatal_signal_pending(current)) {
err = -ERESTARTSYS;
goto errout;
}
cond_resched();
map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
err = ext4_map_blocks(NULL, inode, &map, 0);

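Both this ext4_readdir() hunk and the ext4_htree_fill_tree() hunk further down add the same guard to long directory scans: bail out on a fatal signal and reschedule between blocks. A condensed sketch of that guard, with hypothetical names and the loop body left as a placeholder:

#include <linux/fs.h>
#include <linux/sched.h>	/* current, fatal_signal_pending(), cond_resched() */

/* Hypothetical directory scan; one iteration per directory block. */
static int myfs_scan_dir(struct inode *dir, loff_t *pos)
{
	int err = 0;

	while (*pos < i_size_read(dir)) {
		/* Let SIGKILL & co. abort a huge (or corrupted) scan. */
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		cond_resched();			/* stay preemption-friendly */

		/* ... map and process the directory block at *pos ... */
		(*pos)++;			/* placeholder advance */
	}
	return err;
}
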
fs/ext4/ext4.h

@ -911,6 +911,29 @@ do { \
#include "extents_status.h"
/*
* Lock subclasses for i_data_sem in the ext4_inode_info structure.
*
* These are needed to avoid lockdep false positives when we need to
* allocate blocks to the quota inode during ext4_map_blocks(), while
* holding i_data_sem for a normal (non-quota) inode. Since we don't
* do quota tracking for the quota inode, this avoids deadlock (as
* well as infinite recursion, since it isn't turtles all the way
* down...)
*
* I_DATA_SEM_NORMAL - Used for most inodes
* I_DATA_SEM_OTHER - Used by move_inode.c for the second normal inode
* where the second inode has larger inode number
* than the first
* I_DATA_SEM_QUOTA - Used for quota inodes only
*/
enum {
I_DATA_SEM_NORMAL = 0,
I_DATA_SEM_OTHER,
I_DATA_SEM_QUOTA,
};
/*
* fourth extended file system inode data in memory
*/
@ -2282,11 +2305,13 @@ extern struct kmem_cache *ext4_crypt_info_cachep;
bool ext4_valid_contents_enc_mode(uint32_t mode);
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
extern struct workqueue_struct *ext4_read_workqueue;
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
gfp_t gfp_flags);
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
void ext4_restore_control_page(struct page *data_page);
struct page *ext4_encrypt(struct inode *inode,
struct page *plaintext_page);
struct page *plaintext_page,
gfp_t gfp_flags);
int ext4_decrypt(struct page *page);
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
ext4_fsblk_t pblk, ext4_lblk_t len);

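The i_data_sem subclasses declared above are consumed in two places below: move_extent.c passes I_DATA_SEM_OTHER to down_write_nested() for the second inode, and super.c moves the quota inode's rwsem to I_DATA_SEM_QUOTA via lockdep_set_subclass(). A condensed sketch of that annotation pattern (not the literal ext4 code):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

enum {
	I_DATA_SEM_NORMAL = 0,	/* default subclass from init_rwsem() */
	I_DATA_SEM_OTHER,	/* second inode when two are locked together */
	I_DATA_SEM_QUOTA,	/* the quota inode itself */
};

/* Reclassify a quota inode's rwsem so nested acquisition is not reported. */
static void mark_quota_rwsem(struct rw_semaphore *sem)
{
	lockdep_set_subclass(sem, I_DATA_SEM_QUOTA);
}

/* Lock two same-class rwsems in a fixed order, annotating the second one. */
static void double_down_write(struct rw_semaphore *a, struct rw_semaphore *b)
{
	down_write(a);
	down_write_nested(b, I_DATA_SEM_OTHER);
}
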
fs/ext4/file.c

@ -329,7 +329,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct vfsmount *mnt = filp->f_path.mnt;
struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
struct dentry *dir;
struct path path;
char buf[64], *cp;
int ret;
@ -373,14 +373,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
if (ext4_encryption_info(inode) == NULL)
return -ENOKEY;
}
if (ext4_encrypted_inode(dir) &&
!ext4_is_child_context_consistent_with_parent(dir, inode)) {
dir = dget_parent(file_dentry(filp));
if (ext4_encrypted_inode(d_inode(dir)) &&
!ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
ext4_warning(inode->i_sb,
"Inconsistent encryption contexts: %lu/%lu\n",
(unsigned long) dir->i_ino,
(unsigned long) d_inode(dir)->i_ino,
(unsigned long) inode->i_ino);
dput(dir);
return -EPERM;
}
dput(dir);
/*
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present

fs/ext4/inode.c

@ -763,39 +763,47 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
static handle_t *start_dio_trans(struct inode *inode,
struct buffer_head *bh_result)
/*
* Get blocks function for the cases that need to start a transaction -
* generally difference cases of direct IO and DAX IO. It also handles retries
* in case of ENOSPC.
*/
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int flags)
{
int dio_credits;
handle_t *handle;
int retries = 0;
int ret;
/* Trim mapping request to maximum we can map at once for DIO */
if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
dio_credits = ext4_chunk_trans_blocks(inode,
bh_result->b_size >> inode->i_blkbits);
return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
retry:
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = _ext4_get_block(inode, iblock, bh_result, flags);
ext4_journal_stop(handle);
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
return ret;
}
/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
handle_t *handle;
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
if (create) {
handle = start_dio_trans(inode, bh);
if (IS_ERR(handle))
return PTR_ERR(handle);
}
ret = _ext4_get_block(inode, iblock, bh,
create ? EXT4_GET_BLOCKS_CREATE : 0);
if (create)
ext4_journal_stop(handle);
return ret;
if (!create)
return _ext4_get_block(inode, iblock, bh, 0);
return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}
/*
@ -806,18 +814,13 @@ int ext4_dio_get_block(struct inode *inode, sector_t iblock,
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create)
{
handle_t *handle;
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
handle = start_dio_trans(inode, bh_result);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = _ext4_get_block(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
ext4_journal_stop(handle);
ret = ext4_get_block_trans(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
/*
* When doing DIO using unwritten extents, we need io_end to convert
@ -850,18 +853,13 @@ static int ext4_dio_get_block_unwritten_async(struct inode *inode,
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
sector_t iblock, struct buffer_head *bh_result, int create)
{
handle_t *handle;
int ret;
/* We don't expect handle for direct IO */
WARN_ON_ONCE(ext4_journal_current_handle());
handle = start_dio_trans(inode, bh_result);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = _ext4_get_block(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
ext4_journal_stop(handle);
ret = ext4_get_block_trans(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
/*
* Mark inode as having pending DIO writes to unwritten extents.

fs/ext4/move_extent.c

@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
{
if (first < second) {
down_write(&EXT4_I(first)->i_data_sem);
down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
} else {
down_write(&EXT4_I(second)->i_data_sem);
down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
}
}
@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
return -EBUSY;
}
if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should "
"not be quota files [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EBUSY;
}
/* Ext4 move extent supports only extent based file */
if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "

fs/ext4/namei.c

@ -1107,6 +1107,11 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
}
while (1) {
if (signal_pending(current)) {
err = -ERESTARTSYS;
goto errout;
}
cond_resched();
block = dx_get_block(frame->at);
ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
start_hash, start_minor_hash);

fs/ext4/page-io.c

@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@ -470,9 +471,20 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
nr_to_submit) {
data_page = ext4_encrypt(inode, page);
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
data_page = ext4_encrypt(inode, page, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
if (io->io_bio) {
ext4_io_submit(io);
congestion_wait(BLK_RW_ASYNC, HZ/50);
}
gfp_flags |= __GFP_NOFAIL;
goto retry_encrypt;
}
data_page = NULL;
goto out;
}

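The page-io.c hunk above retries the bounce-page encryption when a GFP_NOFS allocation fails during data-integrity writeback, after flushing queued I/O and waiting for write congestion. A condensed sketch of that escalation policy, using a hypothetical allocation helper:

#include <linux/gfp.h>
#include <linux/blkdev.h>	/* BLK_RW_ASYNC */
#include <linux/backing-dev.h>	/* congestion_wait() */
#include <linux/writeback.h>	/* struct writeback_control, WB_SYNC_ALL */

/* Hypothetical helper: try GFP_NOFS first, escalate only for WB_SYNC_ALL. */
static struct page *alloc_for_writeback(struct writeback_control *wbc)
{
	gfp_t gfp_flags = GFP_NOFS;
	struct page *page;

retry:
	page = alloc_page(gfp_flags);
	if (!page && wbc->sync_mode == WB_SYNC_ALL) {
		/*
		 * Data-integrity writeback must not drop the page: wait for
		 * write congestion to ease, then retry with __GFP_NOFAIL so
		 * the allocation cannot fail again.
		 */
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry;
	}
	return page;
}
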
fs/ext4/readpage.c

@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
ctx = ext4_get_crypto_ctx(inode);
ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
goto set_error_page;
}

fs/ext4/super.c

@ -1113,6 +1113,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
static struct dquot **ext4_get_dquots(struct inode *inode)
{
@ -1129,7 +1130,7 @@ static const struct dquot_operations ext4_quota_operations = {
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
.get_projid = ext4_get_projid,
.get_next_id = dquot_get_next_id,
.get_next_id = ext4_get_next_id,
};
static const struct quotactl_ops ext4_qctl_operations = {
@ -1323,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
return -1;
}
if (ext4_has_feature_quota(sb)) {
ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
"when QUOTA feature is enabled");
return -1;
ext4_msg(sb, KERN_INFO, "Journaled quota options "
"ignored when QUOTA feature is enabled");
return 1;
}
qname = match_strdup(args);
if (!qname) {
@ -1688,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
return -1;
}
if (ext4_has_feature_quota(sb)) {
ext4_msg(sb, KERN_ERR,
"Cannot set journaled quota options "
ext4_msg(sb, KERN_INFO,
"Quota format mount options ignored "
"when QUOTA feature is enabled");
return -1;
return 1;
}
sbi->s_jquota_fmt = m->mount_opt;
#endif
@ -1756,11 +1757,11 @@ static int parse_options(char *options, struct super_block *sb,
#ifdef CONFIG_QUOTA
if (ext4_has_feature_quota(sb) &&
(test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
"feature is enabled");
return 0;
}
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
"mount options ignored.");
clear_opt(sb, USRQUOTA);
clear_opt(sb, GRPQUOTA);
} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
@ -5028,6 +5029,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
EXT4_SB(sb)->s_jquota_fmt, type);
}
static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
struct ext4_inode_info *ei = EXT4_I(inode);
/* The first argument of lockdep_set_subclass has to be
* *exactly* the same as the argument to init_rwsem() --- in
* this case, in init_once() --- or lockdep gets unhappy
* because the name of the lock is set using the
* stringification of the argument to init_rwsem().
*/
(void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
lockdep_set_subclass(&ei->i_data_sem, subclass);
}
/*
* Standard function to be called on quota_on
*/
@ -5067,8 +5082,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
if (err)
return err;
}
return dquot_quota_on(sb, type, format_id, path);
lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
err = dquot_quota_on(sb, type, format_id, path);
if (err)
lockdep_set_quota_inode(path->dentry->d_inode,
I_DATA_SEM_NORMAL);
return err;
}
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@ -5095,8 +5114,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
/* Don't account quota for quota files to avoid recursion */
qf_inode->i_flags |= S_NOQUOTA;
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
err = dquot_enable(qf_inode, type, format_id, flags);
iput(qf_inode);
if (err)
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
return err;
}
@ -5253,6 +5275,17 @@ out:
return len;
}
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
const struct quota_format_ops *ops;
if (!sb_has_quota_loaded(sb, qid->type))
return -ESRCH;
ops = sb_dqopt(sb)->ops[qid->type];
if (!ops || !ops->get_next_id)
return -ENOSYS;
return dquot_get_next_id(sb, qid);
}
#endif
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,

fs/ext4/xattr.c

@ -230,6 +230,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
return error;
}
static int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
void *end, const char *function, unsigned int line)
{
struct ext4_xattr_entry *entry = IFIRST(header);
int error = -EFSCORRUPTED;
if (((void *) header >= end) ||
(header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
goto errout;
error = ext4_xattr_check_names(entry, end, entry);
errout:
if (error)
__ext4_error_inode(inode, function, line, 0,
"corrupted in-inode xattr");
return error;
}
#define xattr_check_inode(inode, header, end) \
__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
static inline int
ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
{
@ -341,7 +362,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
header = IHDR(inode, raw_inode);
entry = IFIRST(header);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
error = ext4_xattr_check_names(entry, end, entry);
error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
error = ext4_xattr_find_entry(&entry, name_index, name,
@ -477,7 +498,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
error = ext4_xattr_list_entries(dentry, IFIRST(header),
@ -1040,8 +1061,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
is->s.here = is->s.first;
is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
error = ext4_xattr_check_names(IFIRST(header), is->s.end,
IFIRST(header));
error = xattr_check_inode(inode, header, is->s.end);
if (error)
return error;
/* Find the named attribute. */
@ -1356,6 +1376,10 @@ retry:
last = entry;
total_ino = sizeof(struct ext4_xattr_ibody_header);
error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
if (free >= new_extra_isize) {
entry = IFIRST(header);

fs/nfs/dir.c

@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
again:
timestamp = jiffies;
gencount = nfs_inc_attr_generation_counter();
error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
NFS_SERVER(inode)->dtsize, desc->plus);
if (error < 0) {
/* We requested READDIRPLUS, but the server doesn't grok it */
@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
count++;
if (desc->plus != 0)
nfs_prime_dcache(desc->file->f_path.dentry, entry);
nfs_prime_dcache(file_dentry(desc->file), entry);
status = nfs_readdir_add_to_array(entry, page);
if (status != 0)
@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
*/
static int nfs_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;

fs/nfs/inode.c

@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);

fs/nfs/nfs4file.c

@ -26,7 +26,7 @@ static int
nfs4_file_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
struct dentry *dentry = filp->f_path.dentry;
struct dentry *dentry = file_dentry(filp);
struct dentry *parent = NULL;
struct inode *dir;
unsigned openflags = filp->f_flags;
@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
parent = dget_parent(dentry);
dir = d_inode(parent);
ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;

fs/overlayfs/super.c

@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
}
}
static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
{
struct dentry *real;
if (d_is_dir(dentry)) {
if (!inode || inode == d_inode(dentry))
return dentry;
goto bug;
}
real = ovl_dentry_upper(dentry);
if (real && (!inode || inode == d_inode(real)))
return real;
real = ovl_dentry_lower(dentry);
if (!real)
goto bug;
if (!inode || inode == d_inode(real))
return real;
/* Handle recursion */
if (real->d_flags & DCACHE_OP_REAL)
return real->d_op->d_real(real, inode);
bug:
WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry,
inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
return dentry;
}
static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
struct ovl_entry *oe = dentry->d_fsdata;
@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
static const struct dentry_operations ovl_dentry_operations = {
.d_release = ovl_dentry_release,
.d_select_inode = ovl_d_select_inode,
.d_real = ovl_d_real,
};
static const struct dentry_operations ovl_reval_dentry_operations = {
.d_release = ovl_dentry_release,
.d_select_inode = ovl_d_select_inode,
.d_real = ovl_d_real,
.d_revalidate = ovl_dentry_revalidate,
.d_weak_revalidate = ovl_dentry_weak_revalidate,
};

include/linux/dcache.h

@ -161,6 +161,7 @@ struct dentry_operations {
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool);
struct inode *(*d_select_inode)(struct dentry *, unsigned);
struct dentry *(*d_real)(struct dentry *, struct inode *);
} ____cacheline_aligned;
/*
@ -229,6 +230,7 @@ struct dentry_operations {
#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
#define DCACHE_OP_REAL 0x08000000
extern seqlock_t rename_lock;
@ -555,4 +557,12 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
return upper;
}
static inline struct dentry *d_real(struct dentry *dentry)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
return dentry->d_op->d_real(dentry, NULL);
else
return dentry;
}
#endif /* __LINUX_DCACHE_H */

include/linux/fs.h

@ -1241,6 +1241,16 @@ static inline struct inode *file_inode(const struct file *f)
return f->f_inode;
}
static inline struct dentry *file_dentry(const struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
return dentry->d_op->d_real(dentry, file_inode(file));
else
return dentry;
}
static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
{
return locks_lock_inode_wait(file_inode(filp), fl);