
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs updates from Al Viro:

 - Preparations of parallel lookups (the remaining main obstacle is the
   need to move security_d_instantiate(); once that becomes safe, the
   rest will be a matter of rather short series local to fs/*.c)

 - preadv2/pwritev2 series from Christoph

 - assorted fixes

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (32 commits)
  splice: handle zero nr_pages in splice_to_pipe()
  vfs: show_vfsstat: do not ignore errors from show_devname method
  dcache.c: new helper: __d_add()
  don't bother with __d_instantiate(dentry, NULL)
  untangle fsnotify_d_instantiate() a bit
  uninline d_add()
  replace d_add_unique() with saner primitive
  quota: use lookup_one_len_unlocked()
  cifs_get_root(): use lookup_one_len_unlocked()
  nfs_lookup: don't bother with d_instantiate(dentry, NULL)
  kill dentry_unhash()
  ceph_fill_trace(): don't bother with d_instantiate(dn, NULL)
  autofs4: don't bother with d_instantiate(dentry, NULL) in ->lookup()
  configfs: move d_rehash() into configfs_create() for regular files
  ceph: don't bother with d_rehash() in splice_dentry()
  namei: teach lookup_slow() to skip revalidate
  namei: massage lookup_slow() to be usable by lookup_one_len_unlocked()
  lookup_one_len_unlocked(): use lookup_dcache()
  namei: simplify invalidation logics in lookup_dcache()
  namei: change calling conventions for lookup_{fast,slow} and follow_managed()
  ...
Linus Torvalds 2016-03-19 18:52:29 -07:00
commit 3c2de27d79
33 changed files with 488 additions and 465 deletions
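For context on the preadv2/pwritev2 series below, here is a minimal userspace sketch of the new read-side call, assuming x86-64 and no libc wrapper: the syscall number 327 and the RWF_HIPRI value 0x1 are taken from the syscall-table and uapi hunks in this commit, and the my_preadv2() helper is purely illustrative.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef __NR_preadv2
#define __NR_preadv2 327		/* x86_64 number from the table below */
#endif
#define RWF_HIPRI 0x00000001		/* poll the device if possible */

/* Illustrative wrapper: on 64-bit, pos_l carries the whole offset and
 * pos_h is effectively ignored by pos_from_hilo(). */
static ssize_t my_preadv2(int fd, const struct iovec *iov, int iovcnt,
			  off_t pos, int flags)
{
	return syscall(__NR_preadv2, fd, iov, iovcnt, (long)pos, 0L, flags);
}

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Read from offset 0 as a high-priority request; passing -1 as the
	 * offset would fall back to the file position, see do_preadv2(). */
	ssize_t n = my_preadv2(fd, &iov, 1, 0, RWF_HIPRI);
	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return n < 0 ? 1 : 0;
}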

View File

@ -384,3 +384,5 @@
375 i386 membarrier sys_membarrier
376 i386 mlock2 sys_mlock2
377 i386 copy_file_range sys_copy_file_range
378 i386 preadv2 sys_preadv2
379 i386 pwritev2 sys_pwritev2

View File

@ -333,6 +333,8 @@
324 common membarrier sys_membarrier
325 common mlock2 sys_mlock2
326 common copy_file_range sys_copy_file_range
327 64 preadv2 sys_preadv2
328 64 pwritev2 sys_pwritev2
#
# x32-specific system call numbers start at 512 to avoid cache impact

View File

@ -542,8 +542,6 @@ static struct dentry *autofs4_lookup(struct inode *dir,
ino->dentry = dentry;
autofs4_add_active(dentry);
d_instantiate(dentry, NULL);
}
return NULL;
}

View File

@ -162,6 +162,8 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
size_t buflen, loff_t *pos)
{
struct cachefiles_cache *cache = file->private_data;
unsigned long long b_released;
unsigned f_released;
char buffer[256];
int n;
@ -174,6 +176,8 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
cachefiles_has_space(cache, 0, 0);
/* summarise */
f_released = atomic_xchg(&cache->f_released, 0);
b_released = atomic_long_xchg(&cache->b_released, 0);
clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);
n = snprintf(buffer, sizeof(buffer),
@ -183,15 +187,18 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
" fstop=%llx"
" brun=%llx"
" bcull=%llx"
" bstop=%llx",
" bstop=%llx"
" freleased=%x"
" breleased=%llx",
test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
(unsigned long long) cache->frun,
(unsigned long long) cache->fcull,
(unsigned long long) cache->fstop,
(unsigned long long) cache->brun,
(unsigned long long) cache->bcull,
(unsigned long long) cache->bstop
);
(unsigned long long) cache->bstop,
f_released,
b_released);
if (n > buflen)
return -EMSGSIZE;

View File

@ -291,15 +291,8 @@ static void cachefiles_drop_object(struct fscache_object *_object)
}
/* note that the object is now inactive */
if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
write_lock(&cache->active_lock);
if (!test_and_clear_bit(CACHEFILES_OBJECT_ACTIVE,
&object->flags))
BUG();
rb_erase(&object->active_node, &cache->active_nodes);
wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
write_unlock(&cache->active_lock);
}
if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
cachefiles_mark_object_inactive(cache, object);
dput(object->dentry);
object->dentry = NULL;

View File

@ -66,6 +66,8 @@ struct cachefiles_cache {
struct rb_root active_nodes; /* active nodes (can't be culled) */
rwlock_t active_lock; /* lock for active_nodes */
atomic_t gravecounter; /* graveyard uniquifier */
atomic_t f_released; /* number of objects released lately */
atomic_long_t b_released; /* number of blocks released lately */
unsigned frun_percent; /* when to stop culling (% files) */
unsigned fcull_percent; /* when to start culling (% files) */
unsigned fstop_percent; /* when to stop allocating (% files) */
@ -157,6 +159,8 @@ extern char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type);
/*
* namei.c
*/
extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
struct cachefiles_object *object);
extern int cachefiles_delete_object(struct cachefiles_cache *cache,
struct cachefiles_object *object);
extern int cachefiles_walk_to_object(struct cachefiles_object *parent,

View File

@ -257,6 +257,28 @@ requeue:
return -ETIMEDOUT;
}
/*
* Mark an object as being inactive.
*/
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
struct cachefiles_object *object)
{
write_lock(&cache->active_lock);
rb_erase(&object->active_node, &cache->active_nodes);
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
write_unlock(&cache->active_lock);
wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
/* This object can now be culled, so we need to let the daemon know
* that there is something it can remove if it needs to.
*/
atomic_long_add(d_backing_inode(object->dentry)->i_blocks,
&cache->b_released);
if (atomic_inc_return(&cache->f_released))
cachefiles_state_changed(cache);
}
/*
* delete an object representation from the cache
* - file backed objects are unlinked
@ -684,11 +706,7 @@ mark_active_timed_out:
check_error:
_debug("check error %d", ret);
write_lock(&cache->active_lock);
rb_erase(&object->active_node, &cache->active_nodes);
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
write_unlock(&cache->active_lock);
cachefiles_mark_object_inactive(cache, object);
release_dentry:
dput(object->dentry);
object->dentry = NULL;

View File

@ -977,13 +977,8 @@ out_unlock:
/*
* splice a dentry to an inode.
* caller must hold directory i_mutex for this to be safe.
*
* we will only rehash the resulting dentry if @prehash is
* true; @prehash will be set to false (for the benefit of
* the caller) if we fail.
*/
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
bool *prehash)
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
struct dentry *realdn;
@ -996,8 +991,6 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
if (IS_ERR(realdn)) {
pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
PTR_ERR(realdn), dn, in, ceph_vinop(in));
if (prehash)
*prehash = false; /* don't rehash on error */
dn = realdn; /* note realdn contains the error */
goto out;
} else if (realdn) {
@ -1013,8 +1006,6 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
dout("dn %p attached to %p ino %llx.%llx\n",
dn, d_inode(dn), ceph_vinop(d_inode(dn)));
}
if ((!prehash || *prehash) && d_unhashed(dn))
d_rehash(dn);
out:
return dn;
}
@ -1247,10 +1238,8 @@ retry_lookup:
dout("d_delete %p\n", dn);
d_delete(dn);
} else {
dout("d_instantiate %p NULL\n", dn);
d_instantiate(dn, NULL);
if (have_lease && d_unhashed(dn))
d_rehash(dn);
d_add(dn, NULL);
update_dentry_lease(dn, rinfo->dlease,
session,
req->r_request_started);
@ -1262,7 +1251,7 @@ retry_lookup:
if (d_really_is_negative(dn)) {
ceph_dir_clear_ordered(dir);
ihold(in);
dn = splice_dentry(dn, in, &have_lease);
dn = splice_dentry(dn, in);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
@ -1292,7 +1281,7 @@ retry_lookup:
dout(" linking snapped dir %p to dn %p\n", in, dn);
ceph_dir_clear_ordered(dir);
ihold(in);
dn = splice_dentry(dn, in, NULL);
dn = splice_dentry(dn, in);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
@ -1503,7 +1492,7 @@ retry_lookup:
}
if (d_really_is_negative(dn)) {
struct dentry *realdn = splice_dentry(dn, in, NULL);
struct dentry *realdn = splice_dentry(dn, in);
if (IS_ERR(realdn)) {
err = PTR_ERR(realdn);
d_drop(dn);

View File

@ -642,9 +642,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
while (*s && *s != sep)
s++;
inode_lock(dir);
child = lookup_one_len(p, dentry, s - p);
inode_unlock(dir);
child = lookup_one_len_unlocked(p, dentry, s - p);
dput(dentry);
dentry = child;
} while (!IS_ERR(dentry));

View File

@ -432,14 +432,9 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
(sd->s_type & CONFIGFS_ITEM_BIN_ATTR) ?
configfs_init_bin_file :
configfs_init_file);
if (error) {
if (error)
configfs_put(sd);
return error;
}
d_rehash(dentry);
return 0;
return error;
}
static struct dentry * configfs_lookup(struct inode *dir,

View File

@ -201,9 +201,17 @@ int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct in
configfs_set_inode_lock_class(sd, inode);
init(inode);
d_instantiate(dentry, inode);
if (S_ISDIR(mode) || S_ISLNK(mode))
if (S_ISDIR(mode) || S_ISLNK(mode)) {
/*
* ->symlink(), ->mkdir(), configfs_register_subsystem() or
* create_default_group() - already hashed.
*/
d_instantiate(dentry, inode);
dget(dentry); /* pin link and directory dentries in core */
} else {
/* ->lookup() */
d_add(dentry, inode);
}
return error;
}

View File

@ -1745,13 +1745,12 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
unsigned add_flags = d_flags_for_inode(inode);
spin_lock(&dentry->d_lock);
if (inode)
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
raw_write_seqcount_end(&dentry->d_seq);
__fsnotify_d_instantiate(dentry);
spin_unlock(&dentry->d_lock);
fsnotify_d_instantiate(dentry, inode);
}
/**
@ -1772,90 +1771,15 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
void d_instantiate(struct dentry *entry, struct inode * inode)
{
BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
if (inode)
if (inode) {
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
if (inode)
__d_instantiate(entry, inode);
spin_unlock(&inode->i_lock);
}
security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
/**
* d_instantiate_unique - instantiate a non-aliased dentry
* @entry: dentry to instantiate
* @inode: inode to attach to this dentry
*
* Fill in inode information in the entry. On success, it returns NULL.
* If an unhashed alias of "entry" already exists, then we return the
* aliased dentry instead and drop one reference to inode.
*
* Note that in order to avoid conflicts with rename() etc, the caller
* had better be holding the parent directory semaphore.
*
* This also assumes that the inode count has been incremented
* (or otherwise set) by the caller to indicate that it is now
* in use by the dcache.
*/
static struct dentry *__d_instantiate_unique(struct dentry *entry,
struct inode *inode)
{
struct dentry *alias;
int len = entry->d_name.len;
const char *name = entry->d_name.name;
unsigned int hash = entry->d_name.hash;
if (!inode) {
__d_instantiate(entry, NULL);
return NULL;
}
hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
/*
* Don't need alias->d_lock here, because aliases with
* d_parent == entry->d_parent are not subject to name or
* parent changes, because the parent inode i_mutex is held.
*/
if (alias->d_name.hash != hash)
continue;
if (alias->d_parent != entry->d_parent)
continue;
if (alias->d_name.len != len)
continue;
if (dentry_cmp(alias, name, len))
continue;
__dget(alias);
return alias;
}
__d_instantiate(entry, inode);
return NULL;
}
struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
struct dentry *result;
BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
if (inode)
spin_lock(&inode->i_lock);
result = __d_instantiate_unique(entry, inode);
if (inode)
spin_unlock(&inode->i_lock);
if (!result) {
security_d_instantiate(entry, inode);
return NULL;
}
BUG_ON(!d_unhashed(result));
iput(inode);
return result;
}
EXPORT_SYMBOL(d_instantiate_unique);
/**
* d_instantiate_no_diralias - instantiate a non-aliased dentry
* @entry: dentry to complete
@ -2436,6 +2360,86 @@ void d_rehash(struct dentry * entry)
}
EXPORT_SYMBOL(d_rehash);
/* inode->i_lock held if inode is non-NULL */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
if (inode) {
__d_instantiate(dentry, inode);
spin_unlock(&inode->i_lock);
}
security_d_instantiate(dentry, inode);
d_rehash(dentry);
}
/**
* d_add - add dentry to hash queues
* @entry: dentry to add
* @inode: The inode to attach to this dentry
*
* This adds the entry to the hash queues and initializes @inode.
* The entry was actually filled in earlier during d_alloc().
*/
void d_add(struct dentry *entry, struct inode *inode)
{
if (inode)
spin_lock(&inode->i_lock);
__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);
/**
* d_exact_alias - find and hash an exact unhashed alias
* @entry: dentry to add
* @inode: The inode to go with this dentry
*
* If an unhashed dentry with the same name/parent and desired
* inode already exists, hash and return it. Otherwise, return
* NULL.
*
* Parent directory should be locked.
*/
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
struct dentry *alias;
int len = entry->d_name.len;
const char *name = entry->d_name.name;
unsigned int hash = entry->d_name.hash;
spin_lock(&inode->i_lock);
hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
/*
* Don't need alias->d_lock here, because aliases with
* d_parent == entry->d_parent are not subject to name or
* parent changes, because the parent inode i_mutex is held.
*/
if (alias->d_name.hash != hash)
continue;
if (alias->d_parent != entry->d_parent)
continue;
if (alias->d_name.len != len)
continue;
if (dentry_cmp(alias, name, len))
continue;
spin_lock(&alias->d_lock);
if (!d_unhashed(alias)) {
spin_unlock(&alias->d_lock);
alias = NULL;
} else {
__dget_dlock(alias);
_d_rehash(alias);
spin_unlock(&alias->d_lock);
}
spin_unlock(&inode->i_lock);
return alias;
}
spin_unlock(&inode->i_lock);
return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
/**
* dentry_update_name_case - update case insensitive dentry with a new name
* @dentry: dentry to be updated
@ -2772,10 +2776,9 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
BUG_ON(!d_unhashed(dentry));
if (!inode) {
__d_instantiate(dentry, NULL);
if (!inode)
goto out;
}
spin_lock(&inode->i_lock);
if (S_ISDIR(inode->i_mode)) {
struct dentry *new = __d_find_any_alias(inode);
@ -2809,12 +2812,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
return new;
}
}
/* already taking inode->i_lock, so d_add() by hand */
__d_instantiate(dentry, inode);
spin_unlock(&inode->i_lock);
out:
security_d_instantiate(dentry, inode);
d_rehash(dentry);
__d_add(dentry, inode);
return NULL;
}
EXPORT_SYMBOL(d_splice_alias);

View File

@ -445,7 +445,8 @@ static struct bio *dio_await_one(struct dio *dio)
__set_current_state(TASK_UNINTERRUPTIBLE);
dio->waiter = current;
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (!blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
!blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
io_schedule();
/* wake up sets us TASK_RUNNING */
spin_lock_irqsave(&dio->bio_lock, flags);

View File

@ -1492,16 +1492,14 @@ out:
*/
static int
ecryptfs_encrypt_filename(struct ecryptfs_filename *filename,
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
int rc = 0;
filename->encrypted_filename = NULL;
filename->encrypted_filename_size = 0;
if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCFN_USE_MOUNT_FNEK))
|| (mount_crypt_stat && (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) {
if (mount_crypt_stat && (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK)) {
size_t packet_size;
size_t remaining_bytes;
@ -1933,7 +1931,6 @@ out:
int ecryptfs_encrypt_and_encode_filename(
char **encoded_name,
size_t *encoded_name_size,
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
const char *name, size_t name_size)
{
@ -1942,9 +1939,8 @@ int ecryptfs_encrypt_and_encode_filename(
(*encoded_name) = NULL;
(*encoded_name_size) = 0;
if ((crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCRYPT_FILENAMES))
|| (mount_crypt_stat && (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES))) {
if (mount_crypt_stat && (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
struct ecryptfs_filename *filename;
filename = kzalloc(sizeof(*filename), GFP_KERNEL);
@ -1957,8 +1953,7 @@ int ecryptfs_encrypt_and_encode_filename(
}
filename->filename = (char *)name;
filename->filename_size = name_size;
rc = ecryptfs_encrypt_filename(filename, crypt_stat,
mount_crypt_stat);
rc = ecryptfs_encrypt_filename(filename, mount_crypt_stat);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt "
"filename; rc = [%d]\n", __func__, rc);
@ -1969,11 +1964,9 @@ int ecryptfs_encrypt_and_encode_filename(
NULL, &encoded_name_no_prefix_size,
filename->encrypted_filename,
filename->encrypted_filename_size);
if ((crypt_stat && (crypt_stat->flags
& ECRYPTFS_ENCFN_USE_MOUNT_FNEK))
|| (mount_crypt_stat
if (mount_crypt_stat
&& (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK)))
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))
(*encoded_name_size) =
(ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
+ encoded_name_no_prefix_size);
@ -1991,11 +1984,9 @@ int ecryptfs_encrypt_and_encode_filename(
kfree(filename);
goto out;
}
if ((crypt_stat && (crypt_stat->flags
& ECRYPTFS_ENCFN_USE_MOUNT_FNEK))
|| (mount_crypt_stat
if (mount_crypt_stat
&& (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))) {
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK)) {
memcpy((*encoded_name),
ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE);

View File

@ -569,7 +569,6 @@ int ecryptfs_fill_zeros(struct file *file, loff_t new_length);
int ecryptfs_encrypt_and_encode_filename(
char **encoded_name,
size_t *encoded_name_size,
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
const char *name, size_t name_size);
struct dentry *ecryptfs_lower_dentry(struct dentry *this_dentry);

View File

@ -396,11 +396,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
int rc = 0;
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
inode_lock(d_inode(lower_dir_dentry));
lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
lower_dentry = lookup_one_len_unlocked(ecryptfs_dentry->d_name.name,
lower_dir_dentry,
ecryptfs_dentry->d_name.len);
inode_unlock(d_inode(lower_dir_dentry));
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
@ -418,18 +416,16 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
dput(lower_dentry);
rc = ecryptfs_encrypt_and_encode_filename(
&encrypted_and_encoded_name, &encrypted_and_encoded_name_size,
NULL, mount_crypt_stat, ecryptfs_dentry->d_name.name,
mount_crypt_stat, ecryptfs_dentry->d_name.name,
ecryptfs_dentry->d_name.len);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt and encode "
"filename; rc = [%d]\n", __func__, rc);
goto out;
}
inode_lock(d_inode(lower_dir_dentry));
lower_dentry = lookup_one_len(encrypted_and_encoded_name,
lower_dentry = lookup_one_len_unlocked(encrypted_and_encoded_name,
lower_dir_dentry,
encrypted_and_encoded_name_size);
inode_unlock(d_inode(lower_dir_dentry));
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
@ -501,7 +497,6 @@ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
dir->i_sb)->mount_crypt_stat;
rc = ecryptfs_encrypt_and_encode_filename(&encoded_symname,
&encoded_symlen,
NULL,
mount_crypt_stat, symname,
strlen(symname));
if (rc)

View File

@ -1220,8 +1220,8 @@ static int follow_managed(struct path *path, struct nameidata *nd)
if (need_mntput && path->mnt == mnt)
mntput(path->mnt);
if (ret == -EISDIR)
ret = 0;
if (ret == -EISDIR || !ret)
ret = 1;
if (need_mntput)
nd->flags |= LOOKUP_JUMPED;
if (unlikely(ret < 0))
@ -1444,40 +1444,26 @@ static int follow_dotdot(struct nameidata *nd)
* This looks up the name in dcache, possibly revalidates the old dentry and
* allocates a new one if not found or not valid. In the need_lookup argument
* returns whether i_op->lookup is necessary.
*
* dir->d_inode->i_mutex must be held
*/
static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
unsigned int flags, bool *need_lookup)
static struct dentry *lookup_dcache(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct dentry *dentry;
int error;
*need_lookup = false;
dentry = d_lookup(dir, name);
if (dentry) {
if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
error = d_revalidate(dentry, flags);
if (unlikely(error <= 0)) {
if (error < 0) {
dput(dentry);
return ERR_PTR(error);
} else {
if (!error)
d_invalidate(dentry);
dput(dentry);
dentry = NULL;
}
dput(dentry);
return ERR_PTR(error);
}
}
}
if (!dentry) {
dentry = d_alloc(dir, name);
if (unlikely(!dentry))
return ERR_PTR(-ENOMEM);
*need_lookup = true;
}
return dentry;
}
@ -1506,45 +1492,44 @@ static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
return dentry;
}
static struct dentry *__lookup_hash(struct qstr *name,
static struct dentry *__lookup_hash(const struct qstr *name,
struct dentry *base, unsigned int flags)
{
bool need_lookup;
struct dentry *dentry;
struct dentry *dentry = lookup_dcache(name, base, flags);
dentry = lookup_dcache(name, base, flags, &need_lookup);
if (!need_lookup)
if (dentry)
return dentry;
dentry = d_alloc(base, name);
if (unlikely(!dentry))
return ERR_PTR(-ENOMEM);
return lookup_real(base->d_inode, dentry, flags);
}
/*
* It's more convoluted than I'd like it to be, but... it's still fairly
* small and for now I'd prefer to have fast path as straight as possible.
* It _is_ time-critical.
*/
static int lookup_fast(struct nameidata *nd,
struct path *path, struct inode **inode,
unsigned *seqp)
{
struct vfsmount *mnt = nd->path.mnt;
struct dentry *dentry, *parent = nd->path.dentry;
int need_reval = 1;
int status = 1;
int err;
/*
* Rename seqlock is not required here because in the off chance
* of a false negative due to a concurrent rename, we're going to
* do the non-racy lookup, below.
* of a false negative due to a concurrent rename, the caller is
* going to fall back to non-racy lookup.
*/
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
bool negative;
dentry = __d_lookup_rcu(parent, &nd->last, &seq);
if (!dentry)
goto unlazy;
if (unlikely(!dentry)) {
if (unlazy_walk(nd, NULL, 0))
return -ECHILD;
return 0;
}
/*
* This sequence count validates that the inode matches
@ -1552,7 +1537,7 @@ static int lookup_fast(struct nameidata *nd,
*/
*inode = d_backing_inode(dentry);
negative = d_is_negative(dentry);
if (read_seqcount_retry(&dentry->d_seq, seq))
if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
return -ECHILD;
/*
@ -1562,81 +1547,89 @@ static int lookup_fast(struct nameidata *nd,
* The memory barrier in read_seqcount_begin of child is
* enough, we can use __read_seqcount_retry here.
*/
if (__read_seqcount_retry(&parent->d_seq, nd->seq))
if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
return -ECHILD;
*seqp = seq;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
status = d_revalidate(dentry, nd->flags);
if (unlikely(status <= 0)) {
if (status != -ECHILD)
need_reval = 0;
goto unlazy;
}
if (unlikely(status <= 0)) {
if (unlazy_walk(nd, dentry, seq))
return -ECHILD;
if (status == -ECHILD)
status = d_revalidate(dentry, nd->flags);
} else {
/*
* Note: do negative dentry check after revalidation in
* case that drops it.
*/
if (unlikely(negative))
return -ENOENT;
path->mnt = mnt;
path->dentry = dentry;
if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
return 1;
if (unlazy_walk(nd, dentry, seq))
return -ECHILD;
}
/*
* Note: do negative dentry check after revalidation in
* case that drops it.
*/
if (negative)
return -ENOENT;
path->mnt = mnt;
path->dentry = dentry;
if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
return 0;
unlazy:
if (unlazy_walk(nd, dentry, seq))
return -ECHILD;
} else {
dentry = __d_lookup(parent, &nd->last);
if (unlikely(!dentry))
return 0;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
status = d_revalidate(dentry, nd->flags);
}
if (unlikely(!dentry))
goto need_lookup;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
status = d_revalidate(dentry, nd->flags);
if (unlikely(status <= 0)) {
if (status < 0) {
dput(dentry);
return status;
}
d_invalidate(dentry);
if (!status)
d_invalidate(dentry);
dput(dentry);
goto need_lookup;
return status;
}
if (unlikely(d_is_negative(dentry))) {
dput(dentry);
return -ENOENT;
}
path->mnt = mnt;
path->dentry = dentry;
err = follow_managed(path, nd);
if (likely(!err))
if (likely(err > 0))
*inode = d_backing_inode(path->dentry);
return err;
need_lookup:
return 1;
}
/* Fast lookup failed, do it the slow way */
static int lookup_slow(struct nameidata *nd, struct path *path)
static struct dentry *lookup_slow(const struct qstr *name,
struct dentry *dir,
unsigned int flags)
{
struct dentry *dentry, *parent;
parent = nd->path.dentry;
BUG_ON(nd->inode != parent->d_inode);
inode_lock(parent->d_inode);
dentry = __lookup_hash(&nd->last, parent, nd->flags);
inode_unlock(parent->d_inode);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
path->mnt = nd->path.mnt;
path->dentry = dentry;
return follow_managed(path, nd);
struct dentry *dentry;
inode_lock(dir->d_inode);
dentry = d_lookup(dir, name);
if (unlikely(dentry)) {
if ((dentry->d_flags & DCACHE_OP_REVALIDATE) &&
!(flags & LOOKUP_NO_REVAL)) {
int error = d_revalidate(dentry, flags);
if (unlikely(error <= 0)) {
if (!error)
d_invalidate(dentry);
dput(dentry);
dentry = ERR_PTR(error);
}
}
if (dentry) {
inode_unlock(dir->d_inode);
return dentry;
}
}
dentry = d_alloc(dir, name);
if (unlikely(!dentry)) {
inode_unlock(dir->d_inode);
return ERR_PTR(-ENOMEM);
}
dentry = lookup_real(dir->d_inode, dentry, flags);
inode_unlock(dir->d_inode);
return dentry;
}
static inline int may_lookup(struct nameidata *nd)
@ -1740,18 +1733,23 @@ static int walk_component(struct nameidata *nd, int flags)
return err;
}
err = lookup_fast(nd, &path, &inode, &seq);
if (unlikely(err)) {
if (unlikely(err <= 0)) {
if (err < 0)
return err;
err = lookup_slow(nd, &path);
if (err < 0)
path.dentry = lookup_slow(&nd->last, nd->path.dentry,
nd->flags);
if (IS_ERR(path.dentry))
return PTR_ERR(path.dentry);
if (unlikely(d_is_negative(path.dentry))) {
dput(path.dentry);
return -ENOENT;
}
path.mnt = nd->path.mnt;
err = follow_managed(&path, nd);
if (unlikely(err < 0))
return err;
seq = 0; /* we are already out of RCU mode */
err = -ENOENT;
if (d_is_negative(path.dentry))
goto out_path_put;
inode = d_backing_inode(path.dentry);
}
@ -1764,10 +1762,6 @@ static int walk_component(struct nameidata *nd, int flags)
nd->inode = inode;
nd->seq = seq;
return 0;
out_path_put:
path_to_nameidata(&path, nd);
return err;
}
/*
@ -2373,21 +2367,9 @@ struct dentry *lookup_one_len_unlocked(const char *name,
if (err)
return ERR_PTR(err);
/*
* __d_lookup() is used to try to get a quick answer and avoid the
* mutex. A false-negative does no harm.
*/
ret = __d_lookup(base, &this);
if (ret && unlikely(ret->d_flags & DCACHE_OP_REVALIDATE)) {
dput(ret);
ret = NULL;
}
if (ret)
return ret;
inode_lock(base->d_inode);
ret = __lookup_hash(&this, base, 0);
inode_unlock(base->d_inode);
ret = lookup_dcache(&this, base, 0);
if (!ret)
ret = lookup_slow(&this, base, 0);
return ret;
}
EXPORT_SYMBOL(lookup_one_len_unlocked);
@ -2465,31 +2447,21 @@ mountpoint_last(struct nameidata *nd, struct path *path)
if (error)
return error;
dentry = dget(nd->path.dentry);
goto done;
}
inode_lock(dir->d_inode);
dentry = d_lookup(dir, &nd->last);
if (!dentry) {
/*
* No cached dentry. Mounted dentries are pinned in the cache,
* so that means that this dentry is probably a symlink or the
* path doesn't actually point to a mounted dentry.
*/
dentry = d_alloc(dir, &nd->last);
} else {
dentry = d_lookup(dir, &nd->last);
if (!dentry) {
inode_unlock(dir->d_inode);
return -ENOMEM;
}
dentry = lookup_real(dir->d_inode, dentry, nd->flags);
if (IS_ERR(dentry)) {
inode_unlock(dir->d_inode);
return PTR_ERR(dentry);
/*
* No cached dentry. Mounted dentries are pinned in the
* cache, so that means that this dentry is probably
* a symlink or the path doesn't actually point
* to a mounted dentry.
*/
dentry = lookup_slow(&nd->last, dir,
nd->flags | LOOKUP_NO_REVAL);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
}
}
inode_unlock(dir->d_inode);
done:
if (d_is_negative(dentry)) {
dput(dentry);
return -ENOENT;
@ -3018,16 +2990,22 @@ static int lookup_open(struct nameidata *nd, struct path *path,
struct inode *dir_inode = dir->d_inode;
struct dentry *dentry;
int error;
bool need_lookup;
bool need_lookup = false;
*opened &= ~FILE_CREATED;
dentry = lookup_dcache(&nd->last, dir, nd->flags, &need_lookup);
dentry = lookup_dcache(&nd->last, dir, nd->flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
/* Cached positive dentry: will open in f_op->open */
if (!need_lookup && dentry->d_inode)
if (!dentry) {
dentry = d_alloc(dir, &nd->last);
if (unlikely(!dentry))
return -ENOMEM;
need_lookup = true;
} else if (dentry->d_inode) {
/* Cached positive dentry: will open in f_op->open */
goto out_no_open;
}
if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
return atomic_open(nd, dentry, path, file, op, got_write,
@ -3111,13 +3089,14 @@ static int do_last(struct nameidata *nd,
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
/* we _can_ be in RCU mode here */
error = lookup_fast(nd, &path, &inode, &seq);
if (likely(!error))
if (likely(error > 0))
goto finish_lookup;
if (error < 0)
return error;
BUG_ON(nd->inode != dir->d_inode);
BUG_ON(nd->flags & LOOKUP_RCU);
} else {
/* create side of things */
/*
@ -3171,12 +3150,6 @@ retry_lookup:
goto finish_open_created;
}
/*
* create/update audit record if it already exists.
*/
if (d_is_positive(path.dentry))
audit_inode(nd->name, path.dentry, 0);
/*
* If atomic_open() acquired write access it is dropped now due to
* possible mount and symlink following (this might be optimized away if
@ -3187,6 +3160,16 @@ retry_lookup:
got_write = false;
}
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
}
/*
* create/update audit record if it already exists.
*/
audit_inode(nd->name, path.dentry, 0);
if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
path_to_nameidata(&path, nd);
return -EEXIST;
@ -3196,12 +3179,7 @@ retry_lookup:
if (unlikely(error < 0))
return error;
BUG_ON(nd->flags & LOOKUP_RCU);
seq = 0; /* out of RCU mode, so the value doesn't matter */
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
}
inode = d_backing_inode(path.dentry);
finish_lookup:
if (nd->depth)
@ -3707,31 +3685,6 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
return sys_mkdirat(AT_FDCWD, pathname, mode);
}
/*
* The dentry_unhash() helper will try to drop the dentry early: we
* should have a usage count of 1 if we're the only user of this
* dentry, and if that is true (possibly after pruning the dcache),
* then we drop the dentry now.
*
* A low-level filesystem can, if it choses, legally
* do a
*
* if (!d_unhashed(dentry))
* return -EBUSY;
*
* if it cannot handle the case of removing a directory
* that is still in use by something else..
*/
void dentry_unhash(struct dentry *dentry)
{
shrink_dcache_parent(dentry);
spin_lock(&dentry->d_lock);
if (dentry->d_lockref.count == 1)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_unhash);
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
int error = may_delete(dir, dentry, 1);

View File

@ -1360,19 +1360,15 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
res = ERR_PTR(-ENAMETOOLONG);
if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
goto out;
if (unlikely(dentry->d_name.len > NFS_SERVER(dir)->namelen))
return ERR_PTR(-ENAMETOOLONG);
/*
* If we're doing an exclusive create, optimize away the lookup
* but don't hash the dentry.
*/
if (nfs_is_exclusive_create(dir, flags)) {
d_instantiate(dentry, NULL);
res = NULL;
goto out;
}
if (nfs_is_exclusive_create(dir, flags))
return NULL;
res = ERR_PTR(-ENOMEM);
fhandle = nfs_alloc_fhandle();

View File

@ -2461,14 +2461,15 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
dentry = opendata->dentry;
if (d_really_is_negative(dentry)) {
/* FIXME: Is this d_drop() ever needed? */
struct dentry *alias;
d_drop(dentry);
dentry = d_add_unique(dentry, igrab(state->inode));
if (dentry == NULL) {
dentry = opendata->dentry;
} else {
alias = d_exact_alias(dentry, state->inode);
if (!alias)
alias = d_splice_alias(igrab(state->inode), dentry);
/* d_splice_alias() can't fail here - it's a non-directory */
if (alias) {
dput(ctx->dentry);
ctx->dentry = dentry;
ctx->dentry = dentry = alias;
}
nfs_set_verifier(dentry,
nfs_save_change_attribute(d_inode(opendata->dir)));

View File

@ -870,7 +870,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
oldfs = get_fs();
set_fs(KERNEL_DS);
host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset, 0);
set_fs(oldfs);
return nfsd_finish_read(file, count, host_err);
}
@ -957,7 +957,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos, 0);
set_fs(oldfs);
if (host_err < 0)
goto out_nfserr;

View File

@ -199,6 +199,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
if (sb->s_op->show_devname) {
seq_puts(m, "device ");
err = sb->s_op->show_devname(m, mnt_path.dentry);
if (err)
goto out;
} else {
if (r->mnt_devname) {
seq_puts(m, "device ");

View File

@ -2430,9 +2430,7 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
struct dentry *dentry;
int error;
inode_lock(d_inode(sb->s_root));
dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
inode_unlock(d_inode(sb->s_root));
dentry = lookup_one_len_unlocked(qf_name, sb->s_root, strlen(qf_name));
if (IS_ERR(dentry))
return PTR_ERR(dentry);

View File

@ -693,12 +693,17 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
EXPORT_SYMBOL(iov_shorten);
static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
loff_t *ppos, iter_fn_t fn)
loff_t *ppos, iter_fn_t fn, int flags)
{
struct kiocb kiocb;
ssize_t ret;
if (flags & ~RWF_HIPRI)
return -EOPNOTSUPP;
init_sync_kiocb(&kiocb, filp);
if (flags & RWF_HIPRI)
kiocb.ki_flags |= IOCB_HIPRI;
kiocb.ki_pos = *ppos;
ret = fn(&kiocb, iter);
@ -709,10 +714,13 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
/* Do it by hand, with file-ops */
static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
loff_t *ppos, io_fn_t fn)
loff_t *ppos, io_fn_t fn, int flags)
{
ssize_t ret = 0;
if (flags & ~RWF_HIPRI)
return -EOPNOTSUPP;
while (iov_iter_count(iter)) {
struct iovec iovec = iov_iter_iovec(iter);
ssize_t nr;
@ -813,7 +821,8 @@ out:
static ssize_t do_readv_writev(int type, struct file *file,
const struct iovec __user * uvector,
unsigned long nr_segs, loff_t *pos)
unsigned long nr_segs, loff_t *pos,
int flags)
{
size_t tot_len;
struct iovec iovstack[UIO_FASTIOV];
@ -845,9 +854,9 @@ static ssize_t do_readv_writev(int type, struct file *file,
}
if (iter_fn)
ret = do_iter_readv_writev(file, &iter, pos, iter_fn);
ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
else
ret = do_loop_readv_writev(file, &iter, pos, fn);
ret = do_loop_readv_writev(file, &iter, pos, fn, flags);
if (type != READ)
file_end_write(file);
@ -864,40 +873,40 @@ out:
}
ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
unsigned long vlen, loff_t *pos)
unsigned long vlen, loff_t *pos, int flags)
{
if (!(file->f_mode & FMODE_READ))
return -EBADF;
if (!(file->f_mode & FMODE_CAN_READ))
return -EINVAL;
return do_readv_writev(READ, file, vec, vlen, pos);
return do_readv_writev(READ, file, vec, vlen, pos, flags);
}
EXPORT_SYMBOL(vfs_readv);
ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
unsigned long vlen, loff_t *pos)
unsigned long vlen, loff_t *pos, int flags)
{
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
if (!(file->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
return do_readv_writev(WRITE, file, vec, vlen, pos);
return do_readv_writev(WRITE, file, vec, vlen, pos, flags);
}
EXPORT_SYMBOL(vfs_writev);
SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen)
static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, int flags)
{
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
loff_t pos = file_pos_read(f.file);
ret = vfs_readv(f.file, vec, vlen, &pos);
ret = vfs_readv(f.file, vec, vlen, &pos, flags);
if (ret >= 0)
file_pos_write(f.file, pos);
fdput_pos(f);
@ -909,15 +918,15 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
return ret;
}
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen)
static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, int flags)
{
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
loff_t pos = file_pos_read(f.file);
ret = vfs_writev(f.file, vec, vlen, &pos);
ret = vfs_writev(f.file, vec, vlen, &pos, flags);
if (ret >= 0)
file_pos_write(f.file, pos);
fdput_pos(f);
@ -935,10 +944,9 @@ static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
}
SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, loff_t pos, int flags)
{
loff_t pos = pos_from_hilo(pos_h, pos_l);
struct fd f;
ssize_t ret = -EBADF;
@ -949,7 +957,7 @@ SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
if (f.file) {
ret = -ESPIPE;
if (f.file->f_mode & FMODE_PREAD)
ret = vfs_readv(f.file, vec, vlen, &pos);
ret = vfs_readv(f.file, vec, vlen, &pos, flags);
fdput(f);
}
@ -959,10 +967,9 @@ SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
return ret;
}
SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, loff_t pos, int flags)
{
loff_t pos = pos_from_hilo(pos_h, pos_l);
struct fd f;
ssize_t ret = -EBADF;
@ -973,7 +980,7 @@ SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
if (f.file) {
ret = -ESPIPE;
if (f.file->f_mode & FMODE_PWRITE)
ret = vfs_writev(f.file, vec, vlen, &pos);
ret = vfs_writev(f.file, vec, vlen, &pos, flags);
fdput(f);
}
@ -983,11 +990,64 @@ SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
return ret;
}
SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen)
{
return do_readv(fd, vec, vlen, 0);
}
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen)
{
return do_writev(fd, vec, vlen, 0);
}
SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
loff_t pos = pos_from_hilo(pos_h, pos_l);
return do_preadv(fd, vec, vlen, pos, 0);
}
SYSCALL_DEFINE6(preadv2, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
int, flags)
{
loff_t pos = pos_from_hilo(pos_h, pos_l);
if (pos == -1)
return do_readv(fd, vec, vlen, flags);
return do_preadv(fd, vec, vlen, pos, flags);
}
SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
loff_t pos = pos_from_hilo(pos_h, pos_l);
return do_pwritev(fd, vec, vlen, pos, 0);
}
SYSCALL_DEFINE6(pwritev2, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
int, flags)
{
loff_t pos = pos_from_hilo(pos_h, pos_l);
if (pos == -1)
return do_writev(fd, vec, vlen, flags);
return do_pwritev(fd, vec, vlen, pos, flags);
}
#ifdef CONFIG_COMPAT
static ssize_t compat_do_readv_writev(int type, struct file *file,
const struct compat_iovec __user *uvector,
unsigned long nr_segs, loff_t *pos)
unsigned long nr_segs, loff_t *pos,
int flags)
{
compat_ssize_t tot_len;
struct iovec iovstack[UIO_FASTIOV];
@ -1019,9 +1079,9 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
}
if (iter_fn)
ret = do_iter_readv_writev(file, &iter, pos, iter_fn);
ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
else
ret = do_loop_readv_writev(file, &iter, pos, fn);
ret = do_loop_readv_writev(file, &iter, pos, fn, flags);
if (type != READ)
file_end_write(file);
@ -1039,7 +1099,7 @@ out:
static size_t compat_readv(struct file *file,
const struct compat_iovec __user *vec,
unsigned long vlen, loff_t *pos)
unsigned long vlen, loff_t *pos, int flags)
{
ssize_t ret = -EBADF;
@ -1050,7 +1110,7 @@ static size_t compat_readv(struct file *file,
if (!(file->f_mode & FMODE_CAN_READ))
goto out;
ret = compat_do_readv_writev(READ, file, vec, vlen, pos);
ret = compat_do_readv_writev(READ, file, vec, vlen, pos, flags);
out:
if (ret > 0)
@ -1059,9 +1119,9 @@ out:
return ret;
}
COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen)
static size_t do_compat_readv(compat_ulong_t fd,
const struct compat_iovec __user *vec,
compat_ulong_t vlen, int flags)
{
struct fd f = fdget_pos(fd);
ssize_t ret;
@ -1070,16 +1130,24 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
if (!f.file)
return -EBADF;
pos = f.file->f_pos;
ret = compat_readv(f.file, vec, vlen, &pos);
ret = compat_readv(f.file, vec, vlen, &pos, flags);
if (ret >= 0)
f.file->f_pos = pos;
fdput_pos(f);
return ret;
}
static long __compat_sys_preadv64(unsigned long fd,
COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen)
{
return do_compat_readv(fd, vec, vlen, 0);
}
static long do_compat_preadv64(unsigned long fd,
const struct compat_iovec __user *vec,
unsigned long vlen, loff_t pos)
unsigned long vlen, loff_t pos, int flags)
{
struct fd f;
ssize_t ret;
@ -1091,7 +1159,7 @@ static long __compat_sys_preadv64(unsigned long fd,
return -EBADF;
ret = -ESPIPE;
if (f.file->f_mode & FMODE_PREAD)
ret = compat_readv(f.file, vec, vlen, &pos);
ret = compat_readv(f.file, vec, vlen, &pos, flags);
fdput(f);
return ret;
}
@ -1101,7 +1169,7 @@ COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
const struct compat_iovec __user *,vec,
unsigned long, vlen, loff_t, pos)
{
return __compat_sys_preadv64(fd, vec, vlen, pos);
return do_compat_preadv64(fd, vec, vlen, pos, 0);
}
#endif
@ -1111,12 +1179,25 @@ COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
{
loff_t pos = ((loff_t)pos_high << 32) | pos_low;
return __compat_sys_preadv64(fd, vec, vlen, pos);
return do_compat_preadv64(fd, vec, vlen, pos, 0);
}
COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
int, flags)
{
loff_t pos = ((loff_t)pos_high << 32) | pos_low;
if (pos == -1)
return do_compat_readv(fd, vec, vlen, flags);
return do_compat_preadv64(fd, vec, vlen, pos, flags);
}
static size_t compat_writev(struct file *file,
const struct compat_iovec __user *vec,
unsigned long vlen, loff_t *pos)
unsigned long vlen, loff_t *pos, int flags)
{
ssize_t ret = -EBADF;
@ -1127,7 +1208,7 @@ static size_t compat_writev(struct file *file,
if (!(file->f_mode & FMODE_CAN_WRITE))
goto out;
ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);
ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
out:
if (ret > 0)
@ -1136,9 +1217,9 @@ out:
return ret;
}
COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
const struct compat_iovec __user *, vec,
compat_ulong_t, vlen)
static size_t do_compat_writev(compat_ulong_t fd,
const struct compat_iovec __user* vec,
compat_ulong_t vlen, int flags)
{
struct fd f = fdget_pos(fd);
ssize_t ret;
@ -1147,16 +1228,23 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
if (!f.file)
return -EBADF;
pos = f.file->f_pos;
ret = compat_writev(f.file, vec, vlen, &pos);
ret = compat_writev(f.file, vec, vlen, &pos, flags);
if (ret >= 0)
f.file->f_pos = pos;
fdput_pos(f);
return ret;
}
static long __compat_sys_pwritev64(unsigned long fd,
COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
const struct compat_iovec __user *, vec,
compat_ulong_t, vlen)
{
return do_compat_writev(fd, vec, vlen, 0);
}
static long do_compat_pwritev64(unsigned long fd,
const struct compat_iovec __user *vec,
unsigned long vlen, loff_t pos)
unsigned long vlen, loff_t pos, int flags)
{
struct fd f;
ssize_t ret;
@ -1168,7 +1256,7 @@ static long __compat_sys_pwritev64(unsigned long fd,
return -EBADF;
ret = -ESPIPE;
if (f.file->f_mode & FMODE_PWRITE)
ret = compat_writev(f.file, vec, vlen, &pos);
ret = compat_writev(f.file, vec, vlen, &pos, flags);
fdput(f);
return ret;
}
@ -1178,7 +1266,7 @@ COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
const struct compat_iovec __user *,vec,
unsigned long, vlen, loff_t, pos)
{
return __compat_sys_pwritev64(fd, vec, vlen, pos);
return do_compat_pwritev64(fd, vec, vlen, pos, 0);
}
#endif
@ -1188,8 +1276,21 @@ COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
{
loff_t pos = ((loff_t)pos_high << 32) | pos_low;
return __compat_sys_pwritev64(fd, vec, vlen, pos);
return do_compat_pwritev64(fd, vec, vlen, pos, 0);
}
COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen, u32, pos_low, u32, pos_high, int, flags)
{
loff_t pos = ((loff_t)pos_high << 32) | pos_low;
if (pos == -1)
return do_compat_writev(fd, vec, vlen, flags);
return do_compat_pwritev64(fd, vec, vlen, pos, flags);
}
#endif
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,

View File

@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
unsigned int spd_pages = spd->nr_pages;
int ret, do_wakeup, page_nr;
if (!spd_pages)
return 0;
ret = 0;
do_wakeup = 0;
page_nr = 0;
@ -577,7 +580,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos, 0);
set_fs(old_fs);
return res;

View File

@ -499,7 +499,8 @@ struct request_queue {
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP))
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_POLL))
static inline void queue_lockdep_assert_held(struct request_queue *q)
{

View File

@ -340,6 +340,12 @@ asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
const struct compat_iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
const struct compat_iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
const struct compat_iovec __user *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
asmlinkage long compat_sys_preadv64(unsigned long fd,

View File

@ -246,6 +246,7 @@ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
@ -272,38 +273,8 @@ extern int have_submounts(struct dentry *);
* This adds the entry to the hash queues.
*/
extern void d_rehash(struct dentry *);
/**
* d_add - add dentry to hash queues
* @entry: dentry to add
* @inode: The inode to attach to this dentry
*
* This adds the entry to the hash queues and initializes @inode.
* The entry was actually filled in earlier during d_alloc().
*/
static inline void d_add(struct dentry *entry, struct inode *inode)
{
d_instantiate(entry, inode);
d_rehash(entry);
}
/**
* d_add_unique - add dentry to hash queues without aliasing
* @entry: dentry to add
* @inode: The inode to attach to this dentry
*
* This adds the entry to the hash queues and initializes @inode.
* The entry was actually filled in earlier during d_alloc().
*/
static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
{
struct dentry *res;
res = d_instantiate_unique(entry, inode);
d_rehash(res != NULL ? res : entry);
return res;
}
extern void d_add(struct dentry *, struct inode *);
extern void dentry_update_name_case(struct dentry *, struct qstr *);

View File

@ -320,6 +320,7 @@ struct writeback_control;
#define IOCB_EVENTFD (1 << 0)
#define IOCB_APPEND (1 << 1)
#define IOCB_DIRECT (1 << 2)
#define IOCB_HIPRI (1 << 3)
struct kiocb {
struct file *ki_filp;
@ -1539,11 +1540,6 @@ extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
extern int vfs_whiteout(struct inode *, struct dentry *);
/*
* VFS dentry helper functions.
*/
extern void dentry_unhash(struct dentry *dentry);
/*
* VFS file helper functions.
*/
@ -1709,9 +1705,9 @@ extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *)
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
unsigned long, loff_t *);
unsigned long, loff_t *, int);
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
unsigned long, loff_t *);
unsigned long, loff_t *, int);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,

View File

@ -16,15 +16,6 @@
#include <linux/slab.h>
#include <linux/bug.h>
/*
* fsnotify_d_instantiate - instantiate a dentry for inode
*/
static inline void fsnotify_d_instantiate(struct dentry *dentry,
struct inode *inode)
{
__fsnotify_d_instantiate(dentry, inode);
}
/* Notify this dentry's parent about a child's events. */
static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{

View File

@ -290,14 +290,9 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
/*
* fsnotify_d_instantiate - instantiate a dentry for inode
*/
static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
static inline void __fsnotify_d_instantiate(struct dentry *dentry)
{
if (!inode)
return;
spin_lock(&dentry->d_lock);
__fsnotify_update_dcache_flags(dentry);
spin_unlock(&dentry->d_lock);
}
/* called from fsnotify listeners, such as fanotify or dnotify */
@ -396,7 +391,7 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{}
static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
static inline void __fsnotify_d_instantiate(struct dentry *dentry)
{}
static inline u32 fsnotify_get_cookie(void)

View File

@ -31,6 +31,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_PARENT 0x0010
#define LOOKUP_REVAL 0x0020
#define LOOKUP_RCU 0x0040
#define LOOKUP_NO_REVAL 0x0080
/*
* Intent data

View File

@ -575,8 +575,14 @@ asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
size_t count, loff_t pos);
asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
int flags);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
int flags);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
asmlinkage long sys_chdir(const char __user *filename);

View File

@ -304,4 +304,7 @@ struct fsxattr {
#define SYNC_FILE_RANGE_WRITE 2
#define SYNC_FILE_RANGE_WAIT_AFTER 4
/* flags for preadv2/pwritev2: */
#define RWF_HIPRI 0x00000001 /* high priority request, poll if possible */
#endif /* _UAPI_LINUX_FS_H */
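And the write-side counterpart, again only a hedged sketch for x86-64 (syscall number 328 from the table above): passing an offset of -1 makes pwritev2 use and update the regular file position, which is exactly the pos == -1 branch in the new pwritev2 syscall body in the read_write.c hunks above.

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef __NR_pwritev2
#define __NR_pwritev2 328		/* x86_64 number from the table above */
#endif

int main(void)
{
	static const char msg[] = "pwritev2 says hello\n";
	struct iovec iov = { .iov_base = (void *)msg, .iov_len = sizeof(msg) - 1 };

	/* pos_l = pos_h = -1 encodes "no offset": the call behaves like
	 * writev() with a flags argument (0 here, RWF_HIPRI also allowed). */
	ssize_t n = syscall(__NR_pwritev2, STDOUT_FILENO, &iov, 1, -1L, -1L, 0);

	return n < 0 ? 1 : 0;
}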