
vfs: split do_lookup()

Split do_lookup() into two functions:

  lookup_fast() - does cached lookup without i_mutex
  lookup_slow() - does lookup with i_mutex

Both follow managed dentries.

The new functions are needed by atomic_open.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Miklos Szeredi 2012-05-21 17:30:05 +02:00 committed by Al Viro
parent e41f941a23
commit 697f514df1
1 changed file with 45 additions and 14 deletions
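
To make the new calling convention concrete, here is a rough sketch (not part of the patch; the wrapper name component_lookup() is invented for illustration) of how a caller such as walk_component() is expected to combine the two halves: lookup_fast() returns 0 on a dcache hit, a negative errno on failure, and a positive value when the caller must fall back to lookup_slow(), which takes the parent's i_mutex.

/*
 * Illustrative sketch only, not kernel code; it mirrors the pattern
 * walk_component() uses after this patch.
 */
static int component_lookup(struct nameidata *nd, struct qstr *name,
			    struct path *path, struct inode **inode)
{
	int err;

	err = lookup_fast(nd, name, path, inode);	/* cached lookup, no i_mutex */
	if (err <= 0)
		return err;	/* 0: found in dcache; <0: hard error */

	err = lookup_slow(nd, name, path);	/* real lookup under parent's i_mutex */
	if (err < 0)
		return err;

	*inode = path->dentry->d_inode;
	return 0;
}

In the patch itself this logic is open-coded in walk_component(), which also reroutes its failure paths through the new out_path_put/out_err labels so that terminate_walk() is called in one place.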


@@ -1125,8 +1125,8 @@ static struct dentry *__lookup_hash(struct qstr *name,
  *  small and for now I'd prefer to have fast path as straight as possible.
  *  It _is_ time-critical.
  */
-static int do_lookup(struct nameidata *nd, struct qstr *name,
-			struct path *path, struct inode **inode)
+static int lookup_fast(struct nameidata *nd, struct qstr *name,
+		       struct path *path, struct inode **inode)
 {
 	struct vfsmount *mnt = nd->path.mnt;
 	struct dentry *dentry, *parent = nd->path.dentry;
@@ -1208,7 +1208,7 @@ unlazy:
 			goto need_lookup;
 		}
 	}
-done:
+
 	path->mnt = mnt;
 	path->dentry = dentry;
 	err = follow_managed(path, nd->flags);
@@ -1222,6 +1222,17 @@ done:
 	return 0;
 
 need_lookup:
+	return 1;
+}
+
+/* Fast lookup failed, do it the slow way */
+static int lookup_slow(struct nameidata *nd, struct qstr *name,
+		       struct path *path)
+{
+	struct dentry *dentry, *parent;
+	int err;
+
 	parent = nd->path.dentry;
 	BUG_ON(nd->inode != parent->d_inode);
+
 	mutex_lock(&parent->d_inode->i_mutex);
@@ -1229,7 +1240,16 @@ need_lookup:
 	mutex_unlock(&parent->d_inode->i_mutex);
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);
-	goto done;
+	path->mnt = nd->path.mnt;
+	path->dentry = dentry;
+	err = follow_managed(path, nd->flags);
+	if (unlikely(err < 0)) {
+		path_put_conditional(path, nd);
+		return err;
+	}
+	if (err)
+		nd->flags |= LOOKUP_JUMPED;
+	return 0;
 }
 
 static inline int may_lookup(struct nameidata *nd)
@@ -1301,21 +1321,26 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
 	 */
 	if (unlikely(type != LAST_NORM))
 		return handle_dots(nd, type);
-	err = do_lookup(nd, name, path, &inode);
+	err = lookup_fast(nd, name, path, &inode);
 	if (unlikely(err)) {
-		terminate_walk(nd);
-		return err;
-	}
-	if (!inode) {
-		path_to_nameidata(path, nd);
-		terminate_walk(nd);
-		return -ENOENT;
+		if (err < 0)
+			goto out_err;
+
+		err = lookup_slow(nd, name, path);
+		if (err < 0)
+			goto out_err;
+
+		inode = path->dentry->d_inode;
 	}
+	err = -ENOENT;
+	if (!inode)
+		goto out_path_put;
+
 	if (should_follow_link(inode, follow)) {
 		if (nd->flags & LOOKUP_RCU) {
 			if (unlikely(unlazy_walk(nd, path->dentry))) {
-				terminate_walk(nd);
-				return -ECHILD;
+				err = -ECHILD;
+				goto out_err;
 			}
 		}
 		BUG_ON(inode != path->dentry->d_inode);
@@ -1324,6 +1349,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
 	path_to_nameidata(path, nd);
 	nd->inode = inode;
 	return 0;
+
+out_path_put:
+	path_to_nameidata(path, nd);
+out_err:
+	terminate_walk(nd);
+	return err;
 }
 
 /*