
dcache: let the dentry count go down to zero without taking d_lock

We can be more aggressive about dropping the dentry count to zero without taking d_lock, if we are clever and careful: in the common case (a hashed dentry with no d_delete() op, already on the LRU list) there is nothing to do beyond the decrement itself. This is subtle.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Linus Torvalds 2015-01-09 15:19:03 -08:00 committed by Al Viro
parent 32426f6653
commit 360f54796e
3 changed files with 144 additions and 13 deletions

fs/dcache.c

@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
 	 * dentry_iput drops the locks, at which point nobody (except
 	 * transient RCU lookups) can reach this dentry.
 	 */
-	BUG_ON((int)dentry->d_lockref.count > 0);
+	BUG_ON(dentry->d_lockref.count > 0);
 	this_cpu_dec(nr_dentry);
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
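
The (int) cast can go away because the count itself becomes signed (see the lockref.h hunk below). For illustration, a standalone userspace sketch, not part of the commit, of the pitfall the old cast guarded against: a dead lockref holds a negative sentinel (-128, stored by lockref_mark_dead()), which an unsigned comparison misreads as a large positive refcount:

#include <stdio.h>

int main(void)
{
	unsigned int count = (unsigned int)-128;	/* dead-lockref sentinel */

	/* Unsigned compare: the dead marker looks like a live refcount. */
	printf("count > 0      -> %d\n", count > 0);		/* prints 1 */
	/* The old code's cast restored the intended signed compare. */
	printf("(int)count > 0 -> %d\n", (int)count > 0);	/* prints 0 */
	return 0;
}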
@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
 	struct dentry *parent = dentry->d_parent;
 	if (IS_ROOT(dentry))
 		return NULL;
-	if (unlikely((int)dentry->d_lockref.count < 0))
+	if (unlikely(dentry->d_lockref.count < 0))
 		return NULL;
 	if (likely(spin_trylock(&parent->d_lock)))
 		return parent;
@@ -590,6 +590,110 @@ again:
 	return parent;
 }
 
+/*
+ * Try to do a lockless dput(), and return whether that was successful.
+ *
+ * If unsuccessful, we return false, having already taken the dentry lock.
+ *
+ * The caller needs to hold the RCU read lock, so that the dentry is
+ * guaranteed to stay around even if the refcount goes down to zero!
+ */
+static inline bool fast_dput(struct dentry *dentry)
+{
+	int ret;
+	unsigned int d_flags;
+
+	/*
+	 * If we have a d_op->d_delete() operation, we should not
+	 * let the dentry count go to zero, so use "put_or_lock".
+	 */
+	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
+		return lockref_put_or_lock(&dentry->d_lockref);
+
+	/*
+	 * .. otherwise, we can try to just decrement the
+	 * lockref optimistically.
+	 */
+	ret = lockref_put_return(&dentry->d_lockref);
+
+	/*
+	 * If the lockref_put_return() failed due to the lock being held
+	 * by somebody else, the fast path has failed. We will need to
+	 * get the lock, and then check the count again.
+	 */
+	if (unlikely(ret < 0)) {
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_lockref.count > 1) {
+			dentry->d_lockref.count--;
+			spin_unlock(&dentry->d_lock);
+			return 1;
+		}
+		return 0;
+	}
+
+	/*
+	 * If we weren't the last ref, we're done.
+	 */
+	if (ret)
+		return 1;
+
+	/*
+	 * Careful, careful. The reference count went down
+	 * to zero, but we don't hold the dentry lock, so
+	 * somebody else could get it again, and do another
+	 * dput(), and we need to not race with that.
+	 *
+	 * However, there is a very special and common case
+	 * where we don't care, because there is nothing to
+	 * do: the dentry is still hashed, it does not have
+	 * a 'delete' op, and it's referenced and already on
+	 * the LRU list.
+	 *
+	 * NOTE! Since we aren't locked, these values are
+	 * not "stable". However, it is sufficient that at
+	 * some point after we dropped the reference the
+	 * dentry was hashed and the flags had the proper
+	 * value. Other dentry users may have re-gotten
+	 * a reference to the dentry and change that, but
+	 * our work is done - we can leave the dentry
+	 * around with a zero refcount.
+	 */
+	smp_rmb();
+	d_flags = ACCESS_ONCE(dentry->d_flags);
+	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+
+	/* Nothing to do? Dropping the reference was all we needed? */
+	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+		return 1;
+
+	/*
+	 * Not the fast normal case? Get the lock. We've already decremented
+	 * the refcount, but we'll need to re-check the situation after
+	 * getting the lock.
+	 */
+	spin_lock(&dentry->d_lock);
+
+	/*
+	 * Did somebody else grab a reference to it in the meantime, and
+	 * we're no longer the last user after all? Alternatively, somebody
+	 * else could have killed it and marked it dead. Either way, we
+	 * don't need to do anything else.
+	 */
+	if (dentry->d_lockref.count) {
+		spin_unlock(&dentry->d_lock);
+		return 1;
+	}
+
+	/*
+	 * Re-get the reference we optimistically dropped. We hold the
+	 * lock, and we just tested that it was zero, so we can just
+	 * set it to 1.
+	 */
+	dentry->d_lockref.count = 1;
+	return 0;
+}
+
 /*
  * This is dput
  *
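
To make the fast-path logic concrete outside the kernel, here is a hedged sketch in portable C11 (my own mock, not kernel code: struct obj, ON_LRU and REFERENCED are invented stand-ins for the dentry, its lockref and the d_flags bits; the acquire fence plays the role of smp_rmb()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
	atomic_uint flags;	/* stand-in for dentry->d_flags */
};

#define ON_LRU		0x1u	/* hypothetical flag bits */
#define REFERENCED	0x2u

/* True if the put completed locklessly; false means the caller must
 * take the lock and sort things out, as fast_dput()'s slow path does. */
static bool fast_put(struct obj *o)
{
	/* Optimistic decrement, like lockref_put_return(). */
	if (atomic_fetch_sub_explicit(&o->refcount, 1,
				      memory_order_release) != 1)
		return true;	/* we were not the last reference */

	/*
	 * We hit zero without holding a lock. Walking away is still
	 * fine if the object is retained elsewhere; the fence orders
	 * the decrement against the flags read, like smp_rmb().
	 */
	atomic_thread_fence(memory_order_acquire);
	return (atomic_load_explicit(&o->flags, memory_order_relaxed)
		& (ON_LRU | REFERENCED)) == (ON_LRU | REFERENCED);
}

int main(void)
{
	struct obj o;

	atomic_init(&o.refcount, 2);
	atomic_init(&o.flags, ON_LRU | REFERENCED);

	printf("%d\n", fast_put(&o));	/* 1: not the last reference */
	printf("%d\n", fast_put(&o));	/* 1: hit zero, but safely on LRU */
	return 0;
}

The re-check after the decrement is the whole trick: hitting zero is harmless as long as something else (here the LRU bit) still retains the object.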
@@ -622,8 +726,14 @@ void dput(struct dentry *dentry)
 		return;
 
 repeat:
-	if (lockref_put_or_lock(&dentry->d_lockref))
+	rcu_read_lock();
+	if (likely(fast_dput(dentry))) {
+		rcu_read_unlock();
 		return;
+	}
+
+	/* Slow case: now with the dentry lock held */
+	rcu_read_unlock();
 
 	/* Unreachable? Get rid of it */
 	if (unlikely(d_unhashed(dentry)))
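
The rcu_read_lock()/rcu_read_unlock() pair is what makes it safe to let the count hit zero first and inspect the dentry afterwards: another CPU may kill and free a zero-count dentry, but the memory cannot be reclaimed while a reader sits inside an RCU read-side section. A hedged userspace analogue using liburcu (assumptions: liburcu is installed, built with cc demo.c -lurcu -lpthread; obj and reaper are invented names):

#include <urcu.h>	/* liburcu: rcu_read_lock() and friends */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static struct obj {
	int count;
} *global_obj;

/* Updater: detach the object, then free it once readers are done. */
static void *reaper(void *unused)
{
	struct obj *victim = global_obj;

	rcu_assign_pointer(global_obj, NULL);
	synchronize_rcu();	/* waits for the read-side section below */
	free(victim);
	return NULL;
}

int main(void)
{
	pthread_t t;

	global_obj = calloc(1, sizeof(*global_obj));
	rcu_register_thread();	/* reader threads must register */

	rcu_read_lock();	/* like dput(): pin the memory first */
	struct obj *o = rcu_dereference(global_obj);
	pthread_create(&t, NULL, reaper, NULL);
	/* Even if the reaper runs right now, 'o' cannot be freed yet. */
	printf("count = %d\n", o->count);
	rcu_read_unlock();	/* only now may synchronize_rcu() return */

	pthread_join(t, NULL);
	rcu_unregister_thread();
	return 0;
}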
@@ -810,7 +920,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup. Do not free it.
 		 */
-		if ((int)dentry->d_lockref.count > 0) {
+		if (dentry->d_lockref.count > 0) {
 			spin_unlock(&dentry->d_lock);
 			if (parent)
 				spin_unlock(&parent->d_lock);

include/linux/lockref.h

@@ -28,12 +28,13 @@ struct lockref {
 #endif
 		struct {
 			spinlock_t lock;
-			unsigned int count;
+			int count;
 		};
 	};
 };
 
 extern void lockref_get(struct lockref *);
+extern int lockref_put_return(struct lockref *);
 extern int lockref_get_not_zero(struct lockref *);
 extern int lockref_get_or_lock(struct lockref *);
 extern int lockref_put_or_lock(struct lockref *);
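
With count now a plain int, one word encodes three states: positive (live references), zero (unreferenced but possibly still cached), and negative (dead; lib/lockref.c's lockref_mark_dead() stores -128). A minimal userspace sketch of that convention, for illustration only:

#include <stdio.h>

static const char *state(int count)
{
	if (count < 0)
		return "dead (killed via lockref_mark_dead())";
	if (count == 0)
		return "no references, but may still be cached";
	return "live references held";
}

int main(void)
{
	const int samples[] = { 2, 1, 0, -128 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("count %4d: %s\n", samples[i], state(samples[i]));
	return 0;
}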

lib/lockref.c

@@ -60,7 +60,7 @@ void lockref_get(struct lockref *lockref)
 EXPORT_SYMBOL(lockref_get);
 
 /**
- * lockref_get_not_zero - Increments count unless the count is 0
+ * lockref_get_not_zero - Increments count unless the count is 0 or dead
  * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count was zero
  */
@@ -70,7 +70,7 @@ int lockref_get_not_zero(struct lockref *lockref)
 
 	CMPXCHG_LOOP(
 		new.count++;
-		if (!old.count)
+		if (old.count <= 0)
 			return 0;
 	,
 		return 1;
@@ -78,7 +78,7 @@ int lockref_get_not_zero(struct lockref *lockref)
 
 	spin_lock(&lockref->lock);
 	retval = 0;
-	if (lockref->count) {
+	if (lockref->count > 0) {
 		lockref->count++;
 		retval = 1;
 	}
@@ -88,7 +88,7 @@ int lockref_get_not_zero(struct lockref *lockref)
 EXPORT_SYMBOL(lockref_get_not_zero);
 
 /**
- * lockref_get_or_lock - Increments count unless the count is 0
+ * lockref_get_or_lock - Increments count unless the count is 0 or dead
  * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count was zero
  * and we got the lock instead.
@@ -97,14 +97,14 @@ int lockref_get_or_lock(struct lockref *lockref)
 {
 	CMPXCHG_LOOP(
 		new.count++;
-		if (!old.count)
+		if (old.count <= 0)
 			break;
 	,
 		return 1;
 	);
 
 	spin_lock(&lockref->lock);
-	if (!lockref->count)
+	if (lockref->count <= 0)
 		return 0;
 	lockref->count++;
 	spin_unlock(&lockref->lock);
@@ -112,6 +112,26 @@ int lockref_get_or_lock(struct lockref *lockref)
 }
 EXPORT_SYMBOL(lockref_get_or_lock);
 
+/**
+ * lockref_put_return - Decrement reference count if possible
+ * @lockref: pointer to lockref structure
+ *
+ * Decrement the reference count and return the new value.
+ * If the lockref was dead or locked, return an error.
+ */
+int lockref_put_return(struct lockref *lockref)
+{
+	CMPXCHG_LOOP(
+		new.count--;
+		if (old.count <= 0)
+			return -1;
+	,
+		return new.count;
+	);
+	return -1;
+}
+EXPORT_SYMBOL(lockref_put_return);
+
 /**
  * lockref_put_or_lock - decrements count unless count <= 1 before decrement
  * @lockref: pointer to lockref structure
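
lockref_put_return() rides CMPXCHG_LOOP(): snapshot the 64-bit {lock, count} word, apply the update, and try to compare-and-swap it back, bailing out to the slow path whenever the embedded spinlock is held. A hedged userspace model of the same shape (toy_lockref, word and put_return are my own names and packing, not the kernel's exact layout):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy lockref: a lock flag and a signed count sharing one 64-bit word. */
union toy_lockref {
	uint64_t lock_count;
	struct {
		uint32_t locked;	/* 0 = unlocked */
		int32_t count;
	};
};

static _Atomic uint64_t word;

/* Decrement and return the new count, or -1 if the word is locked or
 * the count was already <= 0, mirroring lockref_put_return(). */
static int put_return(void)
{
	uint64_t snap = atomic_load(&word);

	for (;;) {
		union toy_lockref old = { .lock_count = snap };
		union toy_lockref new = old;

		if (old.locked)
			return -1;	/* lock held: bail to the slow path */
		if (old.count <= 0)
			return -1;	/* dead or already at zero */
		new.count--;
		if (atomic_compare_exchange_weak(&word, &snap,
						 new.lock_count))
			return new.count;
		/* CAS failed: snap now holds the fresh value, retry. */
	}
}

int main(void)
{
	union toy_lockref init = { .locked = 0, .count = 2 };

	atomic_store(&word, init.lock_count);
	printf("%d\n", put_return());	/* 1 */
	printf("%d\n", put_return());	/* 0 */
	printf("%d\n", put_return());	/* -1: count already zero */
	return 0;
}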
@@ -158,7 +158,7 @@ int lockref_get_not_dead(struct lockref *lockref)
 
 	CMPXCHG_LOOP(
 		new.count++;
-		if ((int)old.count < 0)
+		if (old.count < 0)
 			return 0;
 	,
 		return 1;
@@ -166,7 +166,7 @@ int lockref_get_not_dead(struct lockref *lockref)
 
 	spin_lock(&lockref->lock);
 	retval = 0;
-	if ((int) lockref->count >= 0) {
+	if (lockref->count >= 0) {
 		lockref->count++;
 		retval = 1;
 	}