xfs: cleanup xfs_qm_dqlookup

Rearrange the code to avoid the conditional locking around the flist_locked
variable.  This means we lose a (rather pointless) assert, and hold the
freelist lock a bit longer for one corner case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
This commit is contained in:
Christoph Hellwig 2011-12-06 21:58:15 +00:00 committed by Ben Myers
parent 800b484ec0
commit 497507b9ee

View file

@@ -710,12 +710,9 @@ xfs_qm_dqlookup(
 	xfs_dquot_t		**O_dqpp)
 {
 	xfs_dquot_t		*dqp;
-	uint			flist_locked;
 
 	ASSERT(mutex_is_locked(&qh->qh_lock));
 
-	flist_locked = B_FALSE;
-
 	/*
 	 * Traverse the hashchain looking for a match
 	 */
@@ -750,31 +747,19 @@ xfs_qm_dqlookup(
 				xfs_dqlock(dqp);
 				dqp->dq_flags &= ~(XFS_DQ_WANT);
 			}
-			flist_locked = B_TRUE;
-		}
 
-		/*
-		 * id couldn't have changed; we had the hashlock all
-		 * along
-		 */
-		ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
-
-		if (flist_locked) {
-			if (dqp->q_nrefs != 0) {
-				mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-				flist_locked = B_FALSE;
-			} else {
+			if (dqp->q_nrefs == 0) {
 				/* take it off the freelist */
 				trace_xfs_dqlookup_freelist(dqp);
 				list_del_init(&dqp->q_freelist);
 				xfs_Gqm->qm_dqfrlist_cnt--;
 			}
+			XFS_DQHOLD(dqp);
+			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+		} else {
+			XFS_DQHOLD(dqp);
 		}
-
-		XFS_DQHOLD(dqp);
-
-		if (flist_locked)
-			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-
 		/*
 		 * move the dquot to the front of the hashchain
 		 */