IB/mthca: Annotate CQ locking

mthca_lock_cqs()/mthca_unlock_cqs() are helper functions that
lock/unlock both CQs attached to a QP in the proper order to avoid
AB-BA deadlocks.  Annotate this so sparse can understand what's going
on (and warn us if we misuse these functions).

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Roland Dreier 2009-09-05 20:36:15 -07:00
parent deecb5d672
commit ffe063f32b
1 changed file with 8 additions and 4 deletions
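For readers unfamiliar with the pattern, the idea this patch annotates can be sketched in isolation. The code below is an illustrative reduction, not the driver source: struct foo_cq and the foo_lock_pair()/foo_unlock_pair() helpers are made-up names, and it assumes an in-kernel build so that <linux/spinlock.h> and the sparse context annotations (__acquires, __releases, __acquire, __release) are available. The ordering rule is the one the driver uses: always take the lock with the lower CQ number first, so every path acquires the pair in a single global order and AB-BA deadlock is impossible, and spin_lock_nested() tells lockdep that taking a second lock of the same lock class is intentional.

#include <linux/spinlock.h>

/* Illustrative stand-in for a CQ: just a lock plus an ordering key. */
struct foo_cq {
	spinlock_t	lock;
	int		cqn;
};

/*
 * Take both locks in a global order (lower cqn first).  The
 * __acquires() annotations on the declaration tell sparse that this
 * function returns with both locks held; the dummy __acquire() keeps
 * sparse's count balanced when the two CQs are the same object and
 * only one real lock is taken.
 */
static void foo_lock_pair(struct foo_cq *a, struct foo_cq *b)
	__acquires(&a->lock) __acquires(&b->lock)
{
	if (a == b) {
		spin_lock_irq(&a->lock);
		__acquire(&b->lock);	/* no-op, for sparse only */
	} else if (a->cqn < b->cqn) {
		spin_lock_irq(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&b->lock);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}

/* Release in the reverse of the acquisition order. */
static void foo_unlock_pair(struct foo_cq *a, struct foo_cq *b)
	__releases(&a->lock) __releases(&b->lock)
{
	if (a == b) {
		__release(&b->lock);	/* no-op, for sparse only */
		spin_unlock_irq(&a->lock);
	} else if (a->cqn < b->cqn) {
		spin_unlock(&b->lock);
		spin_unlock_irq(&a->lock);
	} else {
		spin_unlock(&a->lock);
		spin_unlock_irq(&b->lock);
	}
}

The dummy __acquire()/__release() calls in the a == b branch (send_cq == recv_cq in the driver) compile to nothing; they exist only so that sparse's acquire/release accounting matches the two acquisitions promised by the declaration when just one real spin_lock_irq() happens.
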

@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 }
 
 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
 		spin_lock_irq(&send_cq->lock);
-	else if (send_cq->cqn < recv_cq->cqn) {
+		__acquire(&recv_cq->lock);
+	} else if (send_cq->cqn < recv_cq->cqn) {
 		spin_lock_irq(&send_cq->lock);
 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
 	} else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
 }
 
 static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+	__releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
+		__release(&recv_cq->lock);
 		spin_unlock_irq(&send_cq->lock);
-	else if (send_cq->cqn < recv_cq->cqn) {
+	} else if (send_cq->cqn < recv_cq->cqn) {
 		spin_unlock(&recv_cq->lock);
 		spin_unlock_irq(&send_cq->lock);
 	} else {
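
Call sites use the two helpers as a bracket around work that must exclude completion processing on both CQs; a minimal, hypothetical usage sketch (the comment stands in for driver-specific work elided here):

	mthca_lock_cqs(send_cq, recv_cq);
	/* ... tear down or publish QP state that both CQs reference ... */
	mthca_unlock_cqs(send_cq, recv_cq);

With the annotations in place, sparse can check that every branch inside the helpers acquires and releases exactly the locks the declarations promise.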