[SCTP]: Fix a couple of races between sctp_peeloff() and sctp_rcv().

Validate and update the sk in sctp_rcv() to avoid the race where an
assoc/ep could move to a different socket after we get the sk, but before
the skb is added to the backlog (a sketch of the resulting lock handoff
follows the commit metadata below).

Also migrate the skbs in the backlog queue to the new sk when doing a
peeloff (a toy model of that migration loop follows the
sctp_backlog_migrate() hunk below).

Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Author: Sridhar Samudrala
Date:   2006-01-17 11:56:26 -08:00
Commit: c4d2444e99
Parent: 313e7b4d25
3 changed files, 40 insertions(+), 1 deletion(-)
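
The following is a minimal user-space sketch (ordinary C with pthreads, not kernel code) of the lock-handoff pattern the sctp_rcv() hunk below uses: after locking the socket found during lookup, re-check which socket currently owns the receiver and, if it has moved, lock the new owner before dropping the stale lock so the packet is queued on the socket that really owns the association. All names here (fake_sock, receiver, deliver) are invented for illustration only.

/* Toy model of the sctp_rcv() lock handoff; all names are invented. */
#include <pthread.h>
#include <stdio.h>

struct fake_sock {
	pthread_mutex_t lock;
	int backlog;			/* stand-in for sk_backlog */
};

struct receiver {			/* stand-in for sctp_ep_common */
	struct fake_sock *sk;		/* may be changed by a peeloff */
};

static void deliver(struct receiver *rcvr, struct fake_sock *sk)
{
	pthread_mutex_lock(&sk->lock);

	/* The owner may have changed between lookup and locking. */
	if (sk != rcvr->sk) {
		pthread_mutex_lock(&rcvr->sk->lock);	/* lock the new owner */
		pthread_mutex_unlock(&sk->lock);	/* then drop the stale one */
		sk = rcvr->sk;
	}

	sk->backlog++;			/* queue on the current owner */
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct fake_sock a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct fake_sock b = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct receiver r = { &a };

	deliver(&r, &a);	/* normal case: owner unchanged */
	r.sk = &b;		/* simulate a peeloff moving the receiver */
	deliver(&r, &a);	/* stale sk passed in; handoff picks &b */

	printf("a.backlog=%d b.backlog=%d\n", a.backlog, b.backlog);
	return 0;
}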


@@ -175,6 +175,8 @@ void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
void sctp_icmp_proto_unreachable(struct sock *sk,
				 struct sctp_association *asoc,
				 struct sctp_transport *t);
void sctp_backlog_migrate(struct sctp_association *assoc,
			  struct sock *oldsk, struct sock *newsk);

/*
 * Section: Macros, externs, and inlines

@@ -257,12 +257,21 @@ int sctp_rcv(struct sk_buff *skb)
	 */
	sctp_bh_lock_sock(sk);

	/* It is possible that the association could have moved to a different
	 * socket if it is peeled off. If so, update the sk.
	 */
	if (sk != rcvr->sk) {
		sctp_bh_lock_sock(rcvr->sk);
		sctp_bh_unlock_sock(sk);
		sk = rcvr->sk;
	}

	if (sock_owned_by_user(sk))
		sk_add_backlog(sk, skb);
	else
		sctp_backlog_rcv(sk, skb);

	/* Release the sock and the sock ref we took in the lookup calls.
	 * The asoc/ep ref will be released in sctp_backlog_rcv.
	 */
	sctp_bh_unlock_sock(sk);
@@ -297,6 +306,9 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	struct sctp_ep_common *rcvr = NULL;

	rcvr = chunk->rcvr;

	BUG_TRAP(rcvr->sk == sk);

	if (rcvr->dead) {
		sctp_chunk_free(chunk);
	} else {
@@ -313,6 +325,27 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	return 0;
}

void sctp_backlog_migrate(struct sctp_association *assoc,
			  struct sock *oldsk, struct sock *newsk)
{
	struct sk_buff *skb;
	struct sctp_chunk *chunk;

	skb = oldsk->sk_backlog.head;
	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
	while (skb != NULL) {
		struct sk_buff *next = skb->next;

		chunk = SCTP_INPUT_CB(skb)->chunk;
		skb->next = NULL;
		if (&assoc->base == chunk->rcvr)
			sk_add_backlog(newsk, skb);
		else
			sk_add_backlog(oldsk, skb);
		skb = next;
	}
}

/* Handle icmp frag needed error. */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
			   struct sctp_transport *t, __u32 pmtu)
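
Below is a self-contained toy model (ordinary user-space C, invented names: buf, toy_sock, enqueue, migrate) of what sctp_backlog_migrate() above does: detach the old socket's entire backlog in one shot, then walk it and re-queue each buffer on either the new socket (if it belongs to the peeled-off association) or back on the old one. It is only a sketch of the redistribution logic, not the kernel code.

/* Toy model of the backlog-migration loop; all names are invented. */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
	int rcvr;			/* stand-in for chunk->rcvr */
};

struct toy_sock {
	struct buf *head, *tail;	/* stand-in for sk_backlog */
};

static void enqueue(struct toy_sock *sk, struct buf *b)
{
	b->next = NULL;
	if (sk->tail)
		sk->tail->next = b;
	else
		sk->head = b;
	sk->tail = b;
}

static void migrate(struct toy_sock *oldsk, struct toy_sock *newsk, int assoc)
{
	struct buf *b = oldsk->head;

	/* Detach the whole backlog, then redistribute it. */
	oldsk->head = oldsk->tail = NULL;
	while (b != NULL) {
		struct buf *next = b->next;

		b->next = NULL;
		if (b->rcvr == assoc)
			enqueue(newsk, b);
		else
			enqueue(oldsk, b);
		b = next;
	}
}

int main(void)
{
	struct toy_sock oldsk = { NULL, NULL }, newsk = { NULL, NULL };
	int i;

	for (i = 0; i < 6; i++) {
		struct buf *b = malloc(sizeof(*b));
		b->rcvr = i % 2;	/* alternate between two associations */
		enqueue(&oldsk, b);
	}

	migrate(&oldsk, &newsk, 1);	/* "peel" association 1 off to newsk */

	for (struct buf *b = newsk.head; b; b = b->next)
		printf("newsk got a buffer for assoc %d\n", b->rcvr);
	return 0;
}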


@@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
	 */
	newsp->type = type;

	spin_lock_bh(&oldsk->sk_lock.slock);
	/* Migrate the backlog from oldsk to newsk. */
	sctp_backlog_migrate(assoc, oldsk, newsk);
	/* Migrate the association to the new socket. */
	sctp_assoc_migrate(assoc, newsk);
	spin_unlock_bh(&oldsk->sk_lock.slock);

	/* If the association on the newsk is already closed before accept()
	 * is called, set RCV_SHUTDOWN flag.
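
For context, this is a usage-side sketch of the path the patch hardens, assuming the lksctp-tools helpers sctp_recvmsg() and sctp_peeloff() (link with -lsctp); error handling is trimmed and it is not part of the patch. While the peeloff runs, packets for the association may already be sitting in, or racing into, the one-to-many socket's backlog, which is exactly what sctp_backlog_migrate() now moves to the peeled-off socket.

/* Sketch only: peel an association off a one-to-many SCTP socket. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
	struct sockaddr_in addr = { 0 };
	struct sctp_sndrcvinfo sinfo;
	char buf[1024];
	int flags = 0, fd, assoc_fd, n;

	fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);	/* one-to-many */
	if (fd < 0)
		return 1;

	addr.sin_family = AF_INET;
	addr.sin_port = htons(9999);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 5);

	/* Receive one message to learn which association it came from. */
	memset(&sinfo, 0, sizeof(sinfo));
	n = sctp_recvmsg(fd, buf, sizeof(buf), NULL, NULL, &sinfo, &flags);
	if (n < 0)
		return 1;

	/* Peel that association off onto its own one-to-one socket; any
	 * backlogged packets for it are migrated to assoc_fd by the kernel. */
	assoc_fd = sctp_peeloff(fd, sinfo.sinfo_assoc_id);
	if (assoc_fd < 0)
		return 1;

	printf("peeled association %u off to fd %d\n",
	       (unsigned)sinfo.sinfo_assoc_id, assoc_fd);
	close(assoc_fd);
	close(fd);
	return 0;
}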