sctp: ABORT if receive, reassembly, or reordering queue is not empty while closing socket

Trigger a user ABORT if the application closes a socket that still
has data queued on the socket receive queue, or chunks waiting on the
reassembly or ordering queues, as closing at that point implies data
is being lost, defeating the point of a graceful shutdown.

This matches the behavior TCP already implements.
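
As a rough userspace sketch of the effect (illustrative only, not part
of the patch; it assumes a one-to-one style SCTP socket created with
socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP) and a hypothetical
close_with_unread_data() helper):

/* Illustrative only: user-visible effect of this patch on a one-to-one
 * (TCP-style) SCTP socket.  listen_fd is assumed to have been created
 * with socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP), bound and listening.
 */
#include <sys/socket.h>
#include <unistd.h>

static int close_with_unread_data(int listen_fd)
{
	int fd = accept(listen_fd, NULL, NULL);

	if (fd < 0)
		return -1;

	/* Suppose the peer has sent DATA chunks the application never
	 * reads: they sit on the receive, reassembly or ordering queues.
	 * Closing here discards that data, so the kernel now tears the
	 * association down with an ABORT instead of a graceful SHUTDOWN,
	 * just as TCP sends a RST when unread data is dropped on close().
	 */
	return close(fd);
}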

We do not check the input queue, because that would mean parsing
every chunk on it to look for unacknowledged data, which seems like
too much effort. The input queue may also hold control chunks or
duplicated chunks, and those should not prevent a graceful shutdown.
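
A hypothetical userspace-style helper, count_data_chunks(), sketches
what that parsing would involve under the RFC 4960 chunk layout
(illustrative only, not kernel code); even after spotting a DATA chunk
this way, only the association state could say whether it is new,
duplicated, or already acknowledged:

/* Illustrative only: walk the chunks of one raw SCTP packet and count
 * DATA chunks.  Layout per RFC 4960: 12-byte common header, then
 * chunks of 1-byte type, 1-byte flags, 2-byte length (big endian,
 * includes the 4-byte chunk header), each padded to 4 bytes.
 */
#include <stddef.h>
#include <stdint.h>

#define SCTP_COMMON_HDR_LEN	12
#define SCTP_CID_DATA		0

static size_t count_data_chunks(const uint8_t *pkt, size_t len)
{
	size_t off = SCTP_COMMON_HDR_LEN;
	size_t ndata = 0;

	while (off + 4 <= len) {
		uint8_t type = pkt[off];
		uint16_t clen = ((uint16_t)pkt[off + 2] << 8) | pkt[off + 3];

		if (clen < 4 || off + clen > len)
			break;		/* malformed chunk */

		if (type == SCTP_CID_DATA)
			ndata++;	/* may still be a duplicate or already
					 * acked - only association state knows */

		off += ((size_t)clen + 3) & ~(size_t)3;	/* 4-byte alignment */
	}
	return ndata;
}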

Signed-off-by: Thomas Graf <tgraf@infradead.org>
Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Thomas Graf 2011-07-08 04:37:46 +00:00 committed by David S. Miller
parent 3f97fae948
commit cd4fcc704f
3 changed files with 22 additions and 9 deletions

include/net/sctp/ulpevent.h

@@ -80,7 +80,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
 
 void sctp_ulpevent_free(struct sctp_ulpevent *);
 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
 
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
 	const struct sctp_association *asoc,

net/sctp/socket.c

@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc;
 	struct list_head *pos, *temp;
+	unsigned int data_was_unread;
 
 	SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
 
@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
 	ep = sctp_sk(sk)->ep;
 
+	/* Clean up any skbs sitting on the receive queue.  */
+	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
 	/* Walk all associations on an endpoint.  */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			}
 		}
 
-		if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+		    !skb_queue_empty(&asoc->ulpq.reasm) ||
+		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
 			struct sctp_chunk *chunk;
 
 			chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			sctp_primitive_SHUTDOWN(asoc, NULL);
 	}
 
-	/* Clean up any skbs sitting on the receive queue.  */
-	sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-	sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
 	/* On a TCP-style socket, block for at most linger_time if set. */
 	if (sctp_style(sk, TCP) && timeout)
 		sctp_wait_for_close(sk, timeout);

net/sctp/ulpevent.c

@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
-		sctp_ulpevent_free(sctp_skb2event(skb));
+	unsigned int data_unread = 0;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+		if (!sctp_ulpevent_is_notification(event))
+			data_unread += skb->len;
+
+		sctp_ulpevent_free(event);
+	}
+
+	return data_unread;
 }