[DCCP]: Initial dccp_poll implementation
Tested with a patched netcat; no horror stories so far 8)

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
8efa544f9c
commit
331968bd0c
|
@ -34,6 +34,7 @@ static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
|
||||||
dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
|
dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
|
||||||
dccp_fin(sk, skb);
|
dccp_fin(sk, skb);
|
||||||
dccp_set_state(sk, DCCP_CLOSED);
|
dccp_set_state(sk, DCCP_CLOSED);
|
||||||
|
sk_wake_async(sk, 1, POLL_HUP);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
|
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
|
||||||
|
|
|
@ -140,6 +140,62 @@ int dccp_disconnect(struct sock *sk, int flags)
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Wait for a DCCP event.
|
||||||
|
*
|
||||||
|
* Note that we don't need to lock the socket, as the upper poll layers
|
||||||
|
* take care of normal races (between the test and the event) and we don't
|
||||||
|
* go look at any of the socket buffers directly.
|
||||||
|
*/
|
||||||
|
static unsigned int dccp_poll(struct file *file, struct socket *sock,
|
||||||
|
poll_table *wait)
|
||||||
|
{
|
||||||
|
unsigned int mask;
|
||||||
|
struct sock *sk = sock->sk;
|
||||||
|
|
||||||
|
poll_wait(file, sk->sk_sleep, wait);
|
||||||
|
if (sk->sk_state == DCCP_LISTEN)
|
||||||
|
return inet_csk_listen_poll(sk);
|
||||||
|
|
||||||
|
/* Socket is not locked. We are protected from async events
|
||||||
|
by poll logic and correct handling of state changes
|
||||||
|
made by another threads is impossible in any case.
|
||||||
|
*/
|
||||||
|
|
||||||
|
mask = 0;
|
||||||
|
if (sk->sk_err)
|
||||||
|
mask = POLLERR;
|
||||||
|
|
||||||
|
if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
|
||||||
|
mask |= POLLHUP;
|
||||||
|
if (sk->sk_shutdown & RCV_SHUTDOWN)
|
||||||
|
mask |= POLLIN | POLLRDNORM;
|
||||||
|
|
||||||
|
/* Connected? */
|
||||||
|
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
|
||||||
|
if (atomic_read(&sk->sk_rmem_alloc) > 0)
|
||||||
|
mask |= POLLIN | POLLRDNORM;
|
||||||
|
|
||||||
|
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
|
||||||
|
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
|
||||||
|
mask |= POLLOUT | POLLWRNORM;
|
||||||
|
} else { /* send SIGIO later */
|
||||||
|
set_bit(SOCK_ASYNC_NOSPACE,
|
||||||
|
&sk->sk_socket->flags);
|
||||||
|
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
|
||||||
|
|
||||||
|
/* Race breaker. If space is freed after
|
||||||
|
* wspace test but before the flags are set,
|
||||||
|
* IO signal will be lost.
|
||||||
|
*/
|
||||||
|
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
|
||||||
|
mask |= POLLOUT | POLLWRNORM;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return mask;
|
||||||
|
}
|
||||||
|
|
||||||
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
|
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
|
||||||
{
|
{
|
||||||
dccp_pr_debug("entry\n");
|
dccp_pr_debug("entry\n");
|
||||||
|
@ -478,7 +534,8 @@ static struct proto_ops inet_dccp_ops = {
|
||||||
.socketpair = sock_no_socketpair,
|
.socketpair = sock_no_socketpair,
|
||||||
.accept = inet_accept,
|
.accept = inet_accept,
|
||||||
.getname = inet_getname,
|
.getname = inet_getname,
|
||||||
.poll = sock_no_poll,
|
/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
|
||||||
|
.poll = dccp_poll,
|
||||||
.ioctl = inet_ioctl,
|
.ioctl = inet_ioctl,
|
||||||
/* FIXME: work on inet_listen to rename it to sock_common_listen */
|
/* FIXME: work on inet_listen to rename it to sock_common_listen */
|
||||||
.listen = inet_dccp_listen,
|
.listen = inet_dccp_listen,
|
||||||
|
|
Loading…
Reference in a new issue