svc: Move sk_reserved to svc_xprt

This functionally trivial patch moves the sk_reserved field to the
transport-independent svc_xprt structure, where it becomes xpt_reserved.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
commit 7a90e8cc21 (parent 7a18208383)
Authored by Tom Tucker on 2007-12-30 21:07:55 -06:00; committed by J. Bruce Fields
3 changed files with 6 additions and 7 deletions
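For orientation, here is a condensed sketch of the two structures after this change. It is an illustration only: member order is not significant, everything not touched by the hunks below is elided, and the embedded sk_xprt member is shown because the diff reaches the counter through it.

/*
 * Condensed sketch, not the full kernel definitions: the reservation
 * counter now lives in the transport-independent svc_xprt that
 * svc_sock embeds, so socket code reaches it as
 * svsk->sk_xprt.xpt_reserved.
 */
struct svc_xprt {
	/* ...other transport-independent state elided... */
	struct svc_pool		*xpt_pool;	/* current pool iff queued */
	struct svc_serv		*xpt_server;	/* service for transport */
	atomic_t		xpt_reserved;	/* space on outq that is rsvd */
};

struct svc_sock {
	struct svc_xprt		sk_xprt;	/* embedded generic transport */
	struct socket		*sk_sock;	/* berkeley socket layer */
	struct sock		*sk_sk;		/* INET layer */
	/* sk_reserved is gone; callers now use sk_xprt.xpt_reserved */
	/* ...remaining socket-specific fields elided... */
};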

include/linux/sunrpc/svc_xprt.h

@@ -53,6 +53,7 @@ struct svc_xprt {
 	struct svc_pool		*xpt_pool;	/* current pool iff queued */
 	struct svc_serv		*xpt_server;	/* service for transport */
+	atomic_t		xpt_reserved;	/* space on outq that is rsvd */
 };
 int svc_reg_xprt_class(struct svc_xprt_class *);

include/linux/sunrpc/svcsock.h

@@ -20,8 +20,6 @@ struct svc_sock {
 	struct socket		*sk_sock;	/* berkeley socket layer */
 	struct sock		*sk_sk;		/* INET layer */
-	atomic_t		sk_reserved;	/* space on outq that is reserved */
 	spinlock_t		sk_lock;	/* protects sk_deferred and
 						 * sk_info_authunix */
 	struct list_head	sk_deferred;	/* deferred requests that need to

net/sunrpc/svcsock.c

@@ -288,7 +288,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 		rqstp->rq_sock = svsk;
 		svc_xprt_get(&svsk->sk_xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
-		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
+		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
 		BUG_ON(svsk->sk_xprt.xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
@@ -353,7 +353,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 	if (space < rqstp->rq_reserved) {
 		struct svc_sock *svsk = rqstp->rq_sock;
-		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
+		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_xprt.xpt_reserved);
 		rqstp->rq_reserved = space;
 		svc_sock_enqueue(svsk);
@@ -881,7 +881,7 @@ static int svc_udp_has_wspace(struct svc_xprt *xprt)
 	 * sock space.
 	 */
 	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
+	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
 	if (required*2 > sock_wspace(svsk->sk_sk))
 		return 0;
 	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
@@ -1327,7 +1327,7 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
 	 * sock space.
 	 */
 	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
+	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
 	wspace = sk_stream_wspace(svsk->sk_sk);
 	if (wspace < sk_stream_min_wspace(svsk->sk_sk))
@@ -1544,7 +1544,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		rqstp->rq_sock = svsk;
 		svc_xprt_get(&svsk->sk_xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
-		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
+		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
 	} else {
 		/* No data pending. Go to sleep */
 		svc_thread_enqueue(pool, rqstp);
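
The two has_wspace hunks above share the same pattern: add the space already reserved for queued replies to one worst-case reply, and refuse new work unless the socket can absorb twice that amount. A condensed illustration of the UDP variant after this patch follows; the body is abridged, and the recovery of svsk and serv from the xprt argument as well as the final return 1 are assumptions about the surrounding function, not part of the hunk.

static int svc_udp_has_wspace(struct svc_xprt *xprt)
{
	/* Assumed context: the svc_sock is recovered from its embedded
	 * svc_xprt, and the owning service from the transport. */
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv *serv = xprt->xpt_server;
	int required;

	/* Request write-space notification until we know there is room. */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

	/* Space already promised to queued requests plus one worst-case
	 * reply; demand twice that much free send space on the socket. */
	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
	if (required * 2 > sock_wspace(svsk->sk_sk))
		return 0;

	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}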