
nfsd/sunrpc: turn enqueueing a svc_xprt into a svc_serv operation

For now, all services use svc_xprt_do_enqueue, but once we add
workqueue-based service support, we'll need to do something different.

Signed-off-by: Shirley Ma <shirley.ma@oracle.com>
Acked-by: Jeff Layton <jlayton@primarydata.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Jeff Layton 2015-06-08 12:06:51 -07:00 committed by J. Bruce Fields
parent 758f62fff9
commit b9e13cdfac
6 changed files with 18 additions and 11 deletions
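
For illustration only (not part of this commit): a minimal sketch of what a
workqueue-based svo_enqueue_xprt callback might look like once such support is
added. The names svc_wq_item, svc_wq_process, svc_wq_enqueue_xprt and nfsd_wq
are hypothetical, and the actual request processing is elided.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

/* hypothetical workqueue that services ready transports */
static struct workqueue_struct *nfsd_wq;

/* one work item per enqueued transport */
struct svc_wq_item {
	struct work_struct	work;
	struct svc_xprt		*xprt;
};

static void svc_wq_process(struct work_struct *work)
{
	struct svc_wq_item *item = container_of(work, struct svc_wq_item, work);

	/* ... receive and process requests on item->xprt here ... */
	svc_xprt_put(item->xprt);
	kfree(item);
}

/* drop-in for .svo_enqueue_xprt: hand the ready transport to a workqueue
 * instead of waking an idle service thread; enqueue can happen from
 * bh/atomic context, hence GFP_ATOMIC */
static void svc_wq_enqueue_xprt(struct svc_xprt *xprt)
{
	struct svc_wq_item *item = kmalloc(sizeof(*item), GFP_ATOMIC);

	if (!item)
		return;
	svc_xprt_get(xprt);
	item->xprt = xprt;
	INIT_WORK(&item->work, svc_wq_process);
	queue_work(nfsd_wq, &item->work);
}

static struct svc_serv_ops nfsd_wq_sv_ops = {		/* hypothetical */
	.svo_enqueue_xprt	= svc_wq_enqueue_xprt,
};

Thread-based services keep pointing .svo_enqueue_xprt at svc_xprt_do_enqueue,
which is what every hunk below does.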

fs/lockd/svc.c

@@ -323,7 +323,8 @@ out_rqst:
 }
 
 static struct svc_serv_ops lockd_sv_ops = {
-	.svo_shutdown	= svc_rpcb_cleanup,
+	.svo_shutdown		= svc_rpcb_cleanup,
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
 };
 
 static struct svc_serv *lockd_create_svc(void)

fs/nfs/callback.c

@@ -309,6 +309,7 @@ err_bind:
 }
 
 static struct svc_serv_ops nfs_cb_sv_ops = {
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
 };
 
 static struct svc_serv *nfs_callback_create_svc(int minorversion)

fs/nfsd/nfssvc.c

@@ -391,10 +391,11 @@ static int nfsd_get_default_max_blksize(void)
 	return ret;
 }
 
-static struct svc_serv_ops nfsd_sv_ops = {
-	.svo_shutdown	= nfsd_last_thread,
-	.svo_function	= nfsd,
-	.svo_module	= THIS_MODULE,
+static struct svc_serv_ops nfsd_thread_sv_ops = {
+	.svo_shutdown		= nfsd_last_thread,
+	.svo_function		= nfsd,
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+	.svo_module		= THIS_MODULE,
 };
 
 int nfsd_create_serv(struct net *net)
@@ -411,7 +412,7 @@ int nfsd_create_serv(struct net *net)
 		nfsd_max_blksize = nfsd_get_default_max_blksize();
 	nfsd_reset_versions();
 	nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
-					  &nfsd_sv_ops);
+					  &nfsd_thread_sv_ops);
 	if (nn->nfsd_serv == NULL)
 		return -ENOMEM;

include/linux/sunrpc/svc.h

@@ -58,6 +58,9 @@ struct svc_serv_ops {
 	/* function for service threads to run */
 	int		(*svo_function)(void *);
 
+	/* queue up a transport for servicing */
+	void		(*svo_enqueue_xprt)(struct svc_xprt *);
+
 	/* optional module to count when adding threads (pooled svcs only) */
 	struct module	*svo_module;
 };

include/linux/sunrpc/svc_xprt.h

@@ -116,6 +116,7 @@ void	svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
 		      struct svc_serv *);
 int	svc_create_xprt(struct svc_serv *, const char *, struct net *,
 			const int, const unsigned short, int);
+void	svc_xprt_do_enqueue(struct svc_xprt *xprt);
 void	svc_xprt_enqueue(struct svc_xprt *xprt);
 void	svc_xprt_put(struct svc_xprt *xprt);
 void	svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);

net/sunrpc/svc_xprt.c

@@ -24,7 +24,6 @@ static int svc_deferred_recv(struct svc_rqst *rqstp);
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
 static void svc_age_temp_xprts(unsigned long closure);
 static void svc_delete_xprt(struct svc_xprt *xprt);
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt);
 
 /* apparently the "standard" is that clients close
  * idle connections after 5 minutes, servers after
@@ -225,12 +224,12 @@ static void svc_xprt_received(struct svc_xprt *xprt)
 	}
 
 	/* As soon as we clear busy, the xprt could be closed and
-	 * 'put', so we need a reference to call svc_xprt_do_enqueue with:
+	 * 'put', so we need a reference to call svc_enqueue_xprt with:
 	 */
 	svc_xprt_get(xprt);
 	smp_mb__before_atomic();
 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
-	svc_xprt_do_enqueue(xprt);
+	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
 	svc_xprt_put(xprt);
 }
 
@@ -320,7 +319,7 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
 	return false;
 }
 
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 {
 	struct svc_pool *pool;
 	struct svc_rqst *rqstp = NULL;
@@ -402,6 +401,7 @@ redo_search:
 out:
 	trace_svc_xprt_do_enqueue(xprt, rqstp);
 }
+EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
 
 /*
  * Queue up a transport with data pending. If there are idle nfsd
@@ -412,7 +412,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 {
 	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
 		return;
-	svc_xprt_do_enqueue(xprt);
+	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_enqueue);