NFSv4: Fix callback server shutdown

We want to use kthread_stop() in order to ensure the threads are
shut down before we tear down the nfs_callback_info in nfs_callback_down.
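
The ordering guarantee this relies on is the kthread API contract: kthread_stop() wakes the target thread and blocks until its thread function has returned. A minimal standalone sketch of that contract (an illustrative module, not part of this patch; names such as demo_thread are made up):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct task_struct *demo_task;

static int demo_thread(void *data)
{
        /* kthread_should_stop() becomes true once kthread_stop() is called. */
        while (!kthread_should_stop())
                msleep_interruptible(100);
        /* Returning here is what allows kthread_stop() to return. */
        return 0;
}

static int __init demo_init(void)
{
        demo_task = kthread_run(demo_thread, NULL, "demo_thread");
        return PTR_ERR_OR_ZERO(demo_task);
}

static void __exit demo_exit(void)
{
        /*
         * Blocks until demo_thread() has returned, so any state the thread
         * was using can safely be torn down once this call completes.
         */
        kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");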

Tested-and-reviewed-by: Kinglong Mee <kinglongmee@gmail.com>
Reported-by: Kinglong Mee <kinglongmee@gmail.com>
Fixes: bb6aeba736 ("NFSv4.x: Switch to using svc_set_num_threads()...")
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Author: Trond Myklebust, 2017-04-26 11:55:27 -04:00 (committed by J. Bruce Fields)
commit ed6473ddc7, parent 9e0d87680d
3 changed files with 55 additions and 8 deletions

fs/nfs/callback.c

@@ -76,7 +76,10 @@ nfs4_callback_svc(void *vrqstp)
 
         set_freezable();
 
-        while (!kthread_should_stop()) {
+        while (!kthread_freezable_should_stop(NULL)) {
+
+                if (signal_pending(current))
+                        flush_signals(current);
                 /*
                  * Listen for a request on the socket
                  */
@@ -85,6 +88,8 @@ nfs4_callback_svc(void *vrqstp)
                         continue;
                 svc_process(rqstp);
         }
+        svc_exit_thread(rqstp);
+        module_put_and_exit(0);
         return 0;
 }
 
@@ -103,9 +108,10 @@ nfs41_callback_svc(void *vrqstp)
 
         set_freezable();
 
-        while (!kthread_should_stop()) {
-                if (try_to_freeze())
-                        continue;
+        while (!kthread_freezable_should_stop(NULL)) {
+
+                if (signal_pending(current))
+                        flush_signals(current);
 
                 prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
                 spin_lock_bh(&serv->sv_cb_lock);
@@ -121,11 +127,13 @@ nfs41_callback_svc(void *vrqstp)
                                 error);
                 } else {
                         spin_unlock_bh(&serv->sv_cb_lock);
-                        schedule();
+                        if (!kthread_should_stop())
+                                schedule();
                         finish_wait(&serv->sv_cb_waitq, &wq);
                 }
-                flush_signals(current);
         }
+        svc_exit_thread(rqstp);
+        module_put_and_exit(0);
         return 0;
 }
 
@@ -221,14 +229,14 @@ err_bind:
 static struct svc_serv_ops nfs40_cb_sv_ops = {
         .svo_function = nfs4_callback_svc,
         .svo_enqueue_xprt = svc_xprt_do_enqueue,
-        .svo_setup = svc_set_num_threads,
+        .svo_setup = svc_set_num_threads_sync,
         .svo_module = THIS_MODULE,
 };
 
 #if defined(CONFIG_NFS_V4_1)
 static struct svc_serv_ops nfs41_cb_sv_ops = {
         .svo_function = nfs41_callback_svc,
         .svo_enqueue_xprt = svc_xprt_do_enqueue,
-        .svo_setup = svc_set_num_threads,
+        .svo_setup = svc_set_num_threads_sync,
         .svo_module = THIS_MODULE,
 };
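
The svo_setup hook switched above is what the callback server invokes when it brings its threads up and down, so with svc_set_num_threads_sync() in place the final nfs_callback_down() teardown no longer returns before the threads are gone. A hedged sketch of that teardown ordering (shape taken from the commit message; demo_callback_teardown and its body are illustrative, not copied from fs/nfs/callback.c):

#include <linux/sunrpc/svc.h>

static void demo_callback_teardown(struct svc_serv *serv)
{
        /* Take an extra reference: the "pool == NULL" path of
         * svc_set_num_threads_sync() assumes the caller has done a svc_get(). */
        svc_get(serv);
        /*
         * .svo_setup now points at svc_set_num_threads_sync(), so this call
         * returns only after every callback kthread has been stopped via
         * kthread_stop().
         */
        serv->sv_ops->svo_setup(serv, NULL, 0);
        /* Only now is it safe to drop the last reference and tear down the
         * nfs_callback_info state the threads were using. */
        svc_destroy(serv);
}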

include/linux/sunrpc/svc.h

@@ -473,6 +473,7 @@ void svc_pool_map_put(void);
 struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
                         struct svc_serv_ops *);
 int                svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+int                svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
 int                svc_pool_stats_open(struct svc_serv *serv, struct file *file);
 void               svc_destroy(struct svc_serv *);
 void               svc_shutdown_net(struct svc_serv *, struct net *);

net/sunrpc/svc.c

@@ -795,6 +795,44 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 }
 EXPORT_SYMBOL_GPL(svc_set_num_threads);
 
+/* destroy old threads */
+static int
+svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+{
+        struct task_struct *task;
+        unsigned int state = serv->sv_nrthreads-1;
+
+        /* destroy old threads */
+        do {
+                task = choose_victim(serv, pool, &state);
+                if (task == NULL)
+                        break;
+                kthread_stop(task);
+                nrservs++;
+        } while (nrservs < 0);
+        return 0;
+}
+
+int
+svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+{
+        if (pool == NULL) {
+                /* The -1 assumes caller has done a svc_get() */
+                nrservs -= (serv->sv_nrthreads-1);
+        } else {
+                spin_lock_bh(&pool->sp_lock);
+                nrservs -= pool->sp_nrthreads;
+                spin_unlock_bh(&pool->sp_lock);
+        }
+
+        if (nrservs > 0)
+                return svc_start_kthreads(serv, pool, nrservs);
+        if (nrservs < 0)
+                return svc_stop_kthreads(serv, pool, nrservs);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);
+
 /*
  * Called from a server thread as it's exiting. Caller must hold the "service
  * mutex" for the service.