sunrpc: convert sp_task_pending flag to use atomic bitops

In a later patch, we'll want to be able to handle this flag without
holding the sp_lock. Change this field to an unsigned long flags
field, and declare a new flag in it that can be managed with atomic
bitops.

Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Author:    Jeff Layton
Date:      2014-11-19 07:51:20 -05:00
Committer: J. Bruce Fields
commit 4d5db3f536
parent 62978b3c61
2 files changed, 6 insertions(+), 5 deletions(-)
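
For context, here is a minimal sketch of the conversion pattern this patch applies. All names below are hypothetical; only set_bit()/test_and_clear_bit() come from the kernel bitops API. An int flag that could only be touched while holding a spinlock becomes one bit in an unsigned long word that the atomic bitops can update locklessly:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define EX_PENDING	(0)			/* bit number, not a mask */

struct ex_pool {
	spinlock_t	ex_lock;		/* no longer needed for the flag */
	unsigned long	ex_flags;		/* replaces "int ex_pending" */
};

/* Producer side: mark work pending without taking ex_lock. */
static void ex_mark_pending(struct ex_pool *p)
{
	set_bit(EX_PENDING, &p->ex_flags);	/* atomic read-modify-write */
}

/* Consumer side: atomically observe and consume the flag. */
static bool ex_take_pending(struct ex_pool *p)
{
	return test_and_clear_bit(EX_PENDING, &p->ex_flags);
}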

diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -50,7 +50,9 @@ struct svc_pool {
 	unsigned int		sp_nrthreads;	/* # of threads in pool */
 	struct list_head	sp_all_threads;	/* all server threads */
 	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
-	int			sp_task_pending;/* has pending task */
+#define	SP_TASK_PENDING		(0)		/* still work to do even if no
+						 * xprt is queued. */
+	unsigned long		sp_flags;
 } ____cacheline_aligned_in_smp;
 
 /*
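
Note that SP_TASK_PENDING is a bit *number* within sp_flags rather than a mask, which is why (0) is a meaningful value: the bitops take an index and derive the mask internally. A hypothetical illustration:

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_TASK_PENDING	(0)	/* mirrors SP_TASK_PENDING: bit 0 */

/* test_bit() interprets its first argument as a bit index, so 0
 * names bit 0 of the flags word. By contrast, (flags & EX_TASK_PENDING)
 * would always evaluate to zero, because 0 is not usable as a mask.
 */
static bool ex_has_pending(const unsigned long *flags)
{
	return test_bit(EX_TASK_PENDING, flags);
}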

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -509,7 +509,7 @@ void svc_wake_up(struct svc_serv *serv)
 		 */
 		wake_up_process(rqstp->rq_task);
 	} else
-		pool->sp_task_pending = 1;
+		set_bit(SP_TASK_PENDING, &pool->sp_flags);
 	spin_unlock_bh(&pool->sp_lock);
 	}
 }
@@ -644,10 +644,9 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 		 * long for cache updates.
 		 */
 		rqstp->rq_chandle.thread_wait = 1*HZ;
-		pool->sp_task_pending = 0;
+		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
 	} else {
-		if (pool->sp_task_pending) {
-			pool->sp_task_pending = 0;
+		if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags)) {
 			xprt = ERR_PTR(-EAGAIN);
 			goto out;
 		}
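
The consumer-side change here is more than a mechanical substitution: the old code tested and cleared the flag in two separate statements, which is only safe while sp_lock is held. test_and_clear_bit() folds both steps into one atomic read-modify-write, so exactly one caller can consume the flag even without the lock. A sketch of the difference, with hypothetical names:

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_PENDING	(0)

/* Patched pattern: one atomic operation, safe without a lock. */
static bool ex_take_pending_atomic(unsigned long *flags)
{
	return test_and_clear_bit(EX_PENDING, flags);
}

/* Pre-patch pattern: separate read and write. Done locklessly, two
 * threads could both see the flag set, and a set_bit() arriving
 * between the read and the write would be silently wiped out.
 */
static bool ex_take_pending_racy(int *pending)
{
	if (*pending) {
		*pending = 0;
		return true;
	}
	return false;
}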