1
0
Fork 0

staging/lustre/ptlrpc: avoid list scan in ptlrpcd_check

ptlrpcd_check() always scans all requests on the ptlrpc_request_set
and tries to finish completed requests, which is inefficient.
Even worse, l_wait_event() always checks the condition twice
before sleeping and one more time after waking up, which means
it will call ptlrpcd_check() three times in each loop.

This patch moves completed requests to the head of the list
in ptlrpc_check_set(); with this change ptlrpcd_check() no longer
needs to scan all requests.

Signed-off-by: Liang Zhen <liang.zhen@intel.com>
Reviewed-on: http://review.whamcloud.com/11513
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5548
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Johann Lombardi <johann.lombardi@intel.com>
Signed-off-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
hifive-unleashed-5.1
Liang Zhen 2015-02-01 21:52:00 -05:00 committed by Greg Kroah-Hartman
parent b47ea4bbfe
commit fa55c6a4b4
2 changed files with 20 additions and 15 deletions

View File

@ -1497,11 +1497,13 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
struct list_head *tmp, *next;
struct list_head comp_reqs;
int force_timer_recalc = 0;
if (atomic_read(&set->set_remaining) == 0)
return 1;
INIT_LIST_HEAD(&comp_reqs);
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request,
@ -1576,8 +1578,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
ptlrpc_rqphase_move(req, req->rq_next_phase);
}
if (req->rq_phase == RQ_PHASE_COMPLETE)
if (req->rq_phase == RQ_PHASE_COMPLETE) {
list_move_tail(&req->rq_set_chain, &comp_reqs);
continue;
}
if (req->rq_phase == RQ_PHASE_INTERPRET)
goto interpret;
@ -1860,9 +1864,15 @@ interpret:
if (req->rq_status != 0)
set->set_rc = req->rq_status;
ptlrpc_req_finished(req);
} else {
list_move_tail(&req->rq_set_chain, &comp_reqs);
}
}
/* move completed request at the head of list so it's easier for
* caller to find them */
list_splice(&comp_reqs, &set->set_requests);
/* If we hit an error, we want to recover promptly. */
return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
}

View File

@ -306,21 +306,16 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
if (atomic_read(&set->set_remaining))
rc |= ptlrpc_check_set(env, set);
if (!list_empty(&set->set_requests)) {
/*
* XXX: our set never completes, so we prune the completed
* reqs after each iteration. boy could this be smarter.
*/
list_for_each_safe(pos, tmp, &set->set_requests) {
req = list_entry(pos, struct ptlrpc_request,
rq_set_chain);
if (req->rq_phase != RQ_PHASE_COMPLETE)
continue;
/* NB: ptlrpc_check_set has already moved completed request at the
* head of seq::set_requests */
list_for_each_safe(pos, tmp, &set->set_requests) {
req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase != RQ_PHASE_COMPLETE)
break;
list_del_init(&req->rq_set_chain);
req->rq_set = NULL;
ptlrpc_req_finished(req);
}
list_del_init(&req->rq_set_chain);
req->rq_set = NULL;
ptlrpc_req_finished(req);
}
if (rc == 0) {