
block: convert is_sync helpers to use REQ_OPs.

This patch converts the is_sync helpers to use separate variables
for the operation and flags.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Mike Christie 2016-06-05 14:32:16 -05:00 committed by Jens Axboe
parent 8fe0d473f5
commit d9d8c5c489
4 changed files with 11 additions and 11 deletions
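Why the split matters: the one-argument helper inferred the data direction from the REQ_WRITE flag bit, which only kept working because, at this point in the series, the transitional REQ_OP_WRITE value happened to alias that bit. A minimal before/after sketch of the helper (kernel context assumed; the _old/_new suffixes are illustrative, not part of the commit):

	/* Old shape: direction and flags share one bitmask, so "sync"
	 * means "not a write, or a write tagged REQ_SYNC". */
	static inline bool rw_is_sync_old(unsigned int rw_flags)
	{
		return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
	}

	/* New shape: the REQ_OP_* value travels separately from the
	 * flags, so operations no longer need to fit in flag bits. */
	static inline bool rw_is_sync_new(int op, unsigned int rw_flags)
	{
		return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
	}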

block/blk-core.c

@@ -962,7 +962,7 @@ static void __freed_request(struct request_list *rl, int sync)
 static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
 	struct request_queue *q = rl->q;
-	int sync = rw_is_sync(op | flags);
+	int sync = rw_is_sync(op, flags);
 
 	q->nr_rqs[sync]--;
 	rl->count[sync]--;
@@ -1075,7 +1075,7 @@ static struct request *__get_request(struct request_list *rl, int op,
 	struct elevator_type *et = q->elevator->type;
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
-	const bool is_sync = rw_is_sync(op | op_flags) != 0;
+	const bool is_sync = rw_is_sync(op, op_flags) != 0;
 	int may_queue;
 
 	if (unlikely(blk_queue_dying(q)))
@@ -1244,7 +1244,7 @@ static struct request *get_request(struct request_queue *q, int op,
 				   int op_flags, struct bio *bio,
 				   gfp_t gfp_mask)
 {
-	const bool is_sync = rw_is_sync(op | op_flags) != 0;
+	const bool is_sync = rw_is_sync(op, op_flags) != 0;
 	DEFINE_WAIT(wait);
 	struct request_list *rl;
 	struct request *rq;

block/blk-mq.c

@@ -204,7 +204,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[rw_is_sync(op | op_flags)]++;
+	ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
 }
 
 static struct request *
@@ -1178,7 +1178,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rw_is_sync(bio->bi_rw))
+	if (rw_is_sync(bio_op(bio), bio->bi_rw))
 		op_flags |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, op);
@@ -1246,7 +1246,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio->bi_rw);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
 	struct blk_map_ctx data;
 	struct request *rq;
@@ -1343,7 +1343,7 @@ done:
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
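In the bio-based paths above, the operation is now recovered with bio_op(bio) rather than being folded into the bi_rw mask before the call. A few illustrative evaluations of the new helper (a hedged sketch, kernel context assumed; semantics per the blkdev.h comment below):

	rw_is_sync(REQ_OP_READ, 0);          /* true: reads are always sync */
	rw_is_sync(REQ_OP_WRITE, 0);         /* false: a plain write is async */
	rw_is_sync(REQ_OP_WRITE, REQ_SYNC);  /* true: a write tagged REQ_SYNC */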

block/cfq-iosched.c

@@ -4311,7 +4311,7 @@ static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
 	if (!cic)
 		return ELV_MQUEUE_MAY;
 
-	cfqq = cic_to_cfqq(cic, rw_is_sync(op | op_flags));
+	cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
 	if (cfqq) {
 		cfq_init_prio_data(cfqq, cic);

include/linux/blkdev.h

@@ -624,14 +624,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
 /*
  * We regard a request as sync, if either a read or a sync write
  */
-static inline bool rw_is_sync(unsigned int rw_flags)
+static inline bool rw_is_sync(int op, unsigned int rw_flags)
 {
-	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
+	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
 {
-	return rw_is_sync(rq->cmd_flags);
+	return rw_is_sync(req_op(rq), rq->cmd_flags);
 }
 
 static inline bool blk_rl_full(struct request_list *rl, bool sync)
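Request-based callers stay on the rq_is_sync() wrapper, which now derives the operation via req_op(); a minimal usage sketch, modeled on the freed_request() accounting shown earlier (illustrative only, not added by this commit):

	/* Per-direction bookkeeping indexed by sync-ness, as blk-core
	 * does with its q->nr_rqs[] and rl->count[] arrays. */
	const bool sync = rq_is_sync(rq);

	q->nr_rqs[sync]--;
	rl->count[sync]--;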