
block: add queue_is_mq() helper

Various spots check for q->mq_ops being non-NULL; provide
a helper to do this instead.

Where the ->mq_ops != NULL check is redundant, remove it.

Since mq == rq-based now that legacy is gone, get rid of
queue_is_rq_based() and just use queue_is_mq() everywhere.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe 2018-11-15 12:22:51 -07:00
parent dabcefab45
commit 344e9ffcbd
14 changed files with 36 additions and 42 deletions
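The conversion is mechanical: queue_is_mq() simply wraps the existing q->mq_ops pointer test, so every open-coded NULL check (and, with the legacy path gone, every queue_is_rq_based() call) collapses into a queue_is_mq() call. The following is a minimal standalone C sketch of the pattern; struct request_queue, struct blk_mq_ops and example_call_site() here are simplified stand-ins for the real kernel types and call sites, not the kernel code itself.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins: only the one field the helper looks at. */
struct blk_mq_ops { int dummy; };

struct request_queue {
	const struct blk_mq_ops *mq_ops;	/* non-NULL only for blk-mq queues */
};

/* The helper this commit adds: true when the queue is blk-mq (request) based. */
static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

/* Call-site pattern: was "if (q->mq_ops)", now reads as a question about the queue. */
static void example_call_site(struct request_queue *q)
{
	if (queue_is_mq(q))
		printf("mq queue: freeze/unfreeze, run hw queues, etc.\n");
	else
		printf("bio-based queue: nothing mq-specific to do\n");
}

int main(void)
{
	static const struct blk_mq_ops ops;
	struct request_queue mq_q  = { .mq_ops = &ops };
	struct request_queue bio_q = { .mq_ops = NULL };

	example_call_site(&mq_q);
	example_call_site(&bio_q);
	return 0;
}

Returning the pointer from a bool function relies on the same implicit pointer-to-bool conversion the open-coded if (q->mq_ops) checks used; callers that still need the ops table (e.g. q->mq_ops->busy in blk_lld_busy()) keep dereferencing mq_ops after the queue_is_mq() guard.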

block/blk-cgroup.c

@@ -1324,7 +1324,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 pd_prealloc:
 	if (!pd_prealloc) {
@@ -1363,7 +1363,7 @@ pd_prealloc:
 	spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
 	if (pd_prealloc)
 		pol->pd_free_fn(pd_prealloc);
@@ -1387,7 +1387,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (!blkcg_policy_enabled(q, pol))
 		return;
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
 	spin_lock_irq(&q->queue_lock);
@@ -1405,7 +1405,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	spin_unlock_irq(&q->queue_lock);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

block/blk-core.c

@@ -232,7 +232,7 @@ void blk_sync_queue(struct request_queue *q)
 	del_timer_sync(&q->timeout);
 	cancel_work_sync(&q->timeout_work);
 
-	if (q->mq_ops) {
+	if (queue_is_mq(q)) {
 		struct blk_mq_hw_ctx *hctx;
 		int i;
@@ -281,7 +281,7 @@ void blk_set_queue_dying(struct request_queue *q)
 	 */
 	blk_freeze_queue_start(q);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_wake_waiters(q);
 
 	/* Make blk_queue_enter() reexamine the DYING flag. */
@@ -356,7 +356,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * blk_freeze_queue() should be enough for cases of passthrough
 	 * request.
 	 */
-	if (q->mq_ops && blk_queue_init_done(q))
+	if (queue_is_mq(q) && blk_queue_init_done(q))
 		blk_mq_quiesce_queue(q);
 
 	/* for synchronous bio-based driver finish in-flight integrity i/o */
@@ -374,7 +374,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_exit_queue(q);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_free_queue(q);
 
 	percpu_ref_exit(&q->q_usage_counter);
@@ -982,7 +982,7 @@ generic_make_request_checks(struct bio *bio)
 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 	 * if queue is not a request based queue.
 	 */
-	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
 		goto not_supported;
 
 	if (should_fail_bio(bio))
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
  */
 int blk_lld_busy(struct request_queue *q)
 {
-	if (q->mq_ops && q->mq_ops->busy)
+	if (queue_is_mq(q) && q->mq_ops->busy)
 		return q->mq_ops->busy(q);
 
 	return 0;

block/blk-flush.c

@@ -273,8 +273,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	 * assigned to empty flushes, and we deadlock if we are expecting
 	 * other requests to make progress. Don't defer for that case.
 	 */
-	if (!list_empty(&fq->flush_data_in_flight) &&
-	    !(q->mq_ops && q->elevator) &&
+	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
 	    time_before(jiffies,
 			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
 		return;

block/blk-mq.c

@@ -150,7 +150,7 @@ void blk_freeze_queue_start(struct request_queue *q)
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
-		if (q->mq_ops)
+		if (queue_is_mq(q))
 			blk_mq_run_hw_queues(q, false);
 	}
 }

block/blk-sysfs.c

@@ -68,7 +68,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	unsigned long nr;
 	int ret, err;
 
-	if (!q->mq_ops)
+	if (!queue_is_mq(q))
 		return -EINVAL;
 
 	ret = queue_var_store(&nr, page, count);
@@ -835,12 +835,12 @@ static void __blk_release_queue(struct work_struct *work)
 	blk_queue_free_zone_bitmaps(q);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_release(q);
 
 	blk_trace_shutdown(q);
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_debugfs_unregister(q);
 
 	bioset_exit(&q->bio_split);
@@ -914,7 +914,7 @@ int blk_register_queue(struct gendisk *disk)
 		goto unlock;
 	}
 
-	if (q->mq_ops) {
+	if (queue_is_mq(q)) {
 		__blk_mq_register_dev(dev, q);
 		blk_mq_debugfs_register(q);
 	}
@@ -925,7 +925,7 @@ int blk_register_queue(struct gendisk *disk)
 	blk_throtl_register_queue(q);
 
-	if ((q->mq_ops && q->elevator)) {
+	if (q->elevator) {
 		ret = elv_register_queue(q);
 		if (ret) {
 			mutex_unlock(&q->sysfs_lock);
@@ -974,7 +974,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	 * Remove the sysfs attributes before unregistering the queue data
 	 * structures that can be modified through sysfs.
 	 */
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unregister_dev(disk_to_dev(disk), q);
 	mutex_unlock(&q->sysfs_lock);
@@ -983,7 +983,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	blk_trace_remove_sysfs(disk_to_dev(disk));
 
 	mutex_lock(&q->sysfs_lock);
-	if (q->mq_ops && q->elevator)
+	if (q->elevator)
 		elv_unregister_queue(q);
 	mutex_unlock(&q->sysfs_lock);

block/blk-throttle.c

@@ -2456,7 +2456,7 @@ void blk_throtl_register_queue(struct request_queue *q)
 	td->throtl_slice = DFL_THROTL_SLICE_HD;
 #endif
 
-	td->track_bio_latency = !queue_is_rq_based(q);
+	td->track_bio_latency = !queue_is_mq(q);
 	if (!td->track_bio_latency)
 		blk_stat_enable_accounting(q);
 }

block/blk-wbt.c

@@ -701,7 +701,7 @@ void wbt_enable_default(struct request_queue *q)
 	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
 		return;
 
-	if (q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ))
+	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
 		wbt_init(q);
 }
 EXPORT_SYMBOL_GPL(wbt_enable_default);

block/blk-zoned.c

@@ -421,7 +421,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
 	 * BIO based queues do not use a scheduler so only q->nr_zones
 	 * needs to be updated so that the sysfs exposed value is correct.
 	 */
-	if (!queue_is_rq_based(q)) {
+	if (!queue_is_mq(q)) {
 		q->nr_zones = nr_zones;
 		return 0;
 	}

block/bsg.c

@@ -471,7 +471,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
 	/*
 	 * we need a proper transport to send commands, not a stacked device
 	 */
-	if (!queue_is_rq_based(q))
+	if (!queue_is_mq(q))
 		return 0;
 
 	bcd = &q->bsg_dev;

block/elevator.c

@@ -667,7 +667,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
 	/*
 	 * Special case for mq, turn off scheduling
 	 */
-	if (q->mq_ops && !strncmp(name, "none", 4))
+	if (!strncmp(name, "none", 4))
 		return elevator_switch(q, NULL);
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));
@@ -685,8 +685,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
 static inline bool elv_support_iosched(struct request_queue *q)
 {
-	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
-				BLK_MQ_F_NO_SCHED))
+	if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
 		return false;
 	return true;
 }
@@ -696,7 +695,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 {
 	int ret;
 
-	if (!q->mq_ops || !elv_support_iosched(q))
+	if (!queue_is_mq(q) || !elv_support_iosched(q))
 		return count;
 
 	ret = __elevator_change(q, name);
@@ -713,7 +712,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 	struct elevator_type *__e;
 	int len = 0;
 
-	if (!queue_is_rq_based(q))
+	if (!queue_is_mq(q))
 		return sprintf(name, "none\n");
 
 	if (!q->elevator)
@@ -732,7 +731,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 	}
 	spin_unlock(&elv_list_lock);
 
-	if (q->mq_ops && q->elevator)
+	if (q->elevator)
 		len += sprintf(name+len, "none");
 
 	len += sprintf(len+name, "\n");

block/genhd.c

@@ -47,7 +47,7 @@ static void disk_release_events(struct gendisk *disk);
 void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 {
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		return;
 
 	atomic_inc(&part->in_flight[rw]);
@@ -57,7 +57,7 @@ void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 {
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		return;
 
 	atomic_dec(&part->in_flight[rw]);
@@ -68,7 +68,7 @@ void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 void part_in_flight(struct request_queue *q, struct hd_struct *part,
 		    unsigned int inflight[2])
 {
-	if (q->mq_ops) {
+	if (queue_is_mq(q)) {
 		blk_mq_in_flight(q, part, inflight);
 		return;
 	}
@@ -85,7 +85,7 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
 void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
 		       unsigned int inflight[2])
 {
-	if (q->mq_ops) {
+	if (queue_is_mq(q)) {
 		blk_mq_in_flight_rw(q, part, inflight);
 		return;
 	}

drivers/md/dm-rq.c

@@ -43,7 +43,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
 int dm_request_based(struct mapped_device *md)
 {
-	return queue_is_rq_based(md->queue);
+	return queue_is_mq(md->queue);
 }
 
 void dm_start_queue(struct request_queue *q)

drivers/md/dm-table.c

@@ -919,12 +919,12 @@ static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	struct verify_rq_based_data *v = data;
 
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		v->mq_count++;
 	else
 		v->sq_count++;
 
-	return queue_is_rq_based(q);
+	return queue_is_mq(q);
 }
 
 static int dm_table_determine_type(struct dm_table *t)

include/linux/blkdev.h

@@ -656,11 +656,7 @@ static inline bool blk_account_rq(struct request *rq)
 #define rq_data_dir(rq)	(op_is_write(req_op(rq)) ? WRITE : READ)
 
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
 {
 	return q->mq_ops;
 }