
block: Use pointer to backing_dev_info from request_queue

We will want to have struct backing_dev_info allocated separately from
struct request_queue. As a first step, add a pointer to backing_dev_info
to request_queue and convert all users touching it. No functional
changes in this patch.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
Jan Kara 2017-02-02 15:56:50 +01:00 committed by Jens Axboe
parent f44f1ab5a2
commit dc3b17cc8b
33 changed files with 90 additions and 86 deletions
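The sketch below illustrates the pattern this patch applies, as a standalone C program with simplified, hypothetical stand-in types (the real definitions live in include/linux/blkdev.h and the backing-dev headers): the queue still embeds a backing_dev_info for now, but every user reaches it through a pointer, so a later patch can point that pointer at a separately allocated object without touching the callers again.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures (illustrative only). */
    struct backing_dev_info {
            unsigned long ra_pages;         /* read-ahead window, in pages */
    };

    struct request_queue {
            struct backing_dev_info *backing_dev_info; /* what all users touch */
            struct backing_dev_info _backing_dev_info; /* still embedded, for now */
    };

    static void queue_init(struct request_queue *q)
    {
            /* Mirrors blk_alloc_queue_node(): point at the embedded copy. */
            q->backing_dev_info = &q->_backing_dev_info;
            q->backing_dev_info->ra_pages = 32;
    }

    int main(void)
    {
            struct request_queue q;

            queue_init(&q);
            /* Callers now write q.backing_dev_info->ra_pages where they
             * previously wrote q.backing_dev_info.ra_pages. */
            printf("ra_pages = %lu\n", q.backing_dev_info->ra_pages);
            return 0;
    }

Once every user goes through the pointer, swapping the embedded copy for a separately allocated bdi becomes a local change to the queue's setup and teardown paths.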


@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 		goto err_free_blkg;
 	}
-	wb_congested = wb_congested_get_create(&q->backing_dev_info,
+	wb_congested = wb_congested_get_create(q->backing_dev_info,
 					       blkcg->css.id,
 					       GFP_NOWAIT | __GFP_NOWARN);
 	if (!wb_congested) {
@@ -469,8 +469,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
 	/* some drivers (floppy) instantiate a queue w/o disk registered */
-	if (blkg->q->backing_dev_info.dev)
-		return dev_name(blkg->q->backing_dev_info.dev);
+	if (blkg->q->backing_dev_info->dev)
+		return dev_name(blkg->q->backing_dev_info->dev);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(blkg_dev_name);


@@ -75,7 +75,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
 	 * flip its congestion state for events on other blkcgs.
 	 */
 	if (rl == &rl->q->root_rl)
-		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
@@ -86,7 +86,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
 #else
 	/* see blk_clear_congested() */
 	if (rl == &rl->q->root_rl)
-		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
@@ -117,7 +117,7 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	return &q->backing_dev_info;
+	return q->backing_dev_info;
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
@@ -575,7 +575,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_flush_integrity();
 	/* @q won't process any more request, flush async actions */
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 	blk_sync_queue(q);
 	if (q->mq_ops)
@@ -587,7 +587,7 @@ void blk_cleanup_queue(struct request_queue *q)
 		q->queue_lock = &q->__queue_lock;
 	spin_unlock_irq(lock);
-	bdi_unregister(&q->backing_dev_info);
+	bdi_unregister(q->backing_dev_info);
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
@@ -728,17 +728,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q->bio_split)
 		goto fail_id;
-	q->backing_dev_info.ra_pages =
+	q->backing_dev_info = &q->_backing_dev_info;
+	q->backing_dev_info->ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
-	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-	q->backing_dev_info.name = "block";
+	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+	q->backing_dev_info->name = "block";
 	q->node = node_id;
-	err = bdi_init(&q->backing_dev_info);
+	err = bdi_init(q->backing_dev_info);
 	if (err)
 		goto fail_split;
-	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->queue_head);
@@ -788,7 +789,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
 	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-	bdi_destroy(&q->backing_dev_info);
+	bdi_destroy(q->backing_dev_info);
 fail_split:
 	bioset_free(q->bio_split);
 fail_id:
@@ -1182,7 +1183,7 @@ fail_elvpriv:
 	 * disturb iosched and blkcg but weird is bettern than dead.
 	 */
 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-			   __func__, dev_name(q->backing_dev_info.dev));
+			   __func__, dev_name(q->backing_dev_info->dev));
 	rq->rq_flags &= ~RQF_ELVPRIV;
 	rq->elv.icq = NULL;
@@ -2659,7 +2660,7 @@ void blk_finish_request(struct request *req, int error)
 	BUG_ON(blk_queued_rq(req));
 	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
-		laptop_io_completion(&req->q->backing_dev_info);
+		laptop_io_completion(req->q->backing_dev_info);
 	blk_delete_timer(req);


@@ -443,10 +443,10 @@ void blk_integrity_revalidate(struct gendisk *disk)
 		return;
 	if (bi->profile)
-		disk->queue->backing_dev_info.capabilities |=
+		disk->queue->backing_dev_info->capabilities |=
 			BDI_CAP_STABLE_WRITES;
 	else
-		disk->queue->backing_dev_info.capabilities &=
+		disk->queue->backing_dev_info->capabilities &=
 			~BDI_CAP_STABLE_WRITES;
 }


@@ -253,7 +253,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
 	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
 	limits->max_sectors = max_sectors;
-	q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
+	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);


@@ -89,7 +89,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
 					(PAGE_SHIFT - 10);
 	return queue_var_show(ra_kb, (page));
@@ -104,7 +104,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 	return ret;
 }
@@ -236,7 +236,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	spin_lock_irq(q->queue_lock);
 	q->limits.max_sectors = max_sectors_kb << 1;
-	q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
 	spin_unlock_irq(q->queue_lock);
 	return ret;
@@ -799,7 +799,7 @@ static void blk_release_queue(struct kobject *kobj)
 			container_of(kobj, struct request_queue, kobj);
 	wbt_exit(q);
-	bdi_exit(&q->backing_dev_info);
+	bdi_exit(q->backing_dev_info);
 	blkcg_exit_queue(q);
 	if (q->elevator) {


@@ -96,7 +96,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-	struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb;
+	struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
 	return time_before(jiffies, wb->dirty_sleep + HZ);
 }
@@ -279,7 +279,7 @@ enum {
 static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 {
-	struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
 	u64 thislat;
 	/*
@@ -339,7 +339,7 @@ static int latency_exceeded(struct rq_wb *rwb)
 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
 {
-	struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
 	trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
 			rwb->wb_background, rwb->wb_normal, rwb->wb_max);
@@ -423,7 +423,7 @@ static void wb_timer_fn(unsigned long data)
 	status = latency_exceeded(rwb);
-	trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step,
+	trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
 			inflight);
 	/*


@@ -613,7 +613,7 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
 	disk_alloc_events(disk);
 	/* Register BDI before referencing it from bdev */
-	bdi = &disk->queue->backing_dev_info;
+	bdi = disk->queue->backing_dev_info;
 	bdi_register_owner(bdi, disk_to_dev(disk));
 	blk_register_region(disk_devt(disk), disk->minors, NULL,


@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
 	WARN_ON(d->gd);
 	WARN_ON(d->flags & DEVFL_UP);
 	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-	q->backing_dev_info.name = "aoe";
-	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
+	q->backing_dev_info->name = "aoe";
+	q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
 	d->bufpool = mp;
 	d->blkq = gd->queue = q;
 	q->queuedata = d;


@@ -2462,7 +2462,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 	if (get_ldev(device)) {
 		q = bdev_get_queue(device->ldev->backing_bdev);
-		r = bdi_congested(&q->backing_dev_info, bdi_bits);
+		r = bdi_congested(q->backing_dev_info, bdi_bits);
 		put_ldev(device);
 		if (r)
 			reason = 'b';
@@ -2834,8 +2834,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	/* we have no partitions. we contain only ourselves. */
 	device->this_bdev->bd_contains = device->this_bdev;
-	q->backing_dev_info.congested_fn = drbd_congested;
-	q->backing_dev_info.congested_data = device;
+	q->backing_dev_info->congested_fn = drbd_congested;
+	q->backing_dev_info->congested_data = device;
 	blk_queue_make_request(q, drbd_make_request);
 	blk_queue_write_cache(q, true, true);


@@ -1328,11 +1328,13 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 	if (b) {
 		blk_queue_stack_limits(q, b);
-		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+		if (q->backing_dev_info->ra_pages !=
+		    b->backing_dev_info->ra_pages) {
 			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
-				 q->backing_dev_info.ra_pages,
-				 b->backing_dev_info.ra_pages);
-			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+				 q->backing_dev_info->ra_pages,
+				 b->backing_dev_info->ra_pages);
+			q->backing_dev_info->ra_pages =
+						b->backing_dev_info->ra_pages;
 		}
 	}
 	fixup_discard_if_not_supported(q);
@@ -3345,7 +3347,7 @@ static void device_to_statistics(struct device_statistics *s,
 		s->dev_disk_flags = md->flags;
 		q = bdev_get_queue(device->ldev->backing_bdev);
 		s->dev_lower_blocked =
-			bdi_congested(&q->backing_dev_info,
+			bdi_congested(q->backing_dev_info,
 				      (1 << WB_async_congested) |
 				      (1 << WB_sync_congested));
 		put_ldev(device);


@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 			seq_printf(seq, "%2d: cs:Unconfigured\n", i);
 		} else {
 			/* reset device->congestion_reason */
-			bdi_rw_congested(&device->rq_queue->backing_dev_info);
+			bdi_rw_congested(device->rq_queue->backing_dev_info);
 			nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 			wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';


@@ -938,7 +938,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
 	switch (rbm) {
 	case RB_CONGESTED_REMOTE:
-		bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
 		return bdi_read_congested(bdi);
 	case RB_LEAST_PENDING:
 		return atomic_read(&device->local_cnt) >


@@ -1243,7 +1243,7 @@ try_next_bio:
 		&& pd->bio_queue_size <= pd->write_congestion_off);
 	spin_unlock(&pd->lock);
 	if (wakeup) {
-		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+		clear_bdi_congested(pd->disk->queue->backing_dev_info,
 					BLK_RW_ASYNC);
 	}
@@ -2370,7 +2370,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
 		spin_lock(&pd->lock);
 		if (pd->write_congestion_on > 0
 		    && pd->bio_queue_size >= pd->write_congestion_on) {
-			set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+			set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
 			do {
 				spin_unlock(&pd->lock);
 				congestion_wait(BLK_RW_ASYNC, HZ);


@@ -4526,7 +4526,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	q->limits.discard_zeroes_data = 1;
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
-		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 	disk->queue = q;


@@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits)
 	struct request_queue *q = bdev_get_queue(dc->bdev);
 	int ret = 0;
-	if (bdi_congested(&q->backing_dev_info, bits))
+	if (bdi_congested(q->backing_dev_info, bits))
 		return 1;
 	if (cached_dev_get(dc)) {
@@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits)
 		for_each_cache(ca, d->c, i) {
 			q = bdev_get_queue(ca->bdev);
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 		}
 		cached_dev_put(dc);
@@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 	struct gendisk *g = dc->disk.disk;
 	g->queue->make_request_fn = cached_dev_make_request;
-	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
 	dc->disk.cache_miss = cached_dev_cache_miss;
 	dc->disk.ioctl = cached_dev_ioctl;
 }
@@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits)
 	for_each_cache(ca, d->c, i) {
 		q = bdev_get_queue(ca->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 	return ret;
@@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
 	struct gendisk *g = d->disk;
 	g->queue->make_request_fn = flash_dev_make_request;
-	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
 	d->cache_miss = flash_dev_cache_miss;
 	d->ioctl = flash_dev_ioctl;
 }


@@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	blk_queue_make_request(q, NULL);
 	d->disk->queue = q;
 	q->queuedata = d;
-	q->backing_dev_info.congested_data = d;
+	q->backing_dev_info->congested_data = d;
 	q->limits.max_hw_sectors = UINT_MAX;
 	q->limits.max_sectors = UINT_MAX;
 	q->limits.max_segment_size = UINT_MAX;
@@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
 	set_capacity(dc->disk.disk,
 		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-	dc->disk.disk->queue->backing_dev_info.ra_pages =
-		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
-		    q->backing_dev_info.ra_pages);
+	dc->disk.disk->queue->backing_dev_info->ra_pages =
+		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+		    q->backing_dev_info->ra_pages);
 	bch_cached_dev_request_init(dc);
 	bch_cached_dev_writeback_init(dc);


@@ -2284,7 +2284,7 @@ static void do_waker(struct work_struct *ws)
 static int is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)


@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
 static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)


@@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 			char b[BDEVNAME_SIZE];
 			if (likely(q))
-				r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+				r |= bdi_congested(q->backing_dev_info, bdi_bits);
 			else
 				DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
 					     dm_device_name(t->md),


@@ -2711,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
 		return 1;
 	q = bdev_get_queue(pt->data_dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 static void requeue_bios(struct pool *pool)


@@ -1313,7 +1313,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 			 * With request-based DM we only need to check the
 			 * top-level queue for congestion.
 			 */
-			r = md->queue->backing_dev_info.wb.state & bdi_bits;
+			r = md->queue->backing_dev_info->wb.state & bdi_bits;
 		} else {
 			map = dm_get_live_table_fast(md);
 			if (map)
@@ -1396,7 +1396,7 @@ void dm_init_md_queue(struct mapped_device *md)
 	 * - must do so here (in alloc_dev callchain) before queue is used
 	 */
 	md->queue->queuedata = md;
-	md->queue->backing_dev_info.congested_data = md;
+	md->queue->backing_dev_info->congested_data = md;
 }
 void dm_init_normal_md_queue(struct mapped_device *md)
@@ -1407,7 +1407,7 @@ void dm_init_normal_md_queue(struct mapped_device *md)
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
-	md->queue->backing_dev_info.congested_fn = dm_any_congested;
+	md->queue->backing_dev_info->congested_fn = dm_any_congested;
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }


@@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits)
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
 		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 	return ret;


@@ -5341,8 +5341,8 @@ int md_run(struct mddev *mddev)
 			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
 		else
 			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
-		mddev->queue->backing_dev_info.congested_data = mddev;
-		mddev->queue->backing_dev_info.congested_fn = md_congested;
+		mddev->queue->backing_dev_info->congested_data = mddev;
+		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
@@ -5699,7 +5699,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 		__md_stop_writes(mddev);
 		__md_stop(mddev);
-		mddev->queue->backing_dev_info.congested_fn = NULL;
+		mddev->queue->backing_dev_info->congested_fn = NULL;
 		/* tell userspace to handle 'inactive' */
 		sysfs_notify_dirent_safe(mddev->sysfs_state);


@@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 			/* Just like multipath_map, we just check the
 			 * first available device
 			 */


@@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
 	for (i = 0; i < raid_disks && !ret ; i++) {
 		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 	return ret;
 }
@@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev)
 		 */
 		int stripe = mddev->raid_disks *
 			(mddev->chunk_sectors << 9) / PAGE_SIZE;
-		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
 	}
 	dump_zones(mddev);


@@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
 			 * non-congested targets, it can be removed
 			 */
 			if ((bits & (1 << WB_async_congested)) || 1)
-				ret |= bdi_congested(&q->backing_dev_info, bits);
+				ret |= bdi_congested(q->backing_dev_info, bits);
 			else
-				ret &= bdi_congested(&q->backing_dev_info, bits);
+				ret &= bdi_congested(q->backing_dev_info, bits);
 		}
 	}
 	rcu_read_unlock();


@@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 		}
 	}
 	rcu_read_unlock();
@@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev)
 		 * maybe...
 		 */
 		stripe /= conf->geo.near_copies;
-		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 	}
 	if (md_integrity_register(mddev))
@@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf)
 		int stripe = conf->geo.raid_disks *
 			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
 		stripe /= conf->geo.near_copies;
-		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 	}
 	conf->fullsync = 0;
 }


@@ -6264,10 +6264,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
 		mddev_suspend(mddev);
 		conf->skip_copy = new;
 		if (new)
-			mddev->queue->backing_dev_info.capabilities |=
+			mddev->queue->backing_dev_info->capabilities |=
 				BDI_CAP_STABLE_WRITES;
 		else
-			mddev->queue->backing_dev_info.capabilities &=
+			mddev->queue->backing_dev_info->capabilities &=
 				~BDI_CAP_STABLE_WRITES;
 		mddev_resume(mddev);
 	}
@@ -7086,8 +7086,8 @@ static int raid5_run(struct mddev *mddev)
 		int data_disks = conf->previous_raid_disks - conf->max_degraded;
 		int stripe = data_disks *
 			((mddev->chunk_sectors << 9) / PAGE_SIZE);
-		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
@@ -7696,8 +7696,8 @@ static void end_reshape(struct r5conf *conf)
 			int data_disks = conf->raid_disks - conf->max_degraded;
 			int stripe = data_disks * ((conf->chunk_sectors << 9)
 						   / PAGE_SIZE);
-			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 		}
 	}
 }


@@ -1226,7 +1226,7 @@ static int set_gfs2_super(struct super_block *s, void *data)
 	 * We set the bdi here to the queue backing, file systems can
 	 * overwrite this in ->fill_super()
 	 */
-	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+	s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
 	return 0;
 }


@@ -1068,7 +1068,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_time_gran = 1;
 	sb->s_max_links = NILFS_LINK_MAX;
-	sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+	sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info;
 	err = load_nilfs(nilfs, sb);
 	if (err)


@@ -1047,7 +1047,7 @@ static int set_bdev_super(struct super_block *s, void *data)
 	 * We set the bdi here to the queue backing, file systems can
 	 * overwrite this in ->fill_super()
 	 */
-	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+	s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
 	return 0;
 }


@@ -432,7 +432,8 @@ struct request_queue {
 	 */
 	struct delayed_work	delay_work;
-	struct backing_dev_info	backing_dev_info;
+	struct backing_dev_info	*backing_dev_info;
+	struct backing_dev_info	_backing_dev_info;
 	/*
 	 * The queue owner gets to use this for whatever they like.


@@ -1988,11 +1988,11 @@ void laptop_mode_timer_fn(unsigned long data)
 	 * We want to write everything out, not just down to the dirty
 	 * threshold
 	 */
-	if (!bdi_has_dirty_io(&q->backing_dev_info))
+	if (!bdi_has_dirty_io(q->backing_dev_info))
 		return;
 	rcu_read_lock();
-	list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
+	list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
 		if (wb_has_dirty_io(wb))
 			wb_start_writeback(wb, nr_pages, true,
 					   WB_REASON_LAPTOP_TIMER);