block: simplify queue allocation

Current make_request based drivers use either blk_alloc_queue_node or
blk_alloc_queue to allocate a queue, and then set up the make_request_fn
function pointer and a few parameters using the blk_queue_make_request
helper.  Simplify this by passing the make_request pointer to
blk_alloc_queue, and while at it merge the _node variant into the main
helper by always passing a node_id, and remove the superfluous gfp_mask
parameter.  A lower-level __blk_alloc_queue is kept for the blk-mq case.
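
As an illustrative sketch of the driver-side conversion (a hypothetical
"foo" driver, not taken from this patch):

	/* before: allocate, then bind the make_request_fn separately */
	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out;
	blk_queue_make_request(q, foo_make_request);

	/* after: one call; the NUMA node is explicit, the gfp_mask is gone */
	q = blk_alloc_queue(foo_make_request, NUMA_NO_NODE);
	if (!q)
		goto out;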

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig 2020-03-27 09:30:11 +01:00 committed by Jens Axboe
parent ff27668ce8
commit 3d745ea5b0
26 changed files with 54 additions and 108 deletions

arch/m68k/emu/nfblock.c

@@ -118,12 +118,11 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
 	dev->bsize = bsize;
 	dev->bshift = ffs(bsize) - 10;
-	dev->queue = blk_alloc_queue(GFP_KERNEL);
+	dev->queue = blk_alloc_queue(nfhd_make_request, NUMA_NO_NODE);
 	if (dev->queue == NULL)
 		goto free_dev;
 	dev->queue->queuedata = dev;
-	blk_queue_make_request(dev->queue, nfhd_make_request);
 	blk_queue_logical_block_size(dev->queue, bsize);
 	dev->disk = alloc_disk(16);

arch/xtensa/platforms/iss/simdisk.c

@@ -267,13 +267,12 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
 	spin_lock_init(&dev->lock);
 	dev->users = 0;
-	dev->queue = blk_alloc_queue(GFP_KERNEL);
+	dev->queue = blk_alloc_queue(simdisk_make_request, NUMA_NO_NODE);
 	if (dev->queue == NULL) {
 		pr_err("blk_alloc_queue failed\n");
 		goto out_alloc_queue;
 	}
-	blk_queue_make_request(dev->queue, simdisk_make_request);
 	dev->queue->queuedata = dev;
 	dev->gd = alloc_disk(SIMDISK_MINORS);

block/blk-cgroup.c

@@ -1010,7 +1010,7 @@ unlock:
  * blkcg_init_queue - initialize blkcg part of request queue
  * @q: request_queue to initialize
  *
- * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
+ * Called from __blk_alloc_queue(). Responsible for initializing blkcg
  * part of new request_queue @q.
  *
  * RETURNS:

block/blk-core.c

@@ -388,12 +388,6 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
-{
-	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(blk_alloc_queue);
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -470,24 +464,19 @@ static void blk_timeout_work(struct work_struct *work)
 {
 }
 
-/**
- * blk_alloc_queue_node - allocate a request queue
- * @gfp_mask: memory allocation flags
- * @node_id: NUMA node to allocate memory from
- */
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+struct request_queue *__blk_alloc_queue(int node_id)
 {
 	struct request_queue *q;
 	int ret;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep,
-				gfp_mask | __GFP_ZERO, node_id);
+				GFP_KERNEL | __GFP_ZERO, node_id);
 	if (!q)
 		return NULL;
 
 	q->last_merge = NULL;
 
-	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
 	if (q->id < 0)
 		goto fail_q;
@@ -495,7 +484,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (ret)
 		goto fail_id;
 
-	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+	q->backing_dev_info = bdi_alloc_node(GFP_KERNEL, node_id);
 	if (!q->backing_dev_info)
 		goto fail_split;
@@ -541,6 +530,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (blkcg_init_queue(q))
 		goto fail_ref;
 
+	blk_queue_dma_alignment(q, 511);
+	blk_set_default_limits(&q->limits);
+
 	return q;
 
 fail_ref:
@@ -557,7 +549,22 @@ fail_q:
 	kmem_cache_free(blk_requestq_cachep, q);
 	return NULL;
 }
-EXPORT_SYMBOL(blk_alloc_queue_node);
+
+struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id)
+{
+	struct request_queue *q;
+
+	if (WARN_ON_ONCE(!make_request))
+		return NULL;
+
+	q = __blk_alloc_queue(node_id);
+	if (!q)
+		return NULL;
+	q->make_request_fn = make_request;
+	q->nr_requests = BLKDEV_MAX_RQ;
+	return q;
+}
+EXPORT_SYMBOL(blk_alloc_queue);
 
 bool blk_get_queue(struct request_queue *q)
 {

block/blk-mq.c

@@ -2729,7 +2729,7 @@ struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 {
 	struct request_queue *uninit_q, *q;
 
-	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+	uninit_q = __blk_alloc_queue(set->numa_node);
 	if (!uninit_q)
 		return ERR_PTR(-ENOMEM);
 	uninit_q->queuedata = queuedata;
@@ -2939,11 +2939,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
 
-	blk_queue_make_request(q, blk_mq_make_request);
-
-	/*
-	 * Do this after blk_queue_make_request() overrides it...
-	 */
+	q->make_request_fn = blk_mq_make_request;
 	q->nr_requests = set->queue_depth;
 
 	/*

block/blk-settings.c

@@ -86,42 +86,6 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 
-/**
- * blk_queue_make_request - define an alternate make_request function for a device
- * @q: the request queue for the device to be affected
- * @mfn: the alternate make_request function
- *
- * Description:
- *    The normal way for &struct bios to be passed to a device
- *    driver is for them to be collected into requests on a request
- *    queue, and then to allow the device driver to select requests
- *    off that queue when it is ready.  This works well for many block
- *    devices. However some block devices (typically virtual devices
- *    such as md or lvm) do not benefit from the processing on the
- *    request queue, and are served best by having the requests passed
- *    directly to them.  This can be achieved by providing a function
- *    to blk_queue_make_request().
- *
- * Caveat:
- *    The driver that does this *must* be able to deal appropriately
- *    with buffers in "highmemory". This can be accomplished by either calling
- *    kmap_atomic() to get a temporary kernel mapping, or by calling
- *    blk_queue_bounce() to create a buffer in normal memory.
- **/
-void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
-{
-	/*
-	 * set defaults
-	 */
-	q->nr_requests = BLKDEV_MAX_RQ;
-
-	q->make_request_fn = mfn;
-	blk_queue_dma_alignment(q, 511);
-	blk_set_default_limits(&q->limits);
-}
-EXPORT_SYMBOL(blk_queue_make_request);
-
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device

block/blk.h

@@ -482,4 +482,6 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
 #endif
 }
 
+struct request_queue *__blk_alloc_queue(int node_id);
+
 #endif /* BLK_INTERNAL_H */

drivers/block/brd.c

@@ -381,12 +381,10 @@ static struct brd_device *brd_alloc(int i)
 	spin_lock_init(&brd->brd_lock);
 	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
 
-	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
+	brd->brd_queue = blk_alloc_queue(brd_make_request, NUMA_NO_NODE);
 	if (!brd->brd_queue)
 		goto out_free_dev;
 
-	blk_queue_make_request(brd->brd_queue, brd_make_request);
-
 	/* This is so fdisk will align partitions on 4k, because of
 	 * direct_access API needing 4k alignment, returning a PFN
 	 * (This is only a problem on very small devices <= 4M,

drivers/block/drbd/drbd_main.c

@@ -2801,7 +2801,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	drbd_init_set_defaults(device);
 
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+	q = blk_alloc_queue(drbd_make_request, NUMA_NO_NODE);
 	if (!q)
 		goto out_no_q;
 	device->rq_queue = q;
@@ -2828,7 +2828,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	q->backing_dev_info->congested_fn = drbd_congested;
 	q->backing_dev_info->congested_data = device;
-	blk_queue_make_request(q, drbd_make_request);
 	blk_queue_write_cache(q, true, true);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */

drivers/block/null_blk_main.c

@@ -1743,12 +1743,11 @@ static int null_add_dev(struct nullb_device *dev)
 			goto out_cleanup_tags;
 		}
 	} else if (dev->queue_mode == NULL_Q_BIO) {
-		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
+		nullb->q = blk_alloc_queue(null_queue_bio, dev->home_node);
 		if (!nullb->q) {
 			rv = -ENOMEM;
 			goto out_cleanup_queues;
 		}
-		blk_queue_make_request(nullb->q, null_queue_bio);
 		rv = init_driver_queues(nullb);
 		if (rv)
 			goto out_cleanup_blk_queue;

drivers/block/pktcdvd.c

@@ -2493,7 +2493,6 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
 {
 	struct request_queue *q = pd->disk->queue;
 
-	blk_queue_make_request(q, pkt_make_request);
 	blk_queue_logical_block_size(q, CD_FRAMESIZE);
 	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
 	q->queuedata = pd;
@@ -2750,7 +2749,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
 	disk->flags = GENHD_FL_REMOVABLE;
 	strcpy(disk->disk_name, pd->name);
 	disk->private_data = pd;
-	disk->queue = blk_alloc_queue(GFP_KERNEL);
+	disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE);
 	if (!disk->queue)
 		goto out_mem2;

drivers/block/ps3vram.c

@@ -737,7 +737,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
 	ps3vram_proc_init(dev);
 
-	queue = blk_alloc_queue(GFP_KERNEL);
+	queue = blk_alloc_queue(ps3vram_make_request, NUMA_NO_NODE);
 	if (!queue) {
 		dev_err(&dev->core, "blk_alloc_queue failed\n");
 		error = -ENOMEM;
@@ -746,7 +746,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
 	priv->queue = queue;
 	queue->queuedata = dev;
-	blk_queue_make_request(queue, ps3vram_make_request);
 	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
 	blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
 	blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);

drivers/block/rsxx/dev.c

@@ -248,7 +248,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 		return -ENOMEM;
 	}
 
-	card->queue = blk_alloc_queue(GFP_KERNEL);
+	card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE);
 	if (!card->queue) {
 		dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
 		unregister_blkdev(card->major, DRIVER_NAME);
@@ -269,7 +269,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 		blk_queue_logical_block_size(card->queue, blk_size);
 	}
 
-	blk_queue_make_request(card->queue, rsxx_make_request);
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);

drivers/block/umem.c

@@ -885,11 +885,9 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	card->biotail = &card->bio;
 	spin_lock_init(&card->lock);
 
-	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+	card->queue = blk_alloc_queue(mm_make_request, NUMA_NO_NODE);
 	if (!card->queue)
 		goto failed_alloc;
 
-	blk_queue_make_request(card->queue, mm_make_request);
-
 	card->queue->queuedata = card;
 
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);

drivers/block/zram/zram_drv.c

@@ -1895,7 +1895,7 @@ static int zram_add(void)
 #ifdef CONFIG_ZRAM_WRITEBACK
 	spin_lock_init(&zram->wb_limit_lock);
 #endif
-	queue = blk_alloc_queue(GFP_KERNEL);
+	queue = blk_alloc_queue(zram_make_request, NUMA_NO_NODE);
 	if (!queue) {
 		pr_err("Error allocating disk queue for device %d\n",
 			device_id);
@@ -1903,8 +1903,6 @@ static int zram_add(void)
 		goto out_free_idr;
 	}
 
-	blk_queue_make_request(queue, zram_make_request);
-
 	/* gendisk structure */
 	zram->disk = alloc_disk(1);
 	if (!zram->disk) {

drivers/lightnvm/core.c

@@ -380,12 +380,11 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 		goto err_dev;
 	}
 
-	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+	tqueue = blk_alloc_queue(tt->make_rq, dev->q->node);
 	if (!tqueue) {
 		ret = -ENOMEM;
 		goto err_disk;
 	}
-	blk_queue_make_request(tqueue, tt->make_rq);
 
 	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
 	tdisk->flags = GENHD_FL_EXT_DEVT;

drivers/md/bcache/super.c

@@ -866,11 +866,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
 	d->disk->fops = &bcache_ops;
 	d->disk->private_data = d;
 
-	q = blk_alloc_queue(GFP_KERNEL);
+	q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE);
 	if (!q)
 		return -ENOMEM;
 
-	blk_queue_make_request(q, make_request_fn);
 	d->disk->queue = q;
 	q->queuedata = d;
 	q->backing_dev_info->congested_data = d;

drivers/md/dm.c

@@ -1939,16 +1939,15 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_LIST_HEAD(&md->table_devices);
 	spin_lock_init(&md->uevent_lock);
 
-	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
-	if (!md->queue)
-		goto bad;
-	md->queue->queuedata = md;
 	/*
 	 * default to bio-based required ->make_request_fn until DM
 	 * table is loaded and md->type established. If request-based
 	 * table is loaded: blk-mq will override accordingly.
 	 */
-	blk_queue_make_request(md->queue, dm_make_request);
+	md->queue = blk_alloc_queue(dm_make_request, numa_node_id);
+	if (!md->queue)
+		goto bad;
+	md->queue->queuedata = md;
 
 	md->disk = alloc_disk_node(1, md->numa_node_id);
 	if (!md->disk)

drivers/md/md.c

@@ -5623,12 +5623,11 @@ static int md_alloc(dev_t dev, char *name)
 		mddev->hold_active = UNTIL_STOP;
 
 	error = -ENOMEM;
-	mddev->queue = blk_alloc_queue(GFP_KERNEL);
+	mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE);
 	if (!mddev->queue)
 		goto abort;
 	mddev->queue->queuedata = mddev;
 
-	blk_queue_make_request(mddev->queue, md_make_request);
 	blk_set_stacking_limits(&mddev->queue->limits);
 
 	disk = alloc_disk(1 << shift);

drivers/nvdimm/blk.c

@@ -249,13 +249,12 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 	internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
 	available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
 
-	q = blk_alloc_queue(GFP_KERNEL);
+	q = blk_alloc_queue(nd_blk_make_request, NUMA_NO_NODE);
 	if (!q)
 		return -ENOMEM;
 	if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
 		return -ENOMEM;
 
-	blk_queue_make_request(q, nd_blk_make_request);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
 	blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

drivers/nvdimm/btt.c

@@ -1521,7 +1521,7 @@ static int btt_blk_init(struct btt *btt)
 	struct nd_namespace_common *ndns = nd_btt->ndns;
 
 	/* create a new disk and request queue for btt */
-	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
+	btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE);
 	if (!btt->btt_queue)
 		return -ENOMEM;
@@ -1540,7 +1540,6 @@ static int btt_blk_init(struct btt *btt)
 	btt->btt_disk->queue->backing_dev_info->capabilities |=
 			BDI_CAP_SYNCHRONOUS_IO;
 
-	blk_queue_make_request(btt->btt_queue, btt_make_request);
 	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
 	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);

drivers/nvdimm/pmem.c

@@ -395,7 +395,7 @@ static int pmem_attach_disk(struct device *dev,
 		return -EBUSY;
 	}
 
-	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
+	q = blk_alloc_queue(pmem_make_request, dev_to_node(dev));
 	if (!q)
 		return -ENOMEM;
@@ -433,7 +433,6 @@ static int pmem_attach_disk(struct device *dev,
 	pmem->virt_addr = addr;
 
 	blk_queue_write_cache(q, true, fua);
-	blk_queue_make_request(q, pmem_make_request);
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
 	blk_queue_max_hw_sectors(q, UINT_MAX);

drivers/nvme/host/multipath.c

@@ -377,11 +377,10 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
 		return 0;
 
-	q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
+	q = blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node);
 	if (!q)
 		goto out;
 	q->queuedata = head;
-	blk_queue_make_request(q, nvme_ns_head_make_request);
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* set to a default value for 512 until disk is validated */
 	blk_queue_logical_block_size(q, 512);

drivers/s390/block/dcssblk.c

@@ -636,10 +636,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	}
 	dev_info->gd->major = dcssblk_major;
 	dev_info->gd->fops = &dcssblk_devops;
-	dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
+	dev_info->dcssblk_queue =
+		blk_alloc_queue(dcssblk_make_request, NUMA_NO_NODE);
 	dev_info->gd->queue = dev_info->dcssblk_queue;
 	dev_info->gd->private_data = dev_info;
-	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
 	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
 	blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);

drivers/s390/block/xpram.c

@@ -343,14 +343,14 @@ static int __init xpram_setup_blkdev(void)
 		xpram_disks[i] = alloc_disk(1);
 		if (!xpram_disks[i])
 			goto out;
-		xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
+		xpram_queues[i] = blk_alloc_queue(xpram_make_request,
+				NUMA_NO_NODE);
 		if (!xpram_queues[i]) {
 			put_disk(xpram_disks[i]);
 			goto out;
 		}
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
-		blk_queue_make_request(xpram_queues[i], xpram_make_request);
 		blk_queue_logical_block_size(xpram_queues[i], 4096);
 	}

include/linux/blkdev.h

@@ -1063,7 +1063,6 @@ extern void blk_abort_request(struct request *);
  * Access functions for manipulating queue properties
  */
 extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
@@ -1140,8 +1139,7 @@ extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
 bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
+struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id);
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);