diff --git a/MAINTAINERS b/MAINTAINERS
index 2f3eba4484aa..95e56fd128a5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8367,7 +8367,7 @@ F:	drivers/media/dvb-frontends/lgdt3305.*
 LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
 M:	Viresh Kumar
 L:	linux-ide@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:	Maintained
 F:	include/linux/pata_arasan_cf_data.h
 F:	drivers/ata/pata_arasan_cf.c
@@ -8384,7 +8384,7 @@ F:	drivers/ata/ata_generic.c
 LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
 M:	Linus Walleij
 L:	linux-ide@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:	Maintained
 F:	drivers/ata/pata_ftide010.c
 F:	drivers/ata/sata_gemini.c
@@ -8403,7 +8403,7 @@ F:	include/linux/ahci_platform.h
 LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
 M:	Mikael Pettersson
 L:	linux-ide@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:	Maintained
 F:	drivers/ata/sata_promise.*
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 74c002ddc0ce..28c40624bcb6 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 		io_req->fds[0] = dev->cow.fd;
 	else
 		io_req->fds[0] = dev->fd;
+	io_req->error = 0;
 
 	if (req_op(req) == REQ_OP_FLUSH) {
 		io_req->op = UBD_FLUSH;
@@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 		io_req->cow_offset = -1;
 		io_req->offset = off;
 		io_req->length = bvec->bv_len;
-		io_req->error = 0;
 		io_req->sector_mask = 0;
-
 		io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
 		io_req->offsets[0] = 0;
 		io_req->offsets[1] = dev->cow.data_offset;
@@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
+	struct ubd *ubd_dev = hctx->queue->queuedata;
 	struct request *req = bd->rq;
 	int ret = 0;
 
 	blk_mq_start_request(req);
 
+	spin_lock_irq(&ubd_dev->lock);
+
 	if (req_op(req) == REQ_OP_FLUSH) {
 		ret = ubd_queue_one_vec(hctx, req, 0, NULL);
 	} else {
@@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	}
 out:
-	if (ret < 0) {
+	spin_unlock_irq(&ubd_dev->lock);
+
+	if (ret < 0)
 		blk_mq_requeue_request(req, true);
-	}
+
 	return BLK_STS_OK;
 }
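The two ubd hunks address separate issues: io_req->error was only initialized on the non-flush path, so a REQ_OP_FLUSH request went out with an uninitialized error field, and ->queue_rq() can be invoked concurrently while the ubd submission path is not reentrant, which is why request issue is now wrapped in ubd_dev->lock. As a rough userspace illustration of the locking half only, the pthread sketch below serializes a two-step submission so records from concurrent submitters cannot interleave. Every name in it (issue_one, submitter, and so on) is invented for the example; this is deliberately not UML or blk-mq code.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t issue_lock = PTHREAD_MUTEX_INITIALIZER;
    static char log_buf[4096];
    static size_t log_len;

    /* A two-step, non-atomic "submission": without the lock the header and
     * payload from two threads could interleave, which is the shape of the
     * race the spin_lock_irq() in ubd_queue_rq() closes. */
    static void issue_one(const char *hdr, const char *payload)
    {
        pthread_mutex_lock(&issue_lock);   /* ~ spin_lock_irq(&ubd_dev->lock) */
        log_len += snprintf(log_buf + log_len, sizeof(log_buf) - log_len, "%s", hdr);
        log_len += snprintf(log_buf + log_len, sizeof(log_buf) - log_len, "%s\n", payload);
        pthread_mutex_unlock(&issue_lock); /* ~ spin_unlock_irq(&ubd_dev->lock) */
    }

    static void *submitter(void *arg)
    {
        for (int i = 0; i < 100; i++)
            issue_one((const char *)arg, "4096@0");
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, submitter, (void *)"READ ");
        pthread_create(&b, NULL, submitter, (void *)"WRITE ");
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("%zu bytes of intact records\n", log_len);
        return 0;
    }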
diff --git a/block/bio.c b/block/bio.c
index d5368a445561..a50d59236b19 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1260,6 +1260,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		if (ret)
 			goto cleanup;
 	} else {
+		zero_fill_bio(bio);
 		iov_iter_advance(iter, bio->bi_iter.bi_size);
 	}
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 76f867ea9a9b..e8b3bb9bf375 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -51,16 +51,12 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
+	if (!nr_sects)
+		return -EINVAL;
+
 	while (nr_sects) {
-		unsigned int req_sects = nr_sects;
-		sector_t end_sect;
-
-		if (!req_sects)
-			goto fail;
-		if (req_sects > UINT_MAX >> 9)
-			req_sects = UINT_MAX >> 9;
-
-		end_sect = sector + req_sects;
+		unsigned int req_sects = min_t(unsigned int, nr_sects,
+				bio_allowed_max_sectors(q));
 
 		bio = blk_next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
@@ -68,8 +64,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		bio_set_op_attrs(bio, op, 0);
 		bio->bi_iter.bi_size = req_sects << 9;
+		sector += req_sects;
 		nr_sects -= req_sects;
-		sector = end_sect;
 
 		/*
 		 * We can loop for a long time in here, if someone does
@@ -82,14 +78,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	*biop = bio;
 	return 0;
-
-fail:
-	if (bio) {
-		submit_bio_wait(bio);
-		bio_put(bio);
-	}
-	*biop = NULL;
-	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(__blkdev_issue_discard);
@@ -161,7 +149,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		return -EOPNOTSUPP;
 
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
-	max_write_same_sectors = UINT_MAX >> 9;
+	max_write_same_sectors = bio_allowed_max_sectors(q);
 
 	while (nr_sects) {
 		bio = blk_next_bio(bio, 1, gfp_mask);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6b5ad275ed56..e7696c47489a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -46,7 +46,7 @@ static inline bool bio_will_gap(struct request_queue *q,
 		bio_get_first_bvec(prev_rq->bio, &pb);
 	else
 		bio_get_first_bvec(prev, &pb);
-	if (pb.bv_offset)
+	if (pb.bv_offset & queue_virt_boundary(q))
 		return true;
 
 	/*
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors = min(q->limits.max_discard_sectors,
+			bio_allowed_max_sectors(q));
 	max_discard_sectors -= max_discard_sectors % granularity;
 
 	if (unlikely(!max_discard_sectors)) {
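Why bio_allowed_max_sectors(): the old clamp of UINT_MAX >> 9 sectors is not a multiple of a 4096-byte logical block, so splitting a large discard or write-same at that cap produced a chunk whose byte size was not logical-block aligned. Rounding UINT_MAX down to the logical block size before converting to sectors keeps every chunk aligned. The standalone program below shows the arithmetic for 512-byte and 4096-byte blocks; round_down_u() is a stand-in that mirrors the kernel's round_down() for the power-of-two sizes used here, not kernel code.

    #include <stdio.h>
    #include <limits.h>

    /* Mirrors the kernel's round_down() for power-of-two alignments. */
    static unsigned int round_down_u(unsigned int x, unsigned int align)
    {
        return x & ~(align - 1);
    }

    int main(void)
    {
        unsigned int sizes[] = { 512, 4096 };

        for (int i = 0; i < 2; i++) {
            unsigned int lbs = sizes[i];          /* logical block size   */
            unsigned int lbs_sectors = lbs >> 9;  /* 512B sectors per block */
            unsigned int old_cap = UINT_MAX >> 9;
            unsigned int new_cap = round_down_u(UINT_MAX, lbs) >> 9;

            printf("lbs %4u: old cap %u sectors (remainder %u), "
                   "new cap %u sectors (remainder %u)\n",
                   lbs, old_cap, old_cap % lbs_sectors,
                   new_cap, new_cap % lbs_sectors);
        }
        return 0;
    }

With 4096-byte blocks the old cap leaves a 7-sector remainder in every full-size chunk; the new cap divides evenly, so the split never produces a misaligned tail.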
diff --git a/block/blk.h b/block/blk.h
index a1841b8ff129..0089fefdf771 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -169,7 +169,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
 		struct bio_vec *bprv, unsigned int offset)
 {
-	return offset ||
+	return (offset & queue_virt_boundary(q)) ||
 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
@@ -395,6 +395,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
 	return rq->__deadline & ~0x1UL;
 }
 
+/*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int', and it has to be aligned with the logical
+ * block size, which is the minimum unit accepted by the hardware.
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
 /*
  * Internal io_context interface
  */
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 10ecb232245d..4b1ff5bc256a 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Renesas R-Car SATA driver
  *
  * Author: Vladimir Barinov
  * Copyright (C) 2013-2015 Cogent Embedded, Inc.
  * Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
 */
 
 #include <linux/kernel.h>
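The blk-merge.c and blk.h hunks make gap detection honor the device's virt_boundary mask instead of flagging any non-zero first-bvec offset: an offset that is aligned to the boundary (for example a page-aligned offset under NVMe's 4K virt boundary) is not a gap and must not block a merge. A minimal demonstration of the predicate, with the old and new checks side by side (plain C, not the kernel helpers):

    #include <stdio.h>

    int main(void)
    {
        unsigned long vb_mask = 0xfff; /* 4K virt boundary, as on NVMe */
        unsigned int offsets[] = { 0x0, 0x200, 0x1000, 0x3200 };

        for (int i = 0; i < 4; i++) {
            unsigned int off = offsets[i];
            int old_gap = off != 0;             /* old: any non-zero offset */
            int new_gap = (off & vb_mask) != 0; /* new: only bits inside the boundary */

            printf("first bvec offset 0x%-5x  old says gap: %d  new says gap: %d\n",
                   off, old_gap, new_gap);
        }
        return 0;
    }

Offset 0x1000 is the interesting case: the old check reports a gap even though the offset sits exactly on the 4K boundary, while the masked check correctly allows the merge.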
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2e65be8b1387..559d567693b8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->ndev)
 		nvme_nvm_update_nvm_info(ns);
 #ifdef CONFIG_NVME_MULTIPATH
-	if (ns->head->disk)
+	if (ns->head->disk) {
 		nvme_update_disk_info(ns->head->disk, ns, id);
+		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+	}
 #endif
 }
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5e3cc8c59a39..9901afd804ce 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* set to a default value of 512 until the disk is validated */
 	blk_queue_logical_block_size(q, 512);
+	blk_set_stacking_limits(&q->limits);
 
 	/* we need to propagate up the VWC settings */
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f4efe289dc7b..a5f9bbce863f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
 	struct pci_dev *p2p_dev;
 	int ret;
 
-	if (!ctrl->p2p_client)
+	if (!ctrl->p2p_client || !ns->use_p2pmem)
 		return;
 
 	if (ns->p2p_dev) {
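On the NVMe host side, the multipath head disk now starts from blk_set_stacking_limits() and restacks the path queue's limits on each revalidate, so the head device never advertises more than its underlying paths can handle; the nvmet core hunk is an unrelated guard that skips P2P device association when the namespace has not opted into p2pmem. The toy model below shows only the min-of-caps idea behind limit stacking; the real blk_stack_limits() handles many more fields (alignment, granularity, chunk sectors, and so on), and struct limits, stack(), and all values here are made up for illustration.

    #include <stdio.h>

    struct limits {
        unsigned int max_sectors;
        unsigned int max_segments;
        unsigned int logical_block_size;
    };

    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
    static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

    /* The stacked (top) device must not exceed the most restrictive
     * bottom device; block size stacks up to the largest, not smallest. */
    static void stack(struct limits *top, const struct limits *bottom)
    {
        top->max_sectors = min_u(top->max_sectors, bottom->max_sectors);
        top->max_segments = min_u(top->max_segments, bottom->max_segments);
        top->logical_block_size = max_u(top->logical_block_size,
                                        bottom->logical_block_size);
    }

    int main(void)
    {
        /* start from permissive defaults, as a stacking queue does */
        struct limits head = { ~0u, 128, 512 };
        struct limits path_a = { 2048, 33, 512 };
        struct limits path_b = { 1280, 64, 4096 };

        stack(&head, &path_a);
        stack(&head, &path_b);
        printf("head: max_sectors=%u max_segments=%u lbs=%u\n",
               head.max_sectors, head.max_segments, head.logical_block_size);
        return 0;
    }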
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ddce100be57a..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
 	int inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_rdma_delete_wq);
+		flush_scheduled_work();
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 		/* Destroying rdma_cm id is not needed here */
 		return 0;
 	}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 	}
 }
 
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+	schedule_work(&queue->release_work);
 }
 
 /**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
 	if (ret)
 		goto err_ib_client;
 
-	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-	if (!nvmet_rdma_delete_wq) {
-		ret = -ENOMEM;
-		goto err_unreg_transport;
-	}
-
 	return 0;
 
-err_unreg_transport:
-	nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
 
 static void __exit nvmet_rdma_exit(void)
 {
-	destroy_workqueue(nvmet_rdma_delete_wq);
 	nvmet_unregister_transport(&nvmet_rdma_ops);
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
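The nvmet-rdma change drops the driver-private delete workqueue in favor of the system workqueue: queue release work goes through schedule_work(), and a new admin-queue connect (host_qid == 0) calls flush_scheduled_work() so any in-flight controller teardown finishes before the accept. The sketch below models that flush-before-accept ordering with a pthread condition variable; pending, release_work(), and flush_release_work() are hand-rolled stand-ins for the kernel workqueue API, not the API itself.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t idle = PTHREAD_COND_INITIALIZER;
    static int pending; /* queued release work items */

    static void *release_work(void *arg)
    {
        (void)arg;
        usleep(100 * 1000); /* pretend teardown takes a while */
        pthread_mutex_lock(&lock);
        if (--pending == 0)
            pthread_cond_broadcast(&idle);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void flush_release_work(void) /* ~ flush_scheduled_work() */
    {
        pthread_mutex_lock(&lock);
        while (pending)
            pthread_cond_wait(&idle, &lock);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&lock);
        pending++;                   /* ~ schedule_work(&queue->release_work) */
        pthread_mutex_unlock(&lock);
        pthread_create(&t, NULL, release_work, NULL);

        flush_release_work();        /* connect path for host_qid == 0 */
        printf("teardown complete, safe to accept the new admin queue\n");
        pthread_join(t, NULL);
        return 0;
    }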