From bb830add192e9d8338082c0fc2c209e23b43d865 Mon Sep 17 00:00:00 2001
From: Susobhan Dey
Date: Tue, 25 Sep 2018 12:29:15 -0700
Subject: [PATCH 1/8] nvme: properly propagate errors in nvme_mpath_init

Signed-off-by: Susobhan Dey
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/multipath.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5a9562881d4e..9fe3fff818b8 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
 	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-	if (!ctrl->ana_log_buf)
+	if (!ctrl->ana_log_buf) {
+		error = -ENOMEM;
 		goto out;
+	}

 	error = nvme_read_ana_log(ctrl, true);
 	if (error)
@@ -547,7 +549,7 @@
 out_free_ana_log_buf:
 	kfree(ctrl->ana_log_buf);
 out:
-	return -ENOMEM;
+	return error;
 }

 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)

From 530ca2c9bd6949c72c9b5cfc330cb3dbccaa3f5b Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Tue, 25 Sep 2018 10:36:20 -0600
Subject: [PATCH 2/8] blk-mq: Allow blocking queue tag iter callbacks

A recent commit runs tag iterator callbacks under the rcu read lock,
but existing callbacks do not satisfy the non-blocking requirement.
The commit intended to prevent an iterator from accessing a queue
that's being modified. This patch fixes the original issue by taking a
queue reference instead of reading it, which allows callbacks to make
blocking calls.

Fixes: f5bbbbe4d6357 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")
Acked-by: Jianchao Wang
Signed-off-by: Keith Busch
Signed-off-by: Jens Axboe
---
 block/blk-mq-tag.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 94e1ed667b6e..41317c50a446 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	/*
 	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-	 * synchronize_rcu to ensure all of the users go out of the critical
-	 * section below and see zeroed q_usage_counter.
+	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+	 * to avoid race with it.
 	 */
-	rcu_read_lock();
-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
-		rcu_read_unlock();
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
-	}

 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-	rcu_read_unlock();
+	blk_queue_exit(q);
 }

 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,

From 854f31ccdd7964c9c2e68da234a3a8aedb51cf6b Mon Sep 17 00:00:00 2001
From: Damien Le Moal
Date: Thu, 27 Sep 2018 10:55:13 +0900
Subject: [PATCH 3/8] block: fix deadline elevator drain for zoned block devices

When the deadline scheduler is used with a zoned block device, writes
to a zone will be dispatched one at a time.
This causes the warning message
"deadline: forced dispatching is broken (nr_sorted=X), please report this"
to be displayed when switching to another elevator with the legacy I/O
path while write requests to a zone are being retained in the scheduler
queue.

Prevent this message from being displayed when executing
elv_drain_elevator() for a zoned block device. __blk_drain_queue() will
loop until all writes are dispatched and completed, resulting in the
desired elevator queue drain without extensive modifications to the
deadline code itself to handle forced-dispatch calls.

Signed-off-by: Damien Le Moal
Fixes: 8dc8146f9c92 ("deadline-iosched: Introduce zone locking support")
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe
---
 block/elevator.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/elevator.c b/block/elevator.c
index 6a06b5d040e5..fae58b2f906f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
 		;
-	if (q->nr_sorted && printed++ < 10) {
+	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
 		printk(KERN_ERR "%s: forced dispatching is broken "
 		       "(nr_sorted=%u), please report this\n",
 		       q->elevator->type->elevator_name, q->nr_sorted);

From f151ba989d149bbdfc90e5405724bbea094f9b17 Mon Sep 17 00:00:00 2001
From: Boris Ostrovsky
Date: Sat, 22 Sep 2018 15:55:49 -0400
Subject: [PATCH 4/8] xen/blkfront: When purging persistent grants, keep them in the buffer

Commit a46b53672b2c ("xen/blkfront: cleanup stale persistent grants")
added support for purging persistent grants when they are not in use.
As part of the purge, the grants were removed from the grant buffer.
This eventually causes the buffer to become empty, with BUG_ON
triggered in get_free_grant(). This can be observed even on an idle
system, within 20-30 minutes.

We should keep the grants in the buffer when purging, and only free
the grant ref.

Fixes: a46b53672b2c ("xen/blkfront: cleanup stale persistent grants")
Reviewed-by: Juergen Gross
Signed-off-by: Boris Ostrovsky
Signed-off-by: Jens Axboe
---
 drivers/block/xen-blkfront.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a71d817e900d..3b441fe69c0d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2667,11 +2667,9 @@ static void purge_persistent_grants(struct blkfront_info *info)
 		    gnttab_query_foreign_access(gnt_list_entry->gref))
 			continue;
-		list_del(&gnt_list_entry->node);
 		gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
+		gnt_list_entry->gref = GRANT_INVALID_REF;
 		rinfo->persistent_gnts_c--;
-		__free_page(gnt_list_entry->page);
-		kfree(gnt_list_entry);
 	}

 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

From 0f843e65d9eef4936929bb036c5f771fb261eea4 Mon Sep 17 00:00:00 2001
From: Guoju Fang
Date: Thu, 27 Sep 2018 23:41:46 +0800
Subject: [PATCH 5/8] bcache: add separate workqueue for journal_write to avoid deadlock

After a write to the SSD completes, bcache schedules the journal_write
work on system_wq, a shared system workqueue without the WQ_MEM_RECLAIM
flag. system_wq is also a bound workqueue, so there may be no idle
kworker on the current processor. Creating a new kworker may
unfortunately need to reclaim memory first, by shrinking the caches and
slabs used by the VFS, which in turn depend on the bcache device. That
is a deadlock.

This patch creates a new workqueue for journal_write with the
WQ_MEM_RECLAIM flag.
Its rescuer thread will work to avoid the deadlock.

Signed-off-by: Guoju Fang
Cc: stable@vger.kernel.org
Signed-off-by: Coly Li
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/bcache.h  | 1 +
 drivers/md/bcache/journal.c | 6 +++---
 drivers/md/bcache/super.c   | 8 ++++++++
 3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 83504dd8100a..954dad29e6e8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;

diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6116bbf870d8..522c7426f3a0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
 		closure_get(&ca->set->cl);
 		INIT_WORK(&ja->discard_work, journal_discard_work);
-		schedule_work(&ja->discard_work);
+		queue_work(bch_journal_wq, &ja->discard_work);
 	}
 }
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
 		: &j->w[0];

 	__closure_wake_up(&w->wait);
-	continue_at_nobarrier(cl, journal_write, system_wq);
+	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }

 static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
 		spin_unlock(&c->journal.lock);
 		btree_flush_write(c);
-		continue_at(cl, journal_write, system_wq);
+		continue_at(cl, journal_write, bch_journal_wq);
 		return;
 	}

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 94c756c66bd7..30ba9aeb5ee8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -47,6 +47,7 @@ static int bcache_major;
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;

 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
 		kobject_put(bcache_kobj);
 	if (bcache_wq)
 		destroy_workqueue(bcache_wq);
+	if (bch_journal_wq)
+		destroy_workqueue(bch_journal_wq);
+
 	if (bcache_major)
 		unregister_blkdev(bcache_major, "bcache");
 	unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
 	if (!bcache_wq)
 		goto err;
+	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+	if (!bch_journal_wq)
+		goto err;
+
 	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
 	if (!bcache_kobj)
 		goto err;

From 587562d0c7cd6861f4f90a2eb811cccb1a376f5f Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Wed, 26 Sep 2018 14:35:50 +0200
Subject: [PATCH 6/8] blk-mq: I/O and timer unplugs are inverted in blktrace

trace_block_unplug() takes true for explicit unplugs and false for
implicit unplugs. schedule() unplugs are implicit and should be
reported as timer unplugs. While correct in the legacy code, this has
been inverted in blk-mq since 4.11.
Cc: stable@vger.kernel.org
Fixes: bd166ef183c2 ("blk-mq-sched: add framework for MQ capable IO schedulers")
Reviewed-by: Omar Sandoval
Signed-off-by: Ilya Dryomov
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 85a1c1a59c72..e3c39ea8e17b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!rq->q);
 		if (rq->mq_ctx != this_ctx) {
 			if (this_ctx) {
-				trace_block_unplug(this_q, depth, from_schedule);
+				trace_block_unplug(this_q, depth, !from_schedule);
 				blk_mq_sched_insert_requests(this_q, this_ctx,
 								&ctx_list,
 								from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 * on 'ctx_list'. Do those.
 	 */
 	if (this_ctx) {
-		trace_block_unplug(this_q, depth, from_schedule);
+		trace_block_unplug(this_q, depth, !from_schedule);
 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
 						from_schedule);
 	}

From 15c206887603a452f13fbfde2db0f8830d37028c Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 28 Sep 2018 09:40:17 -0600
Subject: [PATCH 7/8] Revert "xen/blkfront: When purging persistent grants, keep them in the buffer"

The fix didn't work for all cases, so revert it to make room for a
(hopefully) better fix.

This reverts commit f151ba989d149bbdfc90e5405724bbea094f9b17.

Signed-off-by: Jens Axboe
---
 drivers/block/xen-blkfront.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3b441fe69c0d..a71d817e900d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2667,9 +2667,11 @@ static void purge_persistent_grants(struct blkfront_info *info)
 		    gnttab_query_foreign_access(gnt_list_entry->gref))
 			continue;
+		list_del(&gnt_list_entry->node);
 		gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
-		gnt_list_entry->gref = GRANT_INVALID_REF;
 		rinfo->persistent_gnts_c--;
+		__free_page(gnt_list_entry->page);
+		kfree(gnt_list_entry);
 	}

 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

From 6c7678674014b4552caf0e5aa0ca34078a377482 Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Fri, 28 Sep 2018 09:28:27 +0200
Subject: [PATCH 8/8] xen/blkfront: correct purging of persistent grants

Commit a46b53672b2c2e3770b38a4abf90d16364d2584b ("xen/blkfront: cleanup
stale persistent grants") introduced a regression as purged persistent
grants were not put into the list of free grants again. Correct that.

Reviewed-by: Boris Ostrovsky
Signed-off-by: Juergen Gross
Signed-off-by: Jens Axboe
---
 drivers/block/xen-blkfront.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a71d817e900d..429d20131c7e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
 		list_del(&gnt_list_entry->node);
 		gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
 		rinfo->persistent_gnts_c--;
-		__free_page(gnt_list_entry->page);
-		kfree(gnt_list_entry);
+		gnt_list_entry->gref = GRANT_INVALID_REF;
+		list_add_tail(&gnt_list_entry->node, &rinfo->grants);
 	}

 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);