Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A small collection of fixes that should go in before -rc1.  The pull
  request contains:

   - A two-patch fix for a regression with block enabled tagging caused
     by a commit in the initial pull request.  One patch is from Martin
     and ensures that SCSI doesn't truncate 64-bit block flags, the
     other one is from me and prevents us from double-using the struct
     request queuelist for both completion and busy tags.  This caused
     anything from a boot crash for some, to crashes under load (a
     minimal model of the list corruption is sketched below the
     message).

   - A blk-mq fix for a potential soft stall when hot unplugging CPUs
     with busy IO.

   - A percpu_counter fix is in here as well: percpu counters left in
     an inconsistent state during CPU removal caused a suspend issue
     with virtio-blk.  Andrew sent this in separately a few days ago,
     but it's included here too.  JFYI.

   - A few fixes for block integrity from Martin.

   - A ratelimit fix for loop from Mike Galbraith, to avoid spewing too
     much in error cases"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: fix regression with block enabled tagging
  scsi: Make sure cmd_flags are 64-bit
  block: Ensure we only enable integrity metadata for reads and writes
  block: Fix integrity verification
  block: Fix for_each_bvec()
  drivers/block/loop.c: ratelimit error messages
  blk-mq: fix potential stall during CPU unplug with IO pending
  percpu_counter: fix bad counter state during suspend
commit dd76a786af
Author: Linus Torvalds
Date:   2014-04-10 09:26:55 -07:00

12 changed files with 53 additions and 37 deletions

--- a/block/blk-core.c
+++ b/block/blk-core.c

@@ -1307,7 +1307,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                 struct request_list *rl = blk_rq_rl(req);
 
                 BUG_ON(!list_empty(&req->queuelist));
-                BUG_ON(!hlist_unhashed(&req->hash));
+                BUG_ON(ELV_ON_HASH(req));
 
                 blk_free_request(rl, req);
                 freed_request(rl, flags);

--- a/block/blk-mq.c
+++ b/block/blk-mq.c

@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
                                unsigned int cpu)
 {
         struct blk_mq_hw_ctx *hctx = data;
+        struct request_queue *q = hctx->queue;
         struct blk_mq_ctx *ctx;
         LIST_HEAD(tmp);
 
@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
         /*
          * Move ctx entries to new CPU, if this one is going away.
          */
-        ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+        ctx = __blk_mq_get_ctx(q, cpu);
 
         spin_lock(&ctx->lock);
         if (!list_empty(&ctx->rq_list)) {
@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
         if (list_empty(&tmp))
                 return;
 
-        ctx = blk_mq_get_ctx(hctx->queue);
+        ctx = blk_mq_get_ctx(q);
         spin_lock(&ctx->lock);
 
         while (!list_empty(&tmp)) {
@@ -988,10 +989,13 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
                 list_move_tail(&rq->queuelist, &ctx->rq_list);
         }
 
+        hctx = q->mq_ops->map_queue(q, ctx->cpu);
         blk_mq_hctx_mark_pending(hctx, ctx);
 
         spin_unlock(&ctx->lock);
-        blk_mq_put_ctx(ctx);
+
+        blk_mq_put_ctx(ctx);
+        blk_mq_run_hw_queue(hctx, true);
 }
 
 static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,

--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c

@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
         while (!list_empty(&local_list)) {
                 struct request *rq;
 
-                rq = list_entry(local_list.next, struct request, queuelist);
-                list_del_init(&rq->queuelist);
+                rq = list_entry(local_list.next, struct request, ipi_list);
+                list_del_init(&rq->ipi_list);
                 rq->q->softirq_done_fn(rq);
         }
 }
@@ -45,14 +45,9 @@ static void trigger_softirq(void *data)
         local_irq_save(flags);
         list = this_cpu_ptr(&blk_cpu_done);
 
-        /*
-         * We reuse queuelist for a list of requests to process. Since the
-         * queuelist is used by the block layer only for requests waiting to be
-         * submitted to the device it is unused now.
-         */
-        list_add_tail(&rq->queuelist, list);
+        list_add_tail(&rq->ipi_list, list);
 
-        if (list->next == &rq->queuelist)
+        if (list->next == &rq->ipi_list)
                 raise_softirq_irqoff(BLOCK_SOFTIRQ);
 
         local_irq_restore(flags);
@@ -141,7 +136,7 @@ void __blk_complete_request(struct request *req)
                 struct list_head *list;
 do_local:
                 list = this_cpu_ptr(&blk_cpu_done);
-                list_add_tail(&req->queuelist, list);
+                list_add_tail(&req->ipi_list, list);
 
                 /*
                  * if the list only contains our just added request,
@@ -149,7 +144,7 @@ do_local:
                  * entries there, someone already raised the irq but it
                  * hasn't run yet.
                  */
-                if (list->next == &req->queuelist)
+                if (list->next == &req->ipi_list)
                         raise_softirq_irqoff(BLOCK_SOFTIRQ);
         } else if (raise_blk_irq(ccpu, req))
                 goto do_local;
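
The list->next == &req->ipi_list test above is the usual empty-to-non-empty transition check: the softirq only needs to be raised when the request just added is the first entry, because an already-raised softirq will drain everything queued behind it. A runnable model of the idiom (list helpers hand-rolled, names illustrative):

#include <assert.h>
#include <stdio.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *n) { n->next = n->prev = n; }

static void list_add_tail(struct node *n, struct node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

int main(void)
{
        struct node done_list, a, b;
        int raised = 0;

        list_init(&done_list);

        list_add_tail(&a, &done_list);
        if (done_list.next == &a)       /* first entry: list was empty */
                raised++;

        list_add_tail(&b, &done_list);
        if (done_list.next == &b)       /* false: a is still at the front */
                raised++;

        printf("raised %d time(s) for 2 completions\n", raised);
        assert(raised == 1);
        return 0;
}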

--- a/block/blk.h
+++ b/block/blk.h

@@ -78,7 +78,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
 /*
  * Internal elevator interface
  */
-#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
 void blk_abort_flushes(struct request_queue *q);

--- a/block/elevator.c
+++ b/block/elevator.c

@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
 static inline void __elv_rqhash_del(struct request *rq)
 {
         hash_del(&rq->hash);
+        rq->cmd_flags &= ~REQ_HASHED;
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 
         BUG_ON(ELV_ON_HASH(rq));
         hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+        rq->cmd_flags |= REQ_HASHED;
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
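
ELV_ON_HASH() switches from hash_hashed() to a cmd_flags bit here because, with the hash node sharing a union with ipi_list (see the blkdev.h hunk further down), the node's pointers can hold stale non-NULL values once the storage has been reused, so hash membership has to be tracked out of band. A simplified userspace model of the failure mode (types and names are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_HASHED 0x1u

struct req {
        union {
                struct { void *next; void **pprev; } hash;      /* ~hlist_node */
                struct { void *next; void *prev; }  ipi_list;   /* ~list_head */
        };
        unsigned int flags;
};

static bool on_hash_by_pointer(struct req *r) { return r->hash.pprev != NULL; }
static bool on_hash_by_flag(struct req *r)    { return r->flags & FLAG_HASHED; }

int main(void)
{
        struct req r = { .flags = 0 };

        /* request was unhashed, then its union reused for completion: */
        r.ipi_list.next = &r;
        r.ipi_list.prev = &r;

        /* hash.pprev now aliases ipi_list.prev, so the pointer test lies */
        printf("pointer check says hashed: %d (stale!)\n", on_hash_by_pointer(&r));
        printf("flag check says hashed:    %d\n", on_hash_by_flag(&r));
        return 0;
}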

--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c

@@ -237,7 +237,7 @@ static int __do_lo_send_write(struct file *file,
         file_end_write(file);
         if (likely(bw == len))
                 return 0;
-        printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
+        printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
                         (unsigned long long)pos, len);
         if (bw >= 0)
                 bw = -EIO;
@@ -277,7 +277,7 @@ static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
                 return __do_lo_send_write(lo->lo_backing_file,
                                 page_address(page), bvec->bv_len,
                                 pos);
-        printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
+        printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
                "length %i.\n", (unsigned long long)pos, bvec->bv_len);
         if (ret > 0)
                 ret = -EIO;
@@ -316,7 +316,7 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 out:
         return ret;
 fail:
-        printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
+        printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
         ret = -ENOMEM;
         goto out;
 }
@@ -345,7 +345,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                 size = p->bsize;
 
         if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
-                printk(KERN_ERR "loop: transfer error block %ld\n",
+                printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
                        page->index);
                 size = -EINVAL;
         }
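
printk_ratelimited() drops messages once a burst allowance is exhausted within a time window, which is what keeps a failing loop device from flooding the log. A userspace model of that behaviour, assuming the kernel's default 5-second interval and burst of 10; the helper below is illustrative, not the kernel implementation:

#include <stdio.h>
#include <time.h>

#define RL_INTERVAL 5           /* seconds, ~DEFAULT_RATELIMIT_INTERVAL */
#define RL_BURST    10          /* ~DEFAULT_RATELIMIT_BURST */

static int ratelimited_log(const char *msg)
{
        static time_t window_start;
        static int sent;
        time_t now = time(NULL);

        if (now - window_start >= RL_INTERVAL) {
                window_start = now;     /* new window: reset the budget */
                sent = 0;
        }
        if (sent >= RL_BURST)
                return 0;               /* suppressed */
        sent++;
        fprintf(stderr, "%s\n", msg);
        return 1;
}

int main(void)
{
        int i, printed = 0;

        for (i = 0; i < 1000; i++)      /* simulate an I/O error storm */
                printed += ratelimited_log("loop: write error");
        printf("printed %d of 1000 messages\n", printed);
        return 0;
}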

--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c

@@ -184,7 +184,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  */
 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                  int data_direction, void *buffer, unsigned bufflen,
-                 unsigned char *sense, int timeout, int retries, int flags,
+                 unsigned char *sense, int timeout, int retries, u64 flags,
                  int *resid)
 {
         struct request *req;
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(scsi_execute);
 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                      int data_direction, void *buffer, unsigned bufflen,
                      struct scsi_sense_hdr *sshdr, int timeout, int retries,
-                     int *resid, int flags)
+                     int *resid, u64 flags)
 {
         char *sense = NULL;
         int result;
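
The widening to u64 matters because cmd_flags is a 64-bit mask: routing it through an int parameter silently drops any flag above bit 31. A runnable demonstration; the flag bit below is hypothetical, not a real REQ_* value, and out-of-range conversion to int is formally implementation-defined, though on common ABIs the high bits are simply lost:

#include <stdint.h>
#include <stdio.h>

#define HYPOTHETICAL_FLAG (1ULL << 33)  /* illustrative high flag bit */

static uint64_t through_int(int flags)          /* old, truncating signature */
{
        return (uint64_t)flags;
}

static uint64_t through_u64(uint64_t flags)     /* fixed signature */
{
        return flags;
}

int main(void)
{
        uint64_t flags = HYPOTHETICAL_FLAG;

        printf("int path:  flag survives? %s\n",
               through_int(flags) & HYPOTHETICAL_FLAG ? "yes" : "no");
        printf("u64 path:  flag survives? %s\n",
               through_u64(flags) & HYPOTHETICAL_FLAG ? "yes" : "no");
        return 0;
}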

--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c

@@ -182,6 +182,9 @@ static int bdev_integrity_enabled(struct block_device *bdev, int rw)
  */
 int bio_integrity_enabled(struct bio *bio)
 {
+        if (!bio_is_rw(bio))
+                return 0;
+
         /* Already protected? */
         if (bio_integrity(bio))
                 return 0;
@@ -309,10 +312,9 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
 {
         struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
         struct blk_integrity_exchg bix;
-        struct bio_vec bv;
-        struct bvec_iter iter;
+        struct bio_vec *bv;
         sector_t sector;
-        unsigned int sectors, ret = 0;
+        unsigned int sectors, ret = 0, i;
         void *prot_buf = bio->bi_integrity->bip_buf;
 
         if (operate)
@@ -323,16 +325,16 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
         bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
         bix.sector_size = bi->sector_size;
 
-        bio_for_each_segment(bv, bio, iter) {
-                void *kaddr = kmap_atomic(bv.bv_page);
-                bix.data_buf = kaddr + bv.bv_offset;
-                bix.data_size = bv.bv_len;
+        bio_for_each_segment_all(bv, bio, i) {
+                void *kaddr = kmap_atomic(bv->bv_page);
+                bix.data_buf = kaddr + bv->bv_offset;
+                bix.data_size = bv->bv_len;
                 bix.prot_buf = prot_buf;
                 bix.sector = sector;
 
-                if (operate) {
+                if (operate)
                         bi->generate_fn(&bix);
-                } else {
+                else {
                         ret = bi->verify_fn(&bix);
                         if (ret) {
                                 kunmap_atomic(kaddr);
@@ -340,7 +342,7 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
                         }
                 }
 
-                sectors = bv.bv_len / bi->sector_size;
+                sectors = bv->bv_len / bi->sector_size;
                 sector += sectors;
                 prot_buf += sectors * bi->tuple_size;
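
The switch from bio_for_each_segment() to bio_for_each_segment_all() matters because verification runs at completion time, when the bio's iterator has typically been advanced past all of its data; an iterator-based walk then visits nothing, while bio_for_each_segment_all() walks the backing bvec array from index 0. A simplified, runnable model (types and names are stand-ins):

#include <stdio.h>

struct seg { int len; };

int main(void)
{
        struct seg segs[3] = { {4}, {4}, {4} };
        unsigned int nsegs = 3;
        unsigned int iter_idx = 3, iter_size = 0;  /* drained by completion time */
        unsigned int i;
        int seen;

        /* iterator-style walk (the old code): nothing left to visit */
        seen = 0;
        for (i = iter_idx; iter_size; i++) {
                seen += segs[i].len;
                iter_size -= segs[i].len;
        }
        printf("iterator walk saw %d bytes\n", seen);   /* 0 */

        /* whole-array walk (the fix): visits every segment */
        seen = 0;
        for (i = 0; i < nsegs; i++)
                seen += segs[i].len;
        printf("array walk saw %d bytes\n", seen);      /* 12 */
        return 0;
}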

--- a/include/linux/bio.h
+++ b/include/linux/bio.h

@@ -216,9 +216,9 @@ static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
 }
 
 #define for_each_bvec(bvl, bio_vec, iter, start)                        \
-        for ((iter) = start;                                            \
-             (bvl) = bvec_iter_bvec((bio_vec), (iter)),                 \
-             (iter).bi_size;                                            \
+        for (iter = (start);                                            \
+             (iter).bi_size &&                                          \
+                ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);         \
              bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
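
The macro fix above is purely about evaluation order: the old body fetched bvec_iter_bvec() before testing bi_size, so an exhausted iterator still dereferenced one element past the end of the bvec table; the new body tests bi_size first and keeps the fetch in the condition via the comma operator. A userspace model of the same pattern (types simplified, names illustrative):

#include <stdio.h>

struct vec  { int len; };
struct iter { unsigned int idx, size; };

#define ITER_VEC(v, it)  ((v)[(it).idx])  /* no bounds check, like bvec_iter_bvec() */

/* Broken order: fetch first, test second -- reads v[idx] even when size == 0. */
#define FOR_EACH_BROKEN(el, v, it) \
        for (; (el) = ITER_VEC(v, it), (it).size; (it).idx++, (it).size -= (el).len)

/* Fixed order: test size first; the && keeps the fetch inside the condition. */
#define FOR_EACH_FIXED(el, v, it) \
        for (; (it).size && (((el) = ITER_VEC(v, it)), 1); (it).idx++, (it).size -= (el).len)

int main(void)
{
        struct vec v[2] = { { .len = 3 }, { .len = 5 } };
        struct iter it = { .idx = 0, .size = 8 };
        struct vec el;
        int total = 0;

        FOR_EACH_FIXED(el, v, it)
                total += el.len;
        printf("visited %d bytes\n", total);  /* 8, and v[2] is never fetched */
        return 0;
}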

--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h

@@ -189,6 +189,7 @@ enum rq_flag_bits {
         __REQ_KERNEL,           /* direct IO to kernel pages */
         __REQ_PM,               /* runtime pm request */
         __REQ_END,              /* last of chain of requests */
+        __REQ_HASHED,           /* on IO scheduler merge hash */
         __REQ_NR_BITS,          /* stops here */
 };
 
@@ -241,5 +242,6 @@ enum rq_flag_bits {
 #define REQ_KERNEL              (1ULL << __REQ_KERNEL)
 #define REQ_PM                  (1ULL << __REQ_PM)
 #define REQ_END                 (1ULL << __REQ_END)
+#define REQ_HASHED              (1ULL << __REQ_HASHED)
 
 #endif /* __LINUX_BLK_TYPES_H */

--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h

@@ -118,7 +118,18 @@ struct request {
         struct bio *bio;
         struct bio *biotail;
 
-        struct hlist_node hash; /* merge hash */
+        /*
+         * The hash is used inside the scheduler, and killed once the
+         * request reaches the dispatch list. The ipi_list is only used
+         * to queue the request for softirq completion, which is long
+         * after the request has been unhashed (and even removed from
+         * the dispatch list).
+         */
+        union {
+                struct hlist_node hash; /* merge hash */
+                struct list_head ipi_list;
+        };
+
         /*
          * The rb_node is only used inside the io scheduler, requests
          * are pruned when moved to the dispatch queue. So let the
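
The comment in this hunk is the whole contract that makes the union safe: hash and ipi_list are never live at the same time, so they can share storage instead of growing struct request. A small model of the space saving (the two node types below are simplified stand-ins for hlist_node and list_head):

#include <stdio.h>

struct hnode { void *next; void **pprev; };     /* ~hlist_node */
struct lnode { void *next; void *prev; };       /* ~list_head */

struct with_union {
        union { struct hnode hash; struct lnode ipi_list; };
};

struct without_union {
        struct hnode hash;
        struct lnode ipi_list;
};

int main(void)
{
        printf("union: %zu bytes, separate fields: %zu bytes\n",
               sizeof(struct with_union), sizeof(struct without_union));
        return 0;
}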

--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h

@@ -423,11 +423,11 @@ extern int scsi_is_target_device(const struct device *);
 extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                         int data_direction, void *buffer, unsigned bufflen,
                         unsigned char *sense, int timeout, int retries,
-                        int flag, int *resid);
+                        u64 flags, int *resid);
 extern int scsi_execute_req_flags(struct scsi_device *sdev,
         const unsigned char *cmd, int data_direction, void *buffer,
         unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
-        int retries, int *resid, int flags);
+        int retries, int *resid, u64 flags);
 static inline int scsi_execute_req(struct scsi_device *sdev,
         const unsigned char *cmd, int data_direction, void *buffer,
         unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,