block: make rq_init() do a full memset()

This requires moving rq_init() from get_request() to blk_alloc_request().
The upside is that we can now require rq_init() to be called on any
path that hands a request to the block layer.

rq_init() will be exported for code that uses struct request without
going through blk_get_request().

This is preparation for large command support, which needs to
initialize struct request in a proper way (that is, a memset() alone
will not work).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
FUJITA Tomonori 2008-04-25 12:26:28 +02:00 committed by Jens Axboe
parent c3270e577c
commit 1afb20f301
2 changed files with 5 additions and 32 deletions
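
For orientation before the diff: pieced together from the hunks below, the rq_init() this patch produces does a full memset() and then re-establishes only the fields whose all-zero state would be wrong. A sketch assembled from the diff (not a verbatim copy of the tree):

	void rq_init(struct request_queue *q, struct request *rq)
	{
		/* wipe everything, then redo the few fields for which
		 * zero is not a valid initial state */
		memset(rq, 0, sizeof(*rq));

		INIT_LIST_HEAD(&rq->queuelist);
		INIT_LIST_HEAD(&rq->donelist);
		rq->q = q;
		rq->sector = rq->hard_sector = (sector_t) -1;
		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);
		rq->tag = -1;		/* -1 means untagged; 0 is a valid tag */
		rq->ref_count = 1;
	}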

block/blk-barrier.c

@@ -143,10 +143,8 @@ static void queue_flush(struct request_queue *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
-	rq->cmd_flags = REQ_HARDBARRIER;
 	rq_init(q, rq);
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
+	rq->cmd_flags = REQ_HARDBARRIER;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
@@ -167,14 +165,11 @@ static inline struct request *start_ordered(struct request_queue *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
-	rq->cmd_flags = 0;
 	rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
 	if (q->ordered & QUEUE_ORDERED_FUA)
 		rq->cmd_flags |= REQ_FUA;
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
 
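The ordering in both hunks above is the point to notice: rq_init() now wipes the whole request, so any field set before the call is lost. That is why the REQ_HARDBARRIER assignment moves to after rq_init(), and the explicit cmd_flags/elevator_private clearing simply disappears. Schematically:

	/* before this patch: the flag survived, since rq_init() did no memset() */
	rq->cmd_flags = REQ_HARDBARRIER;
	rq_init(q, rq);

	/* after this patch: memset() would erase the flag, so set it afterwards */
	rq_init(q, rq);
	rq->cmd_flags = REQ_HARDBARRIER;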

block/blk-core.c

@@ -107,40 +107,18 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
 void rq_init(struct request_queue *q, struct request *rq)
 {
+	memset(rq, 0, sizeof(*rq));
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
-	rq->nr_sectors = rq->hard_nr_sectors = 0;
-	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-	rq->bio = rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->rq_disk = NULL;
-	rq->nr_phys_segments = 0;
-	rq->nr_hw_segments = 0;
-	rq->ioprio = 0;
-	rq->special = NULL;
-	rq->buffer = NULL;
 	rq->tag = -1;
-	rq->errors = 0;
 	rq->ref_count = 1;
-	rq->cmd_len = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->data_len = 0;
-	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->data = NULL;
-	rq->sense = NULL;
-	rq->end_io = NULL;
-	rq->end_io_data = NULL;
-	rq->next_rq = NULL;
 }
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
@@ -607,6 +585,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 	if (!rq)
 		return NULL;
 
+	rq_init(q, rq);
+
 	/*
 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
@@ -789,8 +769,6 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	rq_init(q, rq);
-
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
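
The export mentioned in the commit message is aimed at code that builds a struct request itself rather than allocating one through blk_get_request(). A minimal, hypothetical sketch of such a caller (the driver function names and the end_io handler are illustrative, not part of this patch):

	/* illustrative completion handler; signature matches rq_end_io_fn */
	static void my_driver_end_io(struct request *rq, int error)
	{
		/* completion handling would go here */
	}

	static void my_driver_prep_request(struct request_queue *q,
					   struct request *rq)
	{
		rq_init(q, rq);			/* full memset() + re-init */
		/* set only what this caller needs; everything else
		 * (sense, data, elevator_private, ...) is now guaranteed
		 * to be zero/NULL */
		rq->end_io = my_driver_end_io;
	}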