
block: implement request_queue->dma_drain_needed

Draining shouldn't be done for commands where overflow may indicate
data integrity issues.  Add dma_drain_needed callback to
request_queue.  Drain buffer is appended iff this function returns
non-zero.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Tejun Heo 2008-02-19 11:36:53 +01:00 committed by Jens Axboe
parent 6b00769fe1
commit 2fb98e8414
3 changed files with 11 additions and 5 deletions
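The new hook makes draining a per-request decision: blk_rq_map_sg() appends the drain buffer only when the queue's dma_drain_needed callback returns non-zero for the request at hand. Below is a minimal sketch of such a callback against the new dma_drain_needed_fn typedef; the function name and the exact policy are illustrative, not part of this commit:

#include <linux/blkdev.h>

/* Hypothetical policy: drain only non-write packet (BLOCK_PC)
 * commands, where a device pushing out more data than expected is
 * benign.  For regular reads and writes, overflow would mask a
 * data integrity problem, so no drain is provided and the excess
 * transfer surfaces as an error. */
static int example_drain_needed(struct request *rq)
{
	if (!blk_pc_request(rq))
		return 0;
	if (rq->cmd_flags & REQ_RW)
		return 0;
	return 1;
}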

block/blk-merge.c

@@ -220,7 +220,7 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
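For readers not fluent in the scatterlist API, here is the same call site with explanatory comments, completing the sg_set_page() arguments that the view above truncates (reconstructed from the kernel of this era, so treat the exact argument lines as a best-effort sketch):

if (q->dma_drain_size && q->dma_drain_needed(rq)) {
	/* Clear the end-of-list marker (bit 0x02 of page_link) on
	 * what was the final segment, so the list can be extended. */
	sg->page_link &= ~0x02;
	/* Step into the extra entry and point it at the drain buffer;
	 * blk_queue_dma_drain() lowered the segment limits so this
	 * slot is always available. */
	sg = sg_next(sg);
	sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
		    q->dma_drain_size,
		    ((unsigned long)q->dma_drain_buffer) &
		    (PAGE_SIZE - 1));
	nsegs++;	/* the drain counts toward the mapped total */
}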

block/blk-settings.c

@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q: the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf: physically contiguous buffer
  * @size: size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-		unsigned int size)
+int blk_queue_dma_drain(struct request_queue *q,
+			dma_drain_needed_fn *dma_drain_needed,
+			void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
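A driver opts in at queue setup by allocating a physically contiguous buffer and registering it together with its callback. A minimal sketch, reusing the hypothetical example_drain_needed above; the 256-byte drain size is illustrative:

#include <linux/blkdev.h>
#include <linux/slab.h>

static int example_setup_drain(struct request_queue *q)
{
	/* kmalloc memory is physically contiguous, as required */
	void *buf = kmalloc(256, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;
	/* Returns -EINVAL unless the queue can spare one physical
	 * and one hardware segment for the appended drain element. */
	ret = blk_queue_dma_drain(q, example_drain_needed, buf, 256);
	if (ret)
		kfree(buf);
	return ret;
}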

include/linux/blkdev.h

@@ -259,6 +259,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (dma_drain_needed_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -295,6 +296,7 @@ struct request_queue
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	dma_drain_needed_fn	*dma_drain_needed;
 
 	/*
 	 * Dispatch queue sorting
@@ -699,8 +701,9 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
-			       unsigned int size);
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);