
xen/blkback: Move processing of BLKIF_OP_DISCARD from dispatch_rw_block_io

.. and move it into its own function that deals with the
discard operation.
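
For context, a rough sketch of the helper this patch creates, assembled
from the hunks below. It is illustrative only: the blkdev_issue_discard()
call and its arguments are an assumption about the elided middle of the
function, not something shown in this diff.

        /*
         * Sketch of dispatch_discard_io() as introduced by this patch.
         * The helper now owns the blkif reference counting, the discard
         * statistics, and the response to the frontend, so
         * dispatch_rw_block_io() loses its REQ_DISCARD special cases.
         */
        static int dispatch_discard_io(struct xen_blkif *blkif,
                                       struct blkif_request *req)
        {
                int err = 0;
                int status = BLKIF_RSP_OKAY;
                struct block_device *bdev = blkif->vbd.bdev;

                blkif->st_ds_req++;             /* account the discard */

                xen_blkif_get(blkif);           /* pin blkif for the op */
                /* Assumed body: issue the discard to the backing device. */
                err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                           req->u.discard.nr_sectors,
                                           GFP_KERNEL, 0);
                if (err)
                        status = BLKIF_RSP_ERROR;

                make_response(blkif, req->u.discard.id, req->operation, status);
                xen_blkif_put(blkif);           /* drop the reference */
                return err;
        }

Because discard requests carry no data segments, the caller can free the
pending_req up front and skip the grant-mapping machinery entirely, which
is what the __do_block_io_op hunk below does.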

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Konrad Rzeszutek Wilk 2011-10-12 17:26:47 -04:00
parent 5ea4298669
commit 421463526f
1 changed file with 24 additions and 30 deletions


@@ -416,12 +416,16 @@ static int xen_blkbk_map(struct blkif_request *req,
         return ret;
 }
 
-static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
+static int dispatch_discard_io(struct xen_blkif *blkif,
+                               struct blkif_request *req)
 {
         int err = 0;
         int status = BLKIF_RSP_OKAY;
         struct block_device *bdev = blkif->vbd.bdev;
 
+        blkif->st_ds_req++;
+
+        xen_blkif_get(blkif);
         if (blkif->blk_backend_type == BLKIF_BACKEND_PHY) {
                 unsigned long secure = (blkif->vbd.discard_secure &&
                         (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
@@ -453,6 +457,8 @@ static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
                 status = BLKIF_RSP_ERROR;
         make_response(blkif, req->u.discard.id, req->operation, status);
         xen_blkif_put(blkif);
+        return err;
 }
 
 static void xen_blk_drain_io(struct xen_blkif *blkif)
@@ -576,8 +582,11 @@ __do_block_io_op(struct xen_blkif *blkif)
                 /* Apply all sanity checks to /private copy/ of request. */
                 barrier();
-                if (dispatch_rw_block_io(blkif, &req, pending_req))
+                if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
+                        free_req(pending_req);
+                        if (dispatch_discard_io(blkif, &req))
+                                break;
+                } else if (dispatch_rw_block_io(blkif, &req, pending_req))
                         break;
 
                 /* Yield point for this unbounded loop. */
@@ -636,24 +645,16 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                 blkif->st_f_req++;
                 operation = WRITE_FLUSH;
                 break;
-        case BLKIF_OP_DISCARD:
-                blkif->st_ds_req++;
-                operation = REQ_DISCARD;
-                break;
         default:
                 operation = 0; /* make gcc happy */
                 goto fail_response;
                 break;
         }
 
-        if (unlikely(operation == REQ_DISCARD))
-                nseg = 0;
-        else
-                /* Check that the number of segments is sane. */
-                nseg = req->u.rw.nr_segments;
+        /* Check that the number of segments is sane. */
+        nseg = req->u.rw.nr_segments;
 
-        if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
-                     operation != REQ_DISCARD) ||
+        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
             unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
                          nseg);
@@ -714,7 +715,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
          * the hypercall to unmap the grants - that is all done in
          * xen_blkbk_unmap.
          */
-        if (nseg && xen_blkbk_map(req, pending_req, seg))
+        if (xen_blkbk_map(req, pending_req, seg))
                 goto fail_flush;
 
         /*
@@ -746,23 +747,16 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
         /* This will be hit if the operation was a flush or discard. */
         if (!bio) {
-                BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);
+                BUG_ON(operation != WRITE_FLUSH);
 
-                if (operation == WRITE_FLUSH) {
-                        bio = bio_alloc(GFP_KERNEL, 0);
-                        if (unlikely(bio == NULL))
-                                goto fail_put_bio;
+                bio = bio_alloc(GFP_KERNEL, 0);
+                if (unlikely(bio == NULL))
+                        goto fail_put_bio;
 
-                        biolist[nbio++] = bio;
-                        bio->bi_bdev = preq.bdev;
-                        bio->bi_private = pending_req;
-                        bio->bi_end_io = end_block_io_op;
-                } else if (operation == REQ_DISCARD) {
-                        xen_blk_discard(blkif, req);
-                        xen_blkif_put(blkif);
-                        free_req(pending_req);
-                        return 0;
-                }
+                biolist[nbio++] = bio;
+                bio->bi_bdev = preq.bdev;
+                bio->bi_private = pending_req;
+                bio->bi_end_io = end_block_io_op;
         }
 
         /*