aio: separate out ring reservation from req allocation

This is in preparation for certain types of IO not needing a ring
reservation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig 2018-11-19 15:57:42 -07:00 committed by Jens Axboe
parent bc9bff6162
commit 432c79978c
1 changed file with 17 additions and 13 deletions
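To make the split concrete, here is a minimal sketch (not code from this patch; example_submit_with_ring_slot() is a hypothetical name) of how the two steps look from a submitter that still needs a completion-ring slot, mirroring the new flow in io_submit_one() in the diff below:

/*
 * Sketch only, assuming the fs/aio.c declarations shown in the diff are in
 * scope: the ring-slot reservation and the request allocation are now two
 * independent steps.
 */
static int example_submit_with_ring_slot(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	if (!get_reqs_available(ctx))		/* reserve a completion-ring slot */
		return -EAGAIN;

	req = aio_get_req(ctx);			/* pure allocation, no ring accounting */
	if (unlikely(!req)) {
		put_reqs_available(ctx, 1);	/* undo the reservation on failure */
		return -EAGAIN;
	}

	/* ... fill in and submit the iocb ... */
	return 0;
}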

@@ -902,7 +902,7 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 	local_irq_restore(flags);
 }
 
-static bool get_reqs_available(struct kioctx *ctx)
+static bool __get_reqs_available(struct kioctx *ctx)
 {
 	struct kioctx_cpu *kcpu;
 	bool ret = false;
@@ -994,6 +994,14 @@ static void user_refill_reqs_available(struct kioctx *ctx)
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
+static bool get_reqs_available(struct kioctx *ctx)
+{
+	if (__get_reqs_available(ctx))
+		return true;
+	user_refill_reqs_available(ctx);
+	return __get_reqs_available(ctx);
+}
+
 /* aio_get_req
  * Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
@@ -1002,24 +1010,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
 	struct aio_kiocb *req;
 
-	if (!get_reqs_available(ctx)) {
-		user_refill_reqs_available(ctx);
-		if (!get_reqs_available(ctx))
-			return NULL;
-	}
-
 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
 	if (unlikely(!req))
-		goto out_put;
+		return NULL;
 
 	percpu_ref_get(&ctx->reqs);
 	INIT_LIST_HEAD(&req->ki_list);
 	refcount_set(&req->ki_refcnt, 0);
 	req->ki_ctx = ctx;
 	return req;
-out_put:
-	put_reqs_available(ctx, 1);
-	return NULL;
 }
 
 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
@@ -1807,9 +1806,13 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		return -EINVAL;
 	}
 
+	if (!get_reqs_available(ctx))
+		return -EAGAIN;
+
+	ret = -EAGAIN;
 	req = aio_get_req(ctx);
 	if (unlikely(!req))
-		return -EAGAIN;
+		goto out_put_reqs_available;
 
 	if (iocb.aio_flags & IOCB_FLAG_RESFD) {
 		/*
@@ -1872,11 +1875,12 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	if (ret)
 		goto out_put_req;
 	return 0;
 out_put_req:
-	put_reqs_available(ctx, 1);
 	percpu_ref_put(&ctx->reqs);
 	if (req->ki_eventfd)
 		eventfd_ctx_put(req->ki_eventfd);
 	kmem_cache_free(kiocb_cachep, req);
+out_put_reqs_available:
+	put_reqs_available(ctx, 1);
 	return ret;
 }
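The payoff hinted at in the commit message is that a submission path which never posts to the completion ring can now skip the reservation entirely. A hypothetical sketch (example_submit_without_ring_slot() is not a real function, just an illustration of the intended use):

static int example_submit_without_ring_slot(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	/* no get_reqs_available(): this IO never consumes a ring slot */
	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	/* ... completion is reported through some other mechanism ... */
	return 0;
}

Note also the unwind order in io_submit_one(): the request is torn down under out_put_req before the ring slot is released under the new out_put_reqs_available label, so the two steps are undone in the reverse of the order they were taken.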