io_uring: always plug for any number of IOs

Currently we only plug if we're doing more than two requests. We're going
to be relying on always having the plug there to pass down information,
so plug unconditionally.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe 2020-06-01 08:30:41 -06:00
parent 5a473e8311
commit ac8691c415
1 changed file with 5 additions and 10 deletions
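
For readers who haven't met block-layer plugging: io_submit_state_start() and
io_submit_state_end() bracket the submission loop with a plug via
blk_start_plug()/blk_finish_plug(). While the plug is held, block requests
issued by the task are collected locally and can be merged before being
flushed to the driver in one go. The sketch below shows that general pattern
only; submit_batch() and issue_one_request() are hypothetical stand-ins, not
io_uring code.

#include <linux/blkdev.h>

/* Hypothetical helper: stands in for whatever actually queues block I/O. */
static void issue_one_request(unsigned int idx);

/*
 * Illustrative only: the shape of plugged submission that
 * io_submit_state_start()/io_submit_state_end() provide.
 */
static void submit_batch(unsigned int nr)
{
        struct blk_plug plug;
        unsigned int i;

        blk_start_plug(&plug);          /* installs the plug at current->plug */
        for (i = 0; i < nr; i++)
                issue_one_request(i);   /* I/O is collected under the plug */
        blk_finish_plug(&plug);         /* flushes the batch to the device */
}

Since blk_start_plug() publishes the plug through current->plug, code deeper
in the stack can discover it; this commit makes that hook available for every
submission, not just batches larger than IO_PLUG_THRESHOLD.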

@@ -676,7 +676,6 @@ struct io_kiocb {
         };
 };
 
-#define IO_PLUG_THRESHOLD       2
 #define IO_IOPOLL_BATCH         8
 
 struct io_submit_state {
@@ -5914,7 +5913,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                           struct file *ring_file, int ring_fd)
 {
-        struct io_submit_state state, *statep = NULL;
+        struct io_submit_state state;
         struct io_kiocb *link = NULL;
         int i, submitted = 0;
 
@@ -5931,10 +5930,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
         if (!percpu_ref_tryget_many(&ctx->refs, nr))
                 return -EAGAIN;
 
-        if (nr > IO_PLUG_THRESHOLD) {
-                io_submit_state_start(&state, nr);
-                statep = &state;
-        }
+        io_submit_state_start(&state, nr);
 
         ctx->ring_fd = ring_fd;
         ctx->ring_file = ring_file;
@@ -5949,14 +5945,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                         io_consume_sqe(ctx);
                         break;
                 }
-                req = io_alloc_req(ctx, statep);
+                req = io_alloc_req(ctx, &state);
                 if (unlikely(!req)) {
                         if (!submitted)
                                 submitted = -EAGAIN;
                         break;
                 }
 
-                err = io_init_req(ctx, req, sqe, statep);
+                err = io_init_req(ctx, req, sqe, &state);
                 io_consume_sqe(ctx);
                 /* will complete beyond this point, count as submitted */
                 submitted++;
@@ -5982,8 +5978,7 @@ fail_req:
         }
         if (link)
                 io_queue_link_head(link);
-        if (statep)
-                io_submit_state_end(&state);
+        io_submit_state_end(&state);
 
         /* Commit SQ ring head once we've consumed and submitted all SQEs */
         io_commit_sqring(ctx);
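
Net effect: the statep indirection and IO_PLUG_THRESHOLD are gone, and
io_submit_state_start()/io_submit_state_end() are now unconditionally paired
around the loop, so even a single-SQE submission runs with an active plug.
Per the commit message, that guarantee is what later patches rely on to pass
information down the stack, presumably by letting lower layers find the plug
through current->plug.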