diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 570c005062ab..e02c1e04529e 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -524,7 +524,7 @@ struct kiocb_priv {
 	unsigned		actual;
 };
 
-static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
+static int ep_aio_cancel(struct kiocb *iocb)
 {
 	struct kiocb_priv	*priv = iocb->private;
 	struct ep_data		*epdata;
@@ -540,7 +540,6 @@ static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
 	// spin_unlock(&epdata->dev->lock);
 	local_irq_enable();
 
-	aio_put_req(iocb);
 	return value;
 }
 
diff --git a/fs/aio.c b/fs/aio.c
index 7b470bfbf891..12b37689dd2c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -358,8 +358,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
-			struct io_event *res)
+static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
 {
 	kiocb_cancel_fn *old, *cancel;
 	int ret = -EINVAL;
@@ -381,12 +380,10 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
 	atomic_inc(&kiocb->ki_users);
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	memset(res, 0, sizeof(*res));
-	res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
-	res->data = kiocb->ki_user_data;
-	ret = cancel(kiocb, res);
+	ret = cancel(kiocb);
 
 	spin_lock_irq(&ctx->ctx_lock);
+	aio_put_req(kiocb);
 
 	return ret;
 }
@@ -408,7 +405,6 @@ static void free_ioctx(struct work_struct *work)
 {
 	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
 	struct aio_ring *ring;
-	struct io_event res;
 	struct kiocb *req;
 	unsigned cpu, head, avail;
 
@@ -419,7 +415,7 @@ static void free_ioctx(struct work_struct *work)
 				       struct kiocb, ki_list);
 
 		list_del_init(&req->ki_list);
-		kiocb_cancel(ctx, req, &res);
+		kiocb_cancel(ctx, req);
 	}
 
 	spin_unlock_irq(&ctx->ctx_lock);
@@ -795,21 +791,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 	}
 
-	/*
-	 * cancelled requests don't get events, userland was given one
-	 * when the event got cancelled.
-	 */
-	if (unlikely(xchg(&iocb->ki_cancel,
-			  KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
-		/*
-		 * Can't use the percpu reqs_available here - could race with
-		 * free_ioctx()
-		 */
-		atomic_inc(&ctx->reqs_available);
-		/* Still need the wake_up in case free_ioctx is waiting */
-		goto put_rq;
-	}
-
 	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
@@ -862,7 +843,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	if (iocb->ki_eventfd != NULL)
 		eventfd_signal(iocb->ki_eventfd, 1);
 
-put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	aio_put_req(iocb);
 
@@ -1439,7 +1419,6 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		struct io_event __user *, result)
 {
-	struct io_event res;
 	struct kioctx *ctx;
 	struct kiocb *kiocb;
 	u32 key;
@@ -1457,18 +1436,19 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 
 	kiocb = lookup_kiocb(ctx, iocb, key);
 	if (kiocb)
-		ret = kiocb_cancel(ctx, kiocb, &res);
+		ret = kiocb_cancel(ctx, kiocb);
 	else
 		ret = -EINVAL;
 
 	spin_unlock_irq(&ctx->ctx_lock);
 
 	if (!ret) {
-		/* Cancellation succeeded -- copy the result
-		 * into the user's buffer.
+		/*
+		 * The result argument is no longer used - the io_event is
+		 * always delivered via the ring buffer. -EINPROGRESS indicates
+		 * cancellation is progress:
 		 */
-		if (copy_to_user(result, &res, sizeof(res)))
-			ret = -EFAULT;
+		ret = -EINPROGRESS;
 	}
 
 	percpu_ref_put(&ctx->users);
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 1bdf965339f9..8c8dd1d84d2e 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -27,7 +27,7 @@ struct kiocb;
  */
 #define KIOCB_CANCELLED		((void *) (~0ULL))
 
-typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
+typedef int (kiocb_cancel_fn)(struct kiocb *);
 
 struct kiocb {
 	atomic_t		ki_users;