diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index be117495eb43..9755aac0fe26 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -5,12 +5,26 @@
  *
  * This file provides the user-space API for AEAD ciphers.
  *
- * This file is derived from algif_skcipher.c.
- *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
+ *
+ * The following memory management concept is used:
+ *
+ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
+ * filled by user space with the data submitted via sendpage/sendmsg. Filling
+ * up the TX SGL does not cause a crypto operation -- the data will only be
+ * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
+ * provide a buffer which is tracked with the RX SGL.
+ *
+ * During the processing of the recvmsg operation, the cipher request is
+ * allocated and prepared. As part of the recvmsg operation, the processed
+ * TX buffers are extracted from the TX SGL into a separate SGL.
+ *
+ * After the completion of the crypto operation, the RX SGL and the cipher
+ * request are released. The extracted TX SGL parts are released together
+ * with the RX SGL release.
  */
 
 #include <crypto/internal/aead.h>
@@ -25,24 +39,32 @@
 #include <linux/net.h>
 #include <net/sock.h>
 
-struct aead_sg_list {
-	unsigned int cur;
-	struct scatterlist sg[ALG_MAX_PAGES];
+struct aead_tsgl {
+	struct list_head list;
+	unsigned int cur;		/* Last processed SG entry */
+	struct scatterlist sg[0];	/* Array of SGs forming the SGL */
 };
 
-struct aead_async_rsgl {
+struct aead_rsgl {
 	struct af_alg_sgl sgl;
 	struct list_head list;
+	size_t sg_num_bytes;		/* Bytes of data in that SGL */
 };
 
 struct aead_async_req {
-	struct scatterlist *tsgl;
-	struct aead_async_rsgl first_rsgl;
-	struct list_head list;
 	struct kiocb *iocb;
 	struct sock *sk;
-	unsigned int tsgls;
-	char iv[];
+
+	struct aead_rsgl first_rsgl;	/* First RX SG */
+	struct list_head rsgl_list;	/* Track RX SGs */
+
+	struct scatterlist *tsgl;	/* priv. TX SGL of buffers to process */
+	unsigned int tsgl_entries;	/* number of entries in priv. TX SGL */
+
+	unsigned int outlen;		/* Filled output buf length */
+
+	unsigned int areqlen;		/* Length of this data struct */
+	struct aead_request aead_req;	/* req ctx trails this struct */
 };
 
 struct aead_tfm {
@@ -51,25 +73,26 @@ struct aead_tfm {
 };
 
 struct aead_ctx {
-	struct aead_sg_list tsgl;
-	struct aead_async_rsgl first_rsgl;
-	struct list_head list;
+	struct list_head tsgl_list;	/* Link to TX SGL */
 
 	void *iv;
-
-	struct af_alg_completion completion;
-
-	unsigned long used;
-
-	unsigned int len;
-	bool more;
-	bool merge;
-	bool enc;
-
 	size_t aead_assoclen;
-	struct aead_request aead_req;
+
+	struct af_alg_completion completion;	/* sync work queue */
+
+	size_t used;		/* TX bytes sent to kernel */
+	size_t rcvused;		/* total RX bytes to be processed by kernel */
+
+	bool more;		/* More data to be expected? */
+	bool merge;		/* Merge new data into existing SG */
+	bool enc;		/* Crypto operation: enc, dec */
+
+	unsigned int len;	/* Length of allocated memory for this struct */
 };
 
+#define MAX_SGL_ENTS ((4096 - sizeof(struct aead_tsgl)) / \
+		      sizeof(struct scatterlist) - 1)
+
 static inline int aead_sndbuf(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
@@ -84,9 +107,29 @@ static inline bool aead_writable(struct sock *sk)
 	return PAGE_SIZE <= aead_sndbuf(sk);
 }
 
-static inline bool aead_sufficient_data(struct aead_ctx *ctx)
+static inline int aead_rcvbuf(struct sock *sk)
 {
-	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->rcvused, 0);
+}
+
+static inline bool aead_readable(struct sock *sk)
+{
+	return PAGE_SIZE <= aead_rcvbuf(sk);
+}
+
+static inline bool aead_sufficient_data(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
+	struct aead_ctx *ctx = ask->private;
+	struct aead_tfm *aeadc = pask->private;
+	struct crypto_aead *tfm = aeadc->aead;
+	unsigned int as = crypto_aead_authsize(tfm);
 
 	/*
 	 * The minimum amount of memory needed for an AEAD cipher is
@@ -95,33 +138,166 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
 	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
 }
 
-static void aead_reset_ctx(struct aead_ctx *ctx)
-{
-	struct aead_sg_list *sgl = &ctx->tsgl;
-
-	sg_init_table(sgl->sg, ALG_MAX_PAGES);
-	sgl->cur = 0;
-	ctx->used = 0;
-	ctx->more = 0;
-	ctx->merge = 0;
-}
-
-static void aead_put_sgl(struct sock *sk)
+static int aead_alloc_tsgl(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
-	struct aead_sg_list *sgl = &ctx->tsgl;
-	struct scatterlist *sg = sgl->sg;
+	struct aead_tsgl *sgl;
+	struct scatterlist *sg = NULL;
+
+	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
+	if (!list_empty(&ctx->tsgl_list))
+		sg = sgl->sg;
+
+	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
+		sgl = sock_kmalloc(sk, sizeof(*sgl) +
+				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+				   GFP_KERNEL);
+		if (!sgl)
+			return -ENOMEM;
+
+		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+		sgl->cur = 0;
+
+		if (sg)
+			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+
+		list_add_tail(&sgl->list, &ctx->tsgl_list);
+	}
+
+	return 0;
+}
+
+static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct aead_tsgl *sgl, *tmp;
+	unsigned int i;
+	unsigned int sgl_count = 0;
+
+	if (!bytes)
+		return 0;
+
+	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
+		struct scatterlist *sg = sgl->sg;
+
+		for (i = 0; i < sgl->cur; i++) {
+			sgl_count++;
+			if (sg[i].length >= bytes)
+				return sgl_count;
+
+			bytes -= sg[i].length;
+		}
+	}
+
+	return sgl_count;
+}
+
+static void aead_pull_tsgl(struct sock *sk, size_t used,
+			   struct scatterlist *dst)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct aead_tsgl *sgl;
+	struct scatterlist *sg;
 	unsigned int i;
 
-	for (i = 0; i < sgl->cur; i++) {
-		if (!sg_page(sg + i))
-			continue;
+	while (!list_empty(&ctx->tsgl_list)) {
+		sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl,
+				       list);
+		sg = sgl->sg;
 
-		put_page(sg_page(sg + i));
-		sg_assign_page(sg + i, NULL);
+		for (i = 0; i < sgl->cur; i++) {
+			size_t plen = min_t(size_t, used, sg[i].length);
+			struct page *page = sg_page(sg + i);
+
+			if (!page)
+				continue;
+
+			/*
+			 * Assumption: caller created aead_count_tsgl(len)
+			 * SG entries in dst.
+			 */
+			if (dst)
+				sg_set_page(dst + i, page, plen, sg[i].offset);
+
+			sg[i].length -= plen;
+			sg[i].offset += plen;
+
+			used -= plen;
+			ctx->used -= plen;
+
+			if (sg[i].length)
+				return;
+
+			if (!dst)
+				put_page(page);
+			sg_assign_page(sg + i, NULL);
+		}
+
+		list_del(&sgl->list);
+		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
+						     (MAX_SGL_ENTS + 1));
 	}
-	aead_reset_ctx(ctx);
+
+	if (!ctx->used)
+		ctx->merge = 0;
+}
+
+static void aead_free_areq_sgls(struct aead_async_req *areq)
+{
+	struct sock *sk = areq->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct aead_rsgl *rsgl, *tmp;
+	struct scatterlist *tsgl;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
+		ctx->rcvused -= rsgl->sg_num_bytes;
+		af_alg_free_sg(&rsgl->sgl);
+		list_del(&rsgl->list);
+		if (rsgl != &areq->first_rsgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+	}
+
+	tsgl = areq->tsgl;
+	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
+		if (!sg_page(sg))
+			continue;
+		put_page(sg_page(sg));
+	}
+
+	if (areq->tsgl && areq->tsgl_entries)
+		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
+}
+
+static int aead_wait_for_wmem(struct sock *sk, unsigned int flags)
+{
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	int err = -ERESTARTSYS;
+	long timeout;
+
+	if (flags & MSG_DONTWAIT)
+		return -EAGAIN;
+
+	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+	add_wait_queue(sk_sleep(sk), &wait);
+	for (;;) {
+		if (signal_pending(current))
+			break;
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (sk_wait_event(sk, &timeout, aead_writable(sk), &wait)) {
+			err = 0;
+			break;
+		}
+	}
+	remove_wait_queue(sk_sleep(sk), &wait);
+
+	return err;
 }
 
 static void aead_wmem_wakeup(struct sock *sk)
@@ -153,6 +329,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
 		return -EAGAIN;
 
 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+
 	add_wait_queue(sk_sleep(sk), &wait);
 	for (;;) {
 		if (signal_pending(current))
 			break;
@@ -176,8 +353,6 @@ static void aead_data_wakeup(struct sock *sk)
 	struct aead_ctx *ctx = ask->private;
 	struct socket_wq *wq;
 
-	if (ctx->more)
-		return;
 	if (!ctx->used)
 		return;
 
@@ -195,15 +370,18 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct aead_ctx *ctx = ask->private;
-	unsigned ivsize =
-		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
-	struct aead_sg_list *sgl = &ctx->tsgl;
+	struct aead_tfm *aeadc = pask->private;
+	struct crypto_aead *tfm = aeadc->aead;
+	unsigned int ivsize = crypto_aead_ivsize(tfm);
+	struct aead_tsgl *sgl;
 	struct af_alg_control con = {};
 	long copied = 0;
 	bool enc = 0;
 	bool init = 0;
-	int err = -EINVAL;
+	int err = 0;
 
 	if (msg->msg_controllen) {
 		err = af_alg_cmsg_send(msg, &con);
@@ -227,8 +405,10 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	}
 
 	lock_sock(sk);
-	if (!ctx->more && ctx->used)
+	if (!ctx->more && ctx->used) {
+		err = -EINVAL;
 		goto unlock;
+	}
 
 	if (init) {
 		ctx->enc = enc;
@@ -239,11 +419,14 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	}
 
 	while (size) {
+		struct scatterlist *sg;
 		size_t len = size;
-		struct scatterlist *sg = NULL;
+		size_t plen;
 
 		/* use the existing memory in an allocated page */
 		if (ctx->merge) {
+			sgl = list_entry(ctx->tsgl_list.prev,
+					 struct aead_tsgl, list);
 			sg = sgl->sg + sgl->cur - 1;
 			len = min_t(unsigned long, len,
 				    PAGE_SIZE - sg->offset - sg->length);
@@ -264,57 +447,60 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 		}
 
 		if (!aead_writable(sk)) {
-			/* user space sent too much data */
-			aead_put_sgl(sk);
-			err = -EMSGSIZE;
-			goto unlock;
+			err = aead_wait_for_wmem(sk, msg->msg_flags);
+			if (err)
+				goto unlock;
 		}
 
 		/* allocate a new page */
 		len = min_t(unsigned long, size, aead_sndbuf(sk));
-		while (len) {
-			size_t plen = 0;
 
-			if (sgl->cur >= ALG_MAX_PAGES) {
-				aead_put_sgl(sk);
-				err = -E2BIG;
-				goto unlock;
-			}
+		err = aead_alloc_tsgl(sk);
+		if (err)
+			goto unlock;
+
+		sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl,
+				 list);
+		sg = sgl->sg;
+		if (sgl->cur)
+			sg_unmark_end(sg + sgl->cur - 1);
+
+		do {
+			unsigned int i = sgl->cur;
 
-			sg = sgl->sg + sgl->cur;
 			plen = min_t(size_t, len, PAGE_SIZE);
 
-			sg_assign_page(sg, alloc_page(GFP_KERNEL));
-			err = -ENOMEM;
-			if (!sg_page(sg))
-				goto unlock;
-
-			err = memcpy_from_msg(page_address(sg_page(sg)),
-					      msg, plen);
-			if (err) {
-				__free_page(sg_page(sg));
-				sg_assign_page(sg, NULL);
+			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
+			if (!sg_page(sg + i)) {
+				err = -ENOMEM;
 				goto unlock;
 			}
 
-			sg->offset = 0;
-			sg->length = plen;
+			err = memcpy_from_msg(page_address(sg_page(sg + i)),
+					      msg, plen);
+			if (err) {
+				__free_page(sg_page(sg + i));
+				sg_assign_page(sg + i, NULL);
+				goto unlock;
+			}
+
+			sg[i].length = plen;
 			len -= plen;
 			ctx->used += plen;
 			copied += plen;
-			sgl->cur++;
 			size -= plen;
-			ctx->merge = plen & (PAGE_SIZE - 1);
-		}
+			sgl->cur++;
+		} while (len && sgl->cur < MAX_SGL_ENTS);
+
+		if (!size)
+			sg_mark_end(sg + sgl->cur - 1);
+
+		ctx->merge = plen & (PAGE_SIZE - 1);
 	}
 
 	err = 0;
 
 	ctx->more = msg->msg_flags & MSG_MORE;
-	if (!ctx->more && !aead_sufficient_data(ctx)) {
-		aead_put_sgl(sk);
-		err = -EMSGSIZE;
-	}
 
 unlock:
 	aead_data_wakeup(sk);
@@ -329,15 +515,12 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
-	struct aead_sg_list *sgl = &ctx->tsgl;
+	struct aead_tsgl *sgl;
 	int err = -EINVAL;
 
 	if (flags & MSG_SENDPAGE_NOTLAST)
 		flags |= MSG_MORE;
 
-	if (sgl->cur >= ALG_MAX_PAGES)
-		return -E2BIG;
-
 	lock_sock(sk);
 	if (!ctx->more && ctx->used)
 		goto unlock;
@@ -346,13 +529,22 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
 		goto done;
 
 	if (!aead_writable(sk)) {
-		/* user space sent too much data */
-		aead_put_sgl(sk);
-		err = -EMSGSIZE;
-		goto unlock;
+		err = aead_wait_for_wmem(sk, flags);
+		if (err)
+			goto unlock;
 	}
 
+	err = aead_alloc_tsgl(sk);
+	if (err)
+		goto unlock;
+
 	ctx->merge = 0;
+	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
+
+	if (sgl->cur)
+		sg_unmark_end(sgl->sg + sgl->cur - 1);
+
+	sg_mark_end(sgl->sg + sgl->cur);
 
 	get_page(page);
 	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
@@ -363,11 +555,6 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
 
 done:
 	ctx->more = flags & MSG_MORE;
-	if (!ctx->more && !aead_sufficient_data(ctx)) {
-		aead_put_sgl(sk);
-		err = -EMSGSIZE;
-	}
-
 unlock:
 	aead_data_wakeup(sk);
 	release_sock(sk);
@@ -375,204 +562,52 @@ unlock:
 	return err ?: size;
 }
 
-#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
-		((char *)req + sizeof(struct aead_request) + \
-		 crypto_aead_reqsize(tfm))
-
-#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
-	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
-	sizeof(struct aead_request)
-
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-	struct aead_request *req = _req->data;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+	struct aead_async_req *areq = _req->data;
 	struct sock *sk = areq->sk;
-	struct scatterlist *sg = areq->tsgl;
-	struct aead_async_rsgl *rsgl;
 	struct kiocb *iocb = areq->iocb;
-	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
+	unsigned int resultlen;
 
-	list_for_each_entry(rsgl, &areq->list, list) {
-		af_alg_free_sg(&rsgl->sgl);
-		if (rsgl != &areq->first_rsgl)
-			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-	}
+	lock_sock(sk);
 
-	for (i = 0; i < areq->tsgls; i++)
-		put_page(sg_page(sg + i));
+	/* Buffer size written by crypto operation. */
+	resultlen = areq->outlen;
 
-	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
-	sock_kfree_s(sk, req, reqlen);
+	aead_free_areq_sgls(areq);
+	sock_kfree_s(sk, areq, areq->areqlen);
 	__sock_put(sk);
-	iocb->ki_complete(iocb, err, err);
-}
 
-static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
-			      int flags)
-{
-	struct sock *sk = sock->sk;
-	struct alg_sock *ask = alg_sk(sk);
-	struct aead_ctx *ctx = ask->private;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-	struct aead_async_req *areq;
-	struct aead_request *req = NULL;
-	struct aead_sg_list *sgl = &ctx->tsgl;
-	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
-	unsigned int as = crypto_aead_authsize(tfm);
-	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
-	int err = -ENOMEM;
-	unsigned long used;
-	size_t outlen = 0;
-	size_t usedpages = 0;
+	iocb->ki_complete(iocb, err ? err : resultlen, 0);
 
-	lock_sock(sk);
-	if (ctx->more) {
-		err = aead_wait_for_data(sk, flags);
-		if (err)
-			goto unlock;
-	}
-
-	if (!aead_sufficient_data(ctx))
-		goto unlock;
-
-	used = ctx->used;
-	if (ctx->enc)
-		outlen = used + as;
-	else
-		outlen = used - as;
-
-	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
-	if (unlikely(!req))
-		goto unlock;
-
-	areq = GET_ASYM_REQ(req, tfm);
-	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
-	INIT_LIST_HEAD(&areq->list);
-	areq->iocb = msg->msg_iocb;
-	areq->sk = sk;
-	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
-	aead_request_set_tfm(req, tfm);
-	aead_request_set_ad(req, ctx->aead_assoclen);
-	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  aead_async_cb, req);
-	used -= ctx->aead_assoclen;
-
-	/* take over all tx sgls from ctx */
-	areq->tsgl = sock_kmalloc(sk,
-				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
-				  GFP_KERNEL);
-	if (unlikely(!areq->tsgl))
-		goto free;
-
-	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
-	for (i = 0; i < sgl->cur; i++)
-		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
-			    sgl->sg[i].length, sgl->sg[i].offset);
-
-	areq->tsgls = sgl->cur;
-
-	/* create rx sgls */
-	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
-		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
-				      (outlen - usedpages));
-
-		if (list_empty(&areq->list)) {
-			rsgl = &areq->first_rsgl;
-
-		} else {
-			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
-			if (unlikely(!rsgl)) {
-				err = -ENOMEM;
-				goto free;
-			}
-		}
-		rsgl->sgl.npages = 0;
-		list_add_tail(&rsgl->list, &areq->list);
-
-		/* make one iovec available as scatterlist */
-		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
-		if (err < 0)
-			goto free;
-
-		usedpages += err;
-
-		/* chain the new scatterlist with previous one */
-		if (last_rsgl)
-			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
-
-		last_rsgl = rsgl;
-
-		iov_iter_advance(&msg->msg_iter, err);
-	}
-
-	/* ensure output buffer is sufficiently large */
-	if (usedpages < outlen) {
-		err = -EINVAL;
-		goto unlock;
-	}
-
-	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
-			       areq->iv);
-	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
-	if (err) {
-		if (err == -EINPROGRESS) {
-			sock_hold(sk);
-			err = -EIOCBQUEUED;
-			aead_reset_ctx(ctx);
-			goto unlock;
-		} else if (err == -EBADMSG) {
-			aead_put_sgl(sk);
-		}
-		goto free;
-	}
-	aead_put_sgl(sk);
-
-free:
-	list_for_each_entry(rsgl, &areq->list, list) {
-		af_alg_free_sg(&rsgl->sgl);
-		if (rsgl != &areq->first_rsgl)
-			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-	}
-	if (areq->tsgl)
-		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
-	if (req)
-		sock_kfree_s(sk, req, reqlen);
-unlock:
-	aead_wmem_wakeup(sk);
 	release_sock(sk);
-	return err ? err : outlen;
 }
 
-static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
+static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+			 size_t ignored, int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct aead_ctx *ctx = ask->private;
-	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
-	struct aead_sg_list *sgl = &ctx->tsgl;
-	struct aead_async_rsgl *last_rsgl = NULL;
-	struct aead_async_rsgl *rsgl, *tmp;
-	int err = -EINVAL;
-	unsigned long used = 0;
-	size_t outlen = 0;
-	size_t usedpages = 0;
-
-	lock_sock(sk);
+	struct aead_tfm *aeadc = pask->private;
+	struct crypto_aead *tfm = aeadc->aead;
+	unsigned int as = crypto_aead_authsize(tfm);
+	unsigned int areqlen =
+		sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm);
+	struct aead_async_req *areq;
+	struct aead_rsgl *last_rsgl = NULL;
+	int err = 0;
+	size_t used = 0;		/* [in] TX bufs to be en/decrypted */
+	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
+	size_t usedpages = 0;		/* [in] RX bufs to be used from user */
+	size_t processed = 0;		/* [in] TX bufs to be consumed */
 
 	/*
-	 * Please see documentation of aead_request_set_crypt for the
-	 * description of the AEAD memory structure expected from the caller.
+	 * Data length provided by caller via sendmsg/sendpage that has not
+	 * yet been processed.
 	 */
-
-	if (ctx->more) {
-		err = aead_wait_for_data(sk, flags);
-		if (err)
-			goto unlock;
-	}
-
-	/* data length provided by caller via sendmsg/sendpage */
 	used = ctx->used;
 
 	/*
@@ -584,8 +619,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 	 * the error message in sendmsg/sendpage and still call recvmsg. This
 	 * check here protects the kernel integrity.
 	 */
-	if (!aead_sufficient_data(ctx))
-		goto unlock;
+	if (!aead_sufficient_data(sk))
+		return -EINVAL;
 
 	/*
 	 * Calculate the minimum output buffer size holding the result of the
@@ -606,84 +641,170 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 	 */
 	used -= ctx->aead_assoclen;
 
-	/* convert iovecs of output buffers into scatterlists */
-	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
-		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
-				      (outlen - usedpages));
+	/* Allocate cipher request for current operation. */
+	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+	if (unlikely(!areq))
+		return -ENOMEM;
+	areq->areqlen = areqlen;
+	areq->sk = sk;
+	INIT_LIST_HEAD(&areq->rsgl_list);
+	areq->tsgl = NULL;
+	areq->tsgl_entries = 0;
 
-		if (list_empty(&ctx->list)) {
-			rsgl = &ctx->first_rsgl;
+	/* convert iovecs of output buffers into RX SGL */
+	while (outlen > usedpages && msg_data_left(msg)) {
+		struct aead_rsgl *rsgl;
+		size_t seglen;
+
+		/* limit the amount of readable buffers */
+		if (!aead_readable(sk))
+			break;
+
+		if (!ctx->used) {
+			err = aead_wait_for_data(sk, flags);
+			if (err)
+				goto free;
+		}
+
+		seglen = min_t(size_t, (outlen - usedpages),
+			       msg_data_left(msg));
+
+		if (list_empty(&areq->rsgl_list)) {
+			rsgl = &areq->first_rsgl;
 		} else {
 			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
 			if (unlikely(!rsgl)) {
 				err = -ENOMEM;
-				goto unlock;
+				goto free;
 			}
 		}
+
 		rsgl->sgl.npages = 0;
-		list_add_tail(&rsgl->list, &ctx->list);
+		list_add_tail(&rsgl->list, &areq->rsgl_list);
 
 		/* make one iovec available as scatterlist */
 		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
 		if (err < 0)
-			goto unlock;
-		usedpages += err;
+			goto free;
 
 		/* chain the new scatterlist with previous one */
 		if (last_rsgl)
 			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
 
 		last_rsgl = rsgl;
-
+		usedpages += err;
+		ctx->rcvused += err;
+		rsgl->sg_num_bytes = err;
 		iov_iter_advance(&msg->msg_iter, err);
 	}
 
-	/* ensure output buffer is sufficiently large */
+	/*
+	 * Ensure output buffer is sufficiently large. If the caller provides
+	 * less buffer space, only use the relative required input size. This
+	 * allows AIO operation where the caller sent all data to be processed
+	 * and the AIO operation performs the operation on the different chunks
+	 * of the input data.
+	 */
 	if (usedpages < outlen) {
-		err = -EINVAL;
-		goto unlock;
+		size_t less = outlen - usedpages;
+
+		if (used < less) {
+			err = -EINVAL;
+			goto free;
+		}
+		used -= less;
+		outlen -= less;
 	}
 
-	sg_mark_end(sgl->sg + sgl->cur - 1);
-	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
-			       used, ctx->iv);
-	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
+	/*
+	 * Create a per request TX SGL for this request which tracks the
+	 * SG entries from the global TX SGL.
+	 */
+	processed = used + ctx->aead_assoclen;
+	areq->tsgl_entries = aead_count_tsgl(sk, processed);
+	if (!areq->tsgl_entries)
+		areq->tsgl_entries = 1;
+	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
+				  GFP_KERNEL);
+	if (!areq->tsgl) {
+		err = -ENOMEM;
+		goto free;
+	}
+	sg_init_table(areq->tsgl, areq->tsgl_entries);
+	aead_pull_tsgl(sk, processed, areq->tsgl);
 
-	err = af_alg_wait_for_completion(ctx->enc ?
-					 crypto_aead_encrypt(&ctx->aead_req) :
-					 crypto_aead_decrypt(&ctx->aead_req),
+	/* Initialize the crypto operation */
+	aead_request_set_crypt(&areq->aead_req, areq->tsgl,
+			       areq->first_rsgl.sgl.sg, used, ctx->iv);
+	aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen);
+	aead_request_set_tfm(&areq->aead_req, tfm);
+
+	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+		/* AIO operation */
+		areq->iocb = msg->msg_iocb;
+		aead_request_set_callback(&areq->aead_req,
+					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  aead_async_cb, areq);
+		err = ctx->enc ? crypto_aead_encrypt(&areq->aead_req) :
+				 crypto_aead_decrypt(&areq->aead_req);
+	} else {
+		/* Synchronous operation */
+		aead_request_set_callback(&areq->aead_req,
+					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  af_alg_complete, &ctx->completion);
+		err = af_alg_wait_for_completion(ctx->enc ?
+				crypto_aead_encrypt(&areq->aead_req) :
+				crypto_aead_decrypt(&areq->aead_req),
 						 &ctx->completion);
-
-	if (err) {
-		/* EBADMSG implies a valid cipher operation took place */
-		if (err == -EBADMSG)
-			aead_put_sgl(sk);
-
-		goto unlock;
 	}
 
-	aead_put_sgl(sk);
-	err = 0;
+	/* AIO operation in progress */
+	if (err == -EINPROGRESS) {
+		sock_hold(sk);
 
-unlock:
-	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
-		af_alg_free_sg(&rsgl->sgl);
-		list_del(&rsgl->list);
-		if (rsgl != &ctx->first_rsgl)
-			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+		/* Remember output size that will be generated. */
+		areq->outlen = outlen;
+
+		return -EIOCBQUEUED;
 	}
-	INIT_LIST_HEAD(&ctx->list);
-	aead_wmem_wakeup(sk);
-	release_sock(sk);
+
+free:
+	aead_free_areq_sgls(areq);
+	if (areq)
+		sock_kfree_s(sk, areq, areqlen);
 
 	return err ? err : outlen;
 }
 
-static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
-			int flags)
+static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
+			size_t ignored, int flags)
 {
-	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
-		aead_recvmsg_async(sock, msg, flags) :
-		aead_recvmsg_sync(sock, msg, flags);
+	struct sock *sk = sock->sk;
+	int ret = 0;
+
+	lock_sock(sk);
+	while (msg_data_left(msg)) {
+		int err = _aead_recvmsg(sock, msg, ignored, flags);
+
+		/*
+		 * This error covers -EIOCBQUEUED which implies that we can
+		 * only handle one AIO request. If the caller wants to have
+		 * multiple AIO requests in parallel, he must make multiple
+		 * separate AIO calls.
+		 */
+		if (err <= 0) {
+			if (err == -EIOCBQUEUED || err == -EBADMSG)
+				ret = err;
+			goto out;
+		}
+
+		ret += err;
+	}
+
+out:
+	aead_wmem_wakeup(sk);
+	release_sock(sk);
+	return ret;
 }
 
 static unsigned int aead_poll(struct file *file, struct socket *sock,
@@ -874,11 +995,13 @@ static void aead_sock_destruct(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
-	unsigned int ivlen = crypto_aead_ivsize(
-				crypto_aead_reqtfm(&ctx->aead_req));
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
+	struct aead_tfm *aeadc = pask->private;
+	struct crypto_aead *tfm = aeadc->aead;
+	unsigned int ivlen = crypto_aead_ivsize(tfm);
 
-	WARN_ON(refcount_read(&sk->sk_refcnt) != 0);
-	aead_put_sgl(sk);
+	aead_pull_tsgl(sk, ctx->used, NULL);
 	sock_kzfree_s(sk, ctx->iv, ivlen);
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
@@ -890,7 +1013,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_tfm *tfm = private;
 	struct crypto_aead *aead = tfm->aead;
-	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
+	unsigned int len = sizeof(*ctx);
 	unsigned int ivlen = crypto_aead_ivsize(aead);
 
 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
@@ -905,23 +1028,18 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 	}
 	memset(ctx->iv, 0, ivlen);
 
+	INIT_LIST_HEAD(&ctx->tsgl_list);
 	ctx->len = len;
 	ctx->used = 0;
+	ctx->rcvused = 0;
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
-	ctx->tsgl.cur = 0;
 	ctx->aead_assoclen = 0;
 	af_alg_init_completion(&ctx->completion);
-	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
-	INIT_LIST_HEAD(&ctx->list);
 
 	ask->private = ctx;
 
-	aead_request_set_tfm(&ctx->aead_req, aead);
-	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  af_alg_complete, &ctx->completion);
-
 	sk->sk_destruct = aead_sock_destruct;
 
 	return 0;
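
As a usage illustration of the memory-management concept described in the
header comment above -- sendmsg()/sendpage() only fill the TX SGL, and the
cipher operation fires once recvmsg()/read() supplies the RX buffer -- the
following minimal user-space sketch encrypts one block through the
algif_aead socket interface. It assumes a kernel exposing AF_ALG with the
"aead" type and a "gcm(aes)" implementation; the all-zero demo key and IV
and the omitted error handling are placeholders, not a recommendation.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };	/* demo key only */
	unsigned char iv[12] = { 0 };	/* demo IV only */
	unsigned char pt[16] = "0123456789abcde";
	unsigned char ct[16 + 16];	/* ciphertext plus 16-byte GCM tag */
	char cbuf[CMSG_SPACE(sizeof(int)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *alg_iv;
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* First cmsg: select the operation (encryption). */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* Second cmsg: pass the IV. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*alg_iv) + sizeof(iv));
	alg_iv = (void *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* Only fills the kernel's TX SGL -- no crypto happens yet. */
	sendmsg(opfd, &msg, 0);

	/* Supplying the RX buffer triggers the cipher operation. */
	read(opfd, ct, sizeof(ct));

	close(opfd);
	close(tfmfd);
	return 0;
}

With no associated data and GCM's default 16-byte tag, the read() buffer
must hold at least plaintext length plus 16 bytes, matching the
"outlen = used + as" sizing that _aead_recvmsg() enforces for encryption.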
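The MAX_SGL_ENTS define in the patch sizes each TX SGL block so that the
aead_tsgl header plus MAX_SGL_ENTS + 1 scatterlist entries fit into one
4096-byte allocation, with the final entry reserved for the sg_chain() link
to the next block. A back-of-the-envelope check of that arithmetic, using
assumed 64-bit structure sizes (the real values depend on the kernel
configuration, so the numbers below are illustrative only):

#include <stdio.h>

int main(void)
{
	/* Hypothetical sizes: ~24 bytes for the aead_tsgl header and
	 * 32 bytes per struct scatterlist on a typical x86_64 build. */
	unsigned long tsgl_hdr = 24;
	unsigned long sg_ent = 32;
	unsigned long max_ents = (4096 - tsgl_hdr) / sg_ent - 1;

	/* Each block then carries max_ents data entries plus one entry
	 * that sg_chain() converts into the link to the next block. */
	printf("TX SGL entries per block: %lu\n", max_ents); /* 126 */
	return 0;
}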