
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes a potential weakness in IPsec CBC IV generation, as well as
  a number of issues that arose out of an OOM crash on ARM with CTR-mode
  AES"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: arm64/aes-ctr - fix NULL dereference in tail processing
  crypto: arm/aes-ctr - fix NULL dereference in tail processing
  crypto: skcipher - Fix blkcipher walk OOM crash
  crypto: echainiv - Replace chaining with multiplication
Linus Torvalds 2016-09-19 12:58:34 -07:00
commit 7bb91e0673
4 changed files with 29 additions and 95 deletions

View File

@@ -284,7 +284,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		err = blkcipher_walk_done(desc, &walk,
 					  walk.nbytes % AES_BLOCK_SIZE);
 	}
-	if (nbytes) {
+	if (walk.nbytes % AES_BLOCK_SIZE) {
 		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
 		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];

View File

@@ -216,7 +216,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		err = blkcipher_walk_done(desc, &walk,
 					  walk.nbytes % AES_BLOCK_SIZE);
 	}
-	if (nbytes) {
+	if (walk.nbytes % AES_BLOCK_SIZE) {
 		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
 		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
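
The two hunks above (the arm64 and arm AES-CTR glue, per the shortlog) make the same one-line change: the partial-block tail code was guarded by nbytes, a count carried over from the original request, but when the walk terminates early (for example on an allocation failure inside blkcipher_walk_done()) nbytes can still be non-zero while walk.dst.virt.addr and walk.src.virt.addr are no longer valid, so the old guard could send the tail code through a NULL pointer. Testing walk.nbytes % AES_BLOCK_SIZE ties the tail handling to the walk's actual final state. A standalone sketch of the hazard follows; the walk_state struct and names are hypothetical stand-ins for the kernel's blkcipher_walk, and only the guard logic is the point:

/*
 * Model of the tail-processing hazard fixed above (hypothetical names,
 * not the kernel API). Compile and run as plain userspace C.
 */
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

struct walk_state {
	unsigned char *dst;	/* NULL once the walk has torn down */
	unsigned int nbytes;	/* bytes left in the final walk chunk */
};

static void ctr_tail(struct walk_state *walk, unsigned int req_nbytes)
{
	/*
	 * Old guard: if (req_nbytes % AES_BLOCK_SIZE) - true here even
	 * though the walk ended early and walk->dst is NULL.
	 * New guard: key off the walk's own state instead.
	 */
	if (walk->nbytes % AES_BLOCK_SIZE) {
		unsigned char tail[AES_BLOCK_SIZE];

		/* only reached while walk->dst is still mapped */
		memcpy(tail, walk->dst, walk->nbytes % AES_BLOCK_SIZE);
		printf("handled %u tail bytes\n",
		       walk->nbytes % AES_BLOCK_SIZE);
	} else {
		printf("no tail to handle (request had %u bytes)\n",
		       req_nbytes);
	}
}

int main(void)
{
	/* simulate a walk that terminated early, e.g. on -ENOMEM */
	struct walk_state walk = { .dst = NULL, .nbytes = 0 };

	ctr_tail(&walk, 20);	/* 20 % 16 != 0: the old guard would crash */
	return 0;
}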

View File

@@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
 		return blkcipher_walk_done(desc, walk, -EINVAL);
 	}
 
+	bsize = min(walk->walk_blocksize, n);
+
 	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
 			 BLKCIPHER_WALK_DIFF);
 	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
 		}
 	}
 
-	bsize = min(walk->walk_blocksize, n);
 	n = scatterwalk_clamp(&walk->in, n);
 	n = scatterwalk_clamp(&walk->out, n);
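
This pair of hunks only moves the bsize computation ahead of the alignment check. The reason, as far as the surrounding blkcipher.c code suggests: when the unaligned path cannot get a bounce page (GFP_ATOMIC failure, not shown in the hunk), n is clamped to 0 so that the later n < bsize test diverts into slow block-by-block processing; with the old ordering, bsize was derived from the already-zeroed n, the test compared 0 < 0 and never fired, and the fast path ran with no buffer behind it, which is the OOM crash named in the shortlog. A small userspace model of the ordering hazard, with hypothetical names rather than the kernel API:

/*
 * The slow-path test n < bsize only works if bsize is computed from
 * the request before failure handling clamps n to 0.
 */
#include <stdbool.h>
#include <stdio.h>

#define WALK_BLOCKSIZE 16u

static const char *walk_next(unsigned int total, bool page_alloc_fails)
{
	unsigned int n = total;
	unsigned int bsize;

	/* fixed ordering: size one step from the requested total */
	bsize = WALK_BLOCKSIZE < n ? WALK_BLOCKSIZE : n;

	if (page_alloc_fails)
		n = 0;	/* no bounce page: must force the slow path */

	if (n < bsize)
		return "slow path, block by block without a bounce page";

	/*
	 * With the old ordering bsize would be min(16, 0) == 0 here,
	 * the test above would not fire, and the fast path would run
	 * with nothing behind it.
	 */
	return "fast path";
}

int main(void)
{
	printf("OOM:    %s\n", walk_next(64, true));
	printf("no OOM: %s\n", walk_next(64, false));
	return 0;
}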

View File

@@ -1,8 +1,8 @@
 /*
  * echainiv: Encrypted Chain IV Generator
  *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
+ * This generator generates an IV based on a sequence number by multiplying
+ * it with a salt and then encrypting it with the same key as used to encrypt
  * the plain text. This algorithm requires that the block size be equal
  * to the IV size. It is mainly useful for CBC.
 *
@@ -24,81 +24,17 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
-#define MAX_IV_SIZE 16
-
-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
-
-/* We don't care if we get preempted and read/write IVs from the next CPU. */
-static void echainiv_read_iv(u8 *dst, unsigned size)
-{
-	u32 *a = (u32 *)dst;
-	u32 __percpu *b = echainiv_iv;
-
-	for (; size >= 4; size -= 4) {
-		*a++ = this_cpu_read(*b);
-		b++;
-	}
-}
-
-static void echainiv_write_iv(const u8 *src, unsigned size)
-{
-	const u32 *a = (const u32 *)src;
-	u32 __percpu *b = echainiv_iv;
-
-	for (; size >= 4; size -= 4) {
-		this_cpu_write(*b, *a);
-		a++;
-		b++;
-	}
-}
-
-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
-{
-	struct aead_request *subreq = aead_request_ctx(req);
-	struct crypto_aead *geniv;
-	unsigned int ivsize;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = crypto_aead_reqtfm(req);
-	ivsize = crypto_aead_ivsize(geniv);
-
-	echainiv_write_iv(subreq->iv, ivsize);
-
-	if (req->iv != subreq->iv)
-		memcpy(req->iv, subreq->iv, ivsize);
-
-out:
-	if (req->iv != subreq->iv)
-		kzfree(subreq->iv);
-}
-
-static void echainiv_encrypt_complete(struct crypto_async_request *base,
-				      int err)
-{
-	struct aead_request *req = base->data;
-
-	echainiv_encrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
 static int echainiv_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
 	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
-	crypto_completion_t compl;
-	void *data;
+	__be64 nseqno;
+	u64 seqno;
 	u8 *info;
 	unsigned int ivsize = crypto_aead_ivsize(geniv);
 	int err;
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
 
 	aead_request_set_tfm(subreq, ctx->child);
 
-	compl = echainiv_encrypt_complete;
-	data = req;
 	info = req->iv;
 
 	if (req->src != req->dst) {
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
 			return err;
 	}
 
-	if (unlikely(!IS_ALIGNED((unsigned long)info,
-				 crypto_aead_alignmask(geniv) + 1))) {
-		info = kmalloc(ivsize, req->base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		memcpy(info, req->iv, ivsize);
-	}
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
 	aead_request_set_crypt(subreq, req->dst, req->dst,
 			       req->cryptlen, info);
 	aead_request_set_ad(subreq, req->assoclen);
 
-	crypto_xor(info, ctx->salt, ivsize);
-	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
-	echainiv_read_iv(info, ivsize);
+	memcpy(&nseqno, info + ivsize - 8, 8);
+	seqno = be64_to_cpu(nseqno);
+	memset(info, 0, ivsize);
 
-	err = crypto_aead_encrypt(subreq);
-	echainiv_encrypt_complete2(req, err);
-	return err;
+	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
+
+	do {
+		u64 a;
+
+		memcpy(&a, ctx->salt + ivsize - 8, 8);
+
+		a |= 1;
+		a *= seqno;
+
+		memcpy(info + ivsize - 8, &a, 8);
+	} while ((ivsize -= 8));
+
+	return crypto_aead_encrypt(subreq);
 }
 
 static int echainiv_decrypt(struct aead_request *req)
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	alg = crypto_spawn_aead_alg(spawn);
 
 	err = -EINVAL;
-	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
-	    inst->alg.ivsize > MAX_IV_SIZE)
+	if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
 		goto free_inst;
 
 	inst->alg.encrypt = echainiv_encrypt;
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	inst->alg.init = aead_init_geniv;
 	inst->alg.exit = aead_exit_geniv;
 
-	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
 	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
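
The rewritten generator is the "potential weakness in IPsec CBC IV generation" fix from the pull message: it drops the per-CPU chained IV state (the xor-with-salt-then-read-last-IV scheme) along with its completion callbacks. Instead it reads the 64-bit sequence number out of the request IV, zeroes the IV bytes in the output, and derives the IV actually fed to the child cipher by multiplying the sequence number with the per-key salt, 8 bytes at a time. Forcing each salt word odd (a |= 1) makes the multiplication modulo 2^64 invertible, so distinct sequence numbers cannot collide into one IV; and since the zeroed IV region sits inside the range the child algorithm encrypts, what goes on the wire is, in effect, the encryption of seqno * salt rather than anything predictable. A standalone sketch of just the derivation loop, with hypothetical buffers and driver:

/*
 * Model of the IV derivation introduced above. Each 8-byte word of
 * the IV becomes seqno multiplied by an odd word of the per-key salt,
 * modulo 2^64.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void echainiv_derive(uint8_t *info, const uint8_t *salt,
			    unsigned int ivsize, uint64_t seqno)
{
	/* mirrors the kernel loop: ivsize must be a nonzero multiple of 8 */
	do {
		uint64_t a;

		memcpy(&a, salt + ivsize - 8, 8);
		a |= 1;		/* odd: multiplication mod 2^64 is invertible */
		a *= seqno;	/* unsigned wraparound is the mod 2^64 */
		memcpy(info + ivsize - 8, &a, 8);
	} while ((ivsize -= 8));
}

int main(void)
{
	uint8_t salt[16] = { 0x9e, 0x37, 0x79, 0xb9 };	/* stand-in salt */
	uint8_t iv[16];

	echainiv_derive(iv, salt, sizeof(iv), 42);	/* sequence number 42 */
	for (unsigned int i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	putchar('\n');
	return 0;
}

The 8-byte stride is why echainiv_aead_create now rejects IV sizes that are zero or not a multiple of sizeof(u64), where the old code accepted any multiple of sizeof(u32) up to MAX_IV_SIZE.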