alistair23-linux/include/crypto/cbc.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_CBC_H
#define _CRYPTO_CBC_H

#include <crypto/internal/skcipher.h>
#include <linux/string.h>
#include <linux/types.h>
/*
 * Encrypt one span of whole blocks with separate source and destination
 * buffers: XOR the plaintext into the running IV, encrypt with fn(), and
 * carry the resulting ciphertext block forward as the next IV.
 */
static inline int crypto_cbc_encrypt_segment(
		struct skcipher_walk *walk, struct crypto_skcipher *tfm,
		void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_xor(iv, src, bsize);
		fn(tfm, iv, dst);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
/*
 * Encrypt in place (source and destination coincide): XOR the IV into the
 * buffer, encrypt it there, and let the freshly written ciphertext block
 * serve as the IV for the next block.  The final ciphertext block is
 * copied back to walk->iv for the next walk step.
 */
static inline int crypto_cbc_encrypt_inplace(
		struct skcipher_walk *walk, struct crypto_skcipher *tfm,
		void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_xor(src, iv, bsize);
		fn(tfm, src, src);
		iv = src;

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);
	return nbytes;
}
/*
 * Drive a full CBC encryption over the request: walk the virtual addresses
 * and dispatch each chunk to the in-place or out-of-place helper above.
 * fn() must encrypt exactly one block (one block in, one block out).
 */
static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
					  void (*fn)(struct crypto_skcipher *,
						     const u8 *, u8 *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
		else
			err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}
/*
 * Decrypt one span of whole blocks out of place: decrypt each block, then
 * XOR it with the previous ciphertext block (the IV for the first block).
 * The last ciphertext block becomes the IV for the next walk step.
 */
static inline int crypto_cbc_decrypt_segment(
		struct skcipher_walk *walk, struct crypto_skcipher *tfm,
		void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		fn(tfm, src, dst);
		crypto_xor(dst, iv, bsize);
		iv = src;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);
	return nbytes;
}
/*
 * Decrypt in place by working backwards from the last block, so that each
 * ciphertext block is still intact when it is needed as the chaining value
 * for the block after it.  last_iv preserves the final ciphertext block,
 * which becomes walk->iv for the next walk step.
 */
static inline int crypto_cbc_decrypt_inplace(
		struct skcipher_walk *walk, struct crypto_skcipher *tfm,
		void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 last_iv[MAX_CIPHER_BLOCKSIZE];

	/* Start of the last block. */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

	for (;;) {
		fn(tfm, src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}
/*
 * Decrypt the current walk chunk, choosing the in-place or out-of-place
 * helper depending on whether source and destination buffers coincide.
 */
static inline int crypto_cbc_decrypt_blocks(
		struct skcipher_walk *walk, struct crypto_skcipher *tfm,
		void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
	if (walk->src.virt.addr == walk->dst.virt.addr)
		return crypto_cbc_decrypt_inplace(walk, tfm, fn);
	else
		return crypto_cbc_decrypt_segment(walk, tfm, fn);
}

#endif /* _CRYPTO_CBC_H */
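
For context, a minimal sketch of how a cipher implementation might wire these helpers into its skcipher encrypt/decrypt entry points. my_cbc_encrypt_one() and my_cbc_decrypt_one() are hypothetical single-block callbacks standing in for the driver's underlying block-cipher primitive (they are not part of this header); everything else uses only functions that appear in the code above.

#include <crypto/cbc.h>
#include <crypto/internal/skcipher.h>

/*
 * Hypothetical single-block callbacks: a real driver would call into its
 * underlying block cipher here, consuming one block at src and writing one
 * block at dst.
 */
void my_cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst);
void my_cbc_decrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst);

static int my_cbc_encrypt(struct skcipher_request *req)
{
	/* crypto_cbc_encrypt_walk() handles the walk and the chaining. */
	return crypto_cbc_encrypt_walk(req, my_cbc_encrypt_one);
}

static int my_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	/* Decrypt chunk by chunk; crypto_cbc_decrypt_blocks() picks the
	 * in-place or out-of-place path for each chunk. */
	while (walk.nbytes) {
		err = crypto_cbc_decrypt_blocks(&walk, tfm, my_cbc_decrypt_one);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}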