
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Merge the crypto tree for 4.1 to pull in the changeset that disables
algif_aead.
Herbert Xu 2015-05-28 11:16:41 +08:00
commit 6d7e3d8995
13 changed files with 77 additions and 42 deletions

arch/arm64/crypto/crc32-arm64.c

@@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 {
         struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

         put_unaligned_le32(ctx->crc, out);
         return 0;
 }

+static int chksumc_final(struct shash_desc *desc, u8 *out)
+{
+        struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+        put_unaligned_le32(~ctx->crc, out);
+        return 0;
+}
+
 static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
 {
-        put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+        put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out);
         return 0;
 }
@@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm)
 {
         struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);

         mctx->key = 0;
         return 0;
 }

+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+        struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
+        mctx->key = ~0;
+        return 0;
+}
@@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = {
         .setkey         = chksum_setkey,
         .init           = chksum_init,
         .update         = chksumc_update,
-        .final          = chksum_final,
+        .final          = chksumc_final,
         .finup          = chksumc_finup,
         .digest         = chksumc_digest,
         .descsize       = sizeof(struct chksum_desc_ctx),
@@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = {
         .cra_alignmask  = 0,
         .cra_ctxsize    = sizeof(struct chksum_ctx),
         .cra_module     = THIS_MODULE,
-        .cra_init       = crc32_cra_init,
+        .cra_init       = crc32c_cra_init,
         }
};
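
The thread running through all four hunks is that crc32 and crc32c use different finalization conventions: the crc32 shash follows the kernel's crc32_le() semantics (caller-supplied seed, default 0, raw result written out), while crc32c follows the RFC 3720 convention (seed ~0, result inverted on output), which is why crc32c now gets its own chksumc_final() and crc32c_cra_init(). A minimal userspace sketch of the two conventions (a plain bitwise model with the standard reflected polynomials, not the arm64 hardware instructions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Generic reflected bitwise CRC update; poly selects the variant. */
static uint32_t crc_update(uint32_t crc, const uint8_t *p, size_t len,
                           uint32_t poly)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ poly : crc >> 1;
        }
        return crc;
}

int main(void)
{
        const uint8_t msg[] = "123456789";

        /* crc32 convention: seed 0, no final inversion. */
        uint32_t crc32 = crc_update(0, msg, 9, 0xEDB88320);

        /* crc32c convention: seed ~0, inverted output -- exactly what the
         * added chksumc_final()/crc32c_cra_init() pair implements.
         * Prints the well-known check value e3069283. */
        uint32_t crc32c = ~crc_update(~0u, msg, 9, 0x82F63B78);

        printf("crc32=%08x crc32c=%08x\n", crc32, crc32c);
        return 0;
}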

arch/arm64/crypto/sha1-ce-glue.c

@@ -74,6 +74,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
         struct sha1_ce_state *sctx = shash_desc_ctx(desc);

+        sctx->finalize = 0;
         kernel_neon_begin_partial(16);
         sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
         kernel_neon_end();

arch/arm64/crypto/sha2-ce-glue.c

@@ -75,6 +75,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
         struct sha256_ce_state *sctx = shash_desc_ctx(desc);

+        sctx->finalize = 0;
         kernel_neon_begin_partial(28);
         sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
         kernel_neon_end();
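
Both ce-glue hunks plug the same hole: the finup() path can set a finalize flag in the descriptor context so the NEON core pads and finalizes in a single pass, and a later final() on the same, reused descriptor would otherwise inherit the stale flag and finalize twice. A stripped-down model of the hazard (field and function names are illustrative, not the driver's):

#include <stdio.h>

/* Illustrative per-request state: a flag tells the block-processing
 * core to append padding and finalize itself. */
struct ce_state {
        int finalize;
        /* ... hash state ... */
};

static void core_transform(struct ce_state *s)
{
        if (s->finalize)
                printf("core pads and finalizes\n");
        else
                printf("core processes blocks only\n");
}

static void my_finup(struct ce_state *s)
{
        s->finalize = 1;        /* let the core finalize in the same pass */
        core_transform(s);
}

static void my_final(struct ce_state *s)
{
        s->finalize = 0;        /* the fix: never trust a stale flag here */
        /* generic code pads, then runs the core one last time */
        core_transform(s);
}

int main(void)
{
        struct ce_state s = { 0 };

        my_finup(&s);   /* sets the flag */
        my_final(&s);   /* without the reset, the core would pad again */
        return 0;
}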

arch/s390/crypto/ghash_s390.c

@@ -16,11 +16,12 @@
 #define GHASH_DIGEST_SIZE 16

 struct ghash_ctx {
-        u8 icv[16];
-        u8 key[16];
+        u8 key[GHASH_BLOCK_SIZE];
 };

 struct ghash_desc_ctx {
+        u8 icv[GHASH_BLOCK_SIZE];
+        u8 key[GHASH_BLOCK_SIZE];
         u8 buffer[GHASH_BLOCK_SIZE];
         u32 bytes;
 };
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
 static int ghash_init(struct shash_desc *desc)
 {
         struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+        struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);

         memset(dctx, 0, sizeof(*dctx));
+        memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);

         return 0;
 }
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
         }

         memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
-        memset(ctx->icv, 0, GHASH_BLOCK_SIZE);

         return 0;
 }
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
                         const u8 *src, unsigned int srclen)
 {
         struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-        struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
         unsigned int n;
         u8 *buf = dctx->buffer;
         int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
                 src += n;

                 if (!dctx->bytes) {
-                        ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+                        ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
                                               GHASH_BLOCK_SIZE);
                         if (ret != GHASH_BLOCK_SIZE)
                                 return -EIO;
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
         n = srclen & ~(GHASH_BLOCK_SIZE - 1);
         if (n) {
-                ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+                ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
                 if (ret != n)
                         return -EIO;
                 src += n;
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
         return 0;
 }

-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_desc_ctx *dctx)
 {
         u8 *buf = dctx->buffer;
         int ret;
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
                 memset(pos, 0, dctx->bytes);

-                ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+                ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
                 if (ret != GHASH_BLOCK_SIZE)
                         return -EIO;
+
+                dctx->bytes = 0;
         }

-        dctx->bytes = 0;
         return 0;
 }

 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
         struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-        struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
         int ret;

-        ret = ghash_flush(ctx, dctx);
+        ret = ghash_flush(dctx);
         if (!ret)
-                memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+                memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);

         return ret;
 }
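
The net effect of this rework: the chaining value (icv) leaves the shared tfm context, so concurrent requests on one tfm no longer corrupt each other's state, and because the descriptor context now starts with the icv followed by a per-request copy of the key, it can be handed to crypt_s390_kimd() directly. That relies on the KIMD-GHASH parameter block being the 16-byte chaining value immediately followed by the 16-byte key (my reading of the change, not stated in the diff). A compile-time sketch of that layout invariant:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define GHASH_BLOCK_SIZE 16

/* Mirror of the reworked desc ctx: icv and key first, so the start of
 * the struct doubles as the instruction's parameter block. */
struct ghash_desc_ctx {
        uint8_t icv[GHASH_BLOCK_SIZE];
        uint8_t key[GHASH_BLOCK_SIZE];
        uint8_t buffer[GHASH_BLOCK_SIZE];
        uint32_t bytes;
};

static_assert(offsetof(struct ghash_desc_ctx, icv) == 0,
              "chaining value must lead the parameter block");
static_assert(offsetof(struct ghash_desc_ctx, key) == GHASH_BLOCK_SIZE,
              "key must directly follow the chaining value");

int main(void) { return 0; }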

arch/x86/crypto/sha512-avx2-asm.S

@@ -79,7 +79,7 @@ NUM_BLKS = %rdx
 c = %rcx
 d = %r8
 e = %rdx
-y3 = %rdi
+y3 = %rsi
 TBL = %rbp

crypto/Kconfig

@@ -1532,15 +1532,6 @@ config CRYPTO_USER_API_RNG
           This option enables the user-spaces interface for random
           number generator algorithms.

-config CRYPTO_USER_API_AEAD
-        tristate "User-space interface for AEAD cipher algorithms"
-        depends on NET
-        select CRYPTO_AEAD
-        select CRYPTO_USER_API
-        help
-          This option enables the user-spaces interface for AEAD
-          cipher algorithms.
-
 config CRYPTO_HASH_INFO
         bool

crypto/algif_aead.c

@@ -34,7 +34,7 @@ struct aead_ctx {
         /*
          * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
          * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
-         * bytes
+         * pages
          */
 #define RSGL_MAX_ENTRIES ALG_MAX_PAGES
         struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
@@ -436,11 +436,10 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
                 if (err < 0)
                         goto unlock;
                 usedpages += err;

-                /* chain the new scatterlist with initial list */
+                /* chain the new scatterlist with previous one */
                 if (cnt)
-                        scatterwalk_crypto_chain(ctx->rsgl[0].sg,
-                                        ctx->rsgl[cnt].sg, 1,
-                                        sg_nents(ctx->rsgl[cnt-1].sg));
+                        af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);

                 /* we do not need more iovecs as we have sufficient memory */
                 if (outlen <= usedpages)
                         break;
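
For context on the interface these algif changes (and the Kconfig removal above) concern: AF_ALG exposes kernel crypto to user space through sockets. A minimal sketch using the hash type, which survives this merge; the aead type uses the same bind/accept pattern plus setsockopt()/cmsg for keys, IVs and associated data (error handling omitted; assumes a kernel with the user-space hash interface enabled):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        /* Select an algorithm by type/name, as the algif_* modules
         * register them. */
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "crc32c",
        };
        unsigned char digest[4];
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        opfd = accept(tfmfd, NULL, 0);          /* one fd per operation */

        send(opfd, "123456789", 9, 0);          /* feed data */
        read(opfd, digest, sizeof(digest));     /* read the digest back */

        printf("crc32c: %02x%02x%02x%02x\n",
               digest[0], digest[1], digest[2], digest[3]);
        close(opfd);
        close(tfmfd);
        return 0;
}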

drivers/char/hw_random/bcm63xx-rng.c

@@ -57,7 +57,7 @@ static void bcm63xx_rng_cleanup(struct hwrng *rng)
         val &= ~RNG_EN;
         __raw_writel(val, priv->regs + RNG_CTRL);

-        clk_didsable_unprepare(prov->clk);
+        clk_disable_unprepare(priv->clk);
 }

 static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
@@ -97,14 +97,14 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
         priv->rng.name = pdev->name;
         priv->rng.init = bcm63xx_rng_init;
         priv->rng.cleanup = bcm63xx_rng_cleanup;
-        prov->rng.data_present = bcm63xx_rng_data_present;
+        priv->rng.data_present = bcm63xx_rng_data_present;
         priv->rng.data_read = bcm63xx_rng_data_read;

         priv->clk = devm_clk_get(&pdev->dev, "ipsec");
         if (IS_ERR(priv->clk)) {
-                error = PTR_ERR(priv->clk);
-                dev_err(&pdev->dev, "no clock for device: %d\n", error);
-                return error;
+                ret = PTR_ERR(priv->clk);
+                dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+                return ret;
         }

         if (!devm_request_mem_region(&pdev->dev, r->start,
@@ -120,11 +120,11 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
                 return -ENOMEM;
         }

-        error = devm_hwrng_register(&pdev->dev, &priv->rng);
-        if (error) {
+        ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+        if (ret) {
                 dev_err(&pdev->dev, "failed to register rng device: %d\n",
-                        error);
-                return error;
+                        ret);
+                return ret;
         }

         dev_info(&pdev->dev, "registered RNG driver\n");

drivers/crypto/Kconfig

@@ -466,8 +466,9 @@ config CRYPTO_DEV_VMX
 source "drivers/crypto/vmx/Kconfig"

 config CRYPTO_DEV_IMGTEC_HASH
-        depends on MIPS || COMPILE_TEST
         tristate "Imagination Technologies hardware hash accelerator"
+        depends on MIPS || COMPILE_TEST
+        depends on HAS_DMA
         select CRYPTO_ALGAPI
         select CRYPTO_MD5
         select CRYPTO_SHA1

include/linux/compiler-gcc.h

@@ -9,10 +9,24 @@
                      + __GNUC_MINOR__ * 100 \
                      + __GNUC_PATCHLEVEL__)

 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")

+/*
+ * This version is i.e. to prevent dead stores elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proofed that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")

 /*
  * This macro obfuscates arithmetic on a variable address so that gcc
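
The distinction the new comment draws is easy to reproduce in user space: once a local buffer has provably not escaped, a compiler may delete a final clearing store even across barrier(), whereas barrier_data(ptr) names ptr as an input to the asm, forcing the store to stay. A standalone sketch (whether the weak version is actually elided depends on compiler and optimization level):

#include <string.h>

#define barrier()          __asm__ __volatile__("" ::: "memory")
#define barrier_data(ptr)  __asm__ __volatile__("" :: "r"(ptr) : "memory")

void use_key_weak(void)
{
        char key[32] = "secret";
        /* ... use key ... */
        memset(key, 0, sizeof(key));    /* dead store: may be optimized away */
        barrier();      /* not always enough: key never escaped the frame */
}

void use_key_strong(void)
{
        char key[32] = "secret";
        /* ... use key ... */
        memset(key, 0, sizeof(key));
        barrier_data(key);      /* asm visibly consumes key: store must stay */
}

int main(void)
{
        use_key_weak();
        use_key_strong();
        return 0;
}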

include/linux/compiler-intel.h

@@ -13,9 +13,12 @@
 /* Intel ECC compiler doesn't support gcc specific asm stmts.
  * It uses intrinsics to do the equivalent things.
  */
+#undef barrier_data
 #undef RELOC_HIDE
 #undef OPTIMIZER_HIDE_VAR

+#define barrier_data(ptr) barrier()
+
 #define RELOC_HIDE(ptr, off)                                    \
   ({ unsigned long __ptr;                                       \
      __ptr = (unsigned long) (ptr);                             \

include/linux/compiler.h

@@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define barrier() __memory_barrier()
 #endif

+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
+
 /* Unreachable code */
 #ifndef unreachable
 # define unreachable() do { } while (1)

lib/string.c

@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
 void memzero_explicit(void *s, size_t count)
 {
         memset(s, 0, count);
-        barrier();
+        barrier_data(s);
 }
 EXPORT_SYMBOL(memzero_explicit);
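
With barrier_data() underneath it, memzero_explicit() is the kernel's sanctioned way to wipe sensitive buffers that are about to go out of scope. The userspace analogue, for comparison, is glibc's explicit_bzero() (glibc 2.25 and later), which makes the same guarantee:

#define _DEFAULT_SOURCE         /* for explicit_bzero() on glibc */
#include <string.h>

int main(void)
{
        char key[32] = "secret key material";

        /* An ordinary memset() here could be elided as a dead store;
         * explicit_bzero() may not be. */
        explicit_bzero(key, sizeof(key));
        return 0;
}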