From 4c6ab3ee4cdb86cbd4e9400dd22fad7701cbe795 Mon Sep 17 00:00:00 2001
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Mon, 21 Sep 2009 23:21:53 -0700
Subject: [PATCH 1/2] crypto: padlock-sha - Fix stack alignment
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The PadLock hardware requires the output buffer for SHA to be 128-bit
aligned.  We currently place the buffer on the stack and ask gcc to
align it to 128 bits.  That doesn't work on i386 because the kernel
stack is only aligned to 32 bits.  This patch changes the code to align
the buffer by hand so that the hardware doesn't fault on unaligned
buffers.

Reported-by: Séguier Régis
Tested-by: Séguier Régis
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
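Note: the buffer is over-allocated by PADLOCK_ALIGNMENT - STACK_ALIGN
bytes because gcc already guarantees STACK_ALIGN; PTR_ALIGN then rounds
the pointer up to the next PADLOCK_ALIGNMENT boundary within that
slack.  A minimal user-space sketch of the same idiom (the names and
the 16-byte alignment below are illustrative, not the driver's code):

#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT 16	/* stand-in for PADLOCK_ALIGNMENT */

int main(void)
{
	/*
	 * Worst case the compiler hands us a pointer ALIGNMENT - 1
	 * bytes short of a boundary, so that much slack still leaves
	 * 128 usable bytes after rounding up.
	 */
	char buf[128 + ALIGNMENT - 1];
	uintptr_t p = (uintptr_t)buf;

	/* The same round-up-and-mask arithmetic PTR_ALIGN performs. */
	char *result = (char *)((p + ALIGNMENT - 1) &
				~(uintptr_t)(ALIGNMENT - 1));

	printf("buf=%p result=%p\n", (void *)buf, (void *)result);
	return 0;
}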
 drivers/crypto/padlock-sha.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 76cb6b345e7b..0af80577dc7b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -24,6 +24,12 @@
 #include <asm/i387.h>
 #include "padlock.h"
 
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
 struct padlock_sha_desc {
 	struct shash_desc fallback;
 };
@@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 *     PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha1_state state;
 	unsigned int space;
@@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 *     PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha256_state state;
 	unsigned int space;

From 13b79b971564ddd0f14e706592472adc8199e912 Mon Sep 17 00:00:00 2001
From: Huang Ying <ying.huang@intel.com>
Date: Tue, 20 Oct 2009 16:20:47 +0900
Subject: [PATCH 2/2] crypto: aesni-intel - Fix irq_fpu_usable usage

When kernel_fpu_using was renamed to irq_fpu_usable, the semantics of
the function changed too: from measuring whether the kernel is using
the FPU (that is, the FPU is NOT available) to measuring whether the
FPU is usable (that is, the FPU IS available).  But the callers in
aesni-intel_glue.c were not updated accordingly.  This patch fixes
them.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/aesni-intel_glue.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 585edebe12cf..49c552c060e9 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -82,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -103,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -116,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -342,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -363,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
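
For reference, the corrected call-site pattern is: when irq_fpu_usable()
returns false, take the non-FPU path (the C fallback, or deferral to
cryptd in the ablkcipher cases); otherwise bracket the FPU work with
kernel_fpu_begin() and kernel_fpu_end().  A schematic sketch mirroring
the aes_encrypt hunk above (illustrative, not the exact driver source):

	if (!irq_fpu_usable())
		/* FPU/SSE state must not be touched: use the C fallback. */
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		/* FPU state is saved between begin/end, so AES-NI
		 * instructions may clobber it here. */
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}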