
crypto: stm32/crc32 - fix ext4 chksum BUG_ON()

[ Upstream commit 49c2c082e0 ]

Allow use of crc_update() without a prior call to crc_init().
Also change (and fix) the driver to use the CRC device even on unaligned buffers.

Fixes: b51dbe9091 ("crypto: stm32 - Support for STM32 CRC32 crypto module")

Signed-off-by: Nicolas Toromanoff <nicolas.toromanoff@st.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Branch: 5.4-rM2-2.2.x-imx-squashed
Author: Nicolas Toromanoff, 2020-05-12 16:11:09 +02:00; committed by Greg Kroah-Hartman
parent c79c643fca
commit 8fe61a2027
1 changed file with 48 additions and 50 deletions
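The BUG_ON() in the subject line comes from ext4's checksum helper, which embeds the 4-byte running CRC directly in the shash descriptor context and calls crypto_shash_update() without a prior crypto_shash_init(). A minimal sketch of that calling pattern (shape based on the 5.4-era ext4_chksum(); the function name here is illustrative and not part of this patch) shows why stm32_crc_desc_ctx may hold nothing but the u32 partial value and why stm32_crc_update() below has to program the hardware itself:

#include <linux/bug.h>
#include <crypto/hash.h>

/*
 * Illustrative ext4-style caller: the descriptor context is exactly
 * 4 bytes holding the running CRC, and update runs with no init call.
 */
static u32 chksum_like_ext4(struct crypto_shash *tfm, u32 crc,
                            const void *address, unsigned int length)
{
        struct {
                struct shash_desc shash;
                char ctx[4];
        } desc;

        /* Fires if the driver's descsize exceeds the 4-byte partial CRC. */
        BUG_ON(crypto_shash_descsize(tfm) != sizeof(desc.ctx));

        desc.shash.tfm = tfm;
        *(u32 *)desc.ctx = crc;         /* partial CRC, no crypto_shash_init() */

        if (crypto_shash_update(&desc.shash, address, length))
                BUG();

        return *(u32 *)desc.ctx;
}

With the old driver, the descriptor context also carried a struct stm32_crc pointer that only crc_init filled in, so a caller like this tripped the descsize BUG_ON() and would otherwise have dereferenced an uninitialised pointer in update.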

drivers/crypto/stm32/stm32-crc32.c

@@ -28,8 +28,10 @@
 
 /* Registers values */
 #define CRC_CR_RESET            BIT(0)
-#define CRC_CR_REVERSE          (BIT(7) | BIT(6) | BIT(5))
 #define CRC_INIT_DEFAULT        0xFFFFFFFF
+#define CRC_CR_REV_IN_WORD      (BIT(6) | BIT(5))
+#define CRC_CR_REV_IN_BYTE      BIT(5)
+#define CRC_CR_REV_OUT          BIT(7)
 
 #define CRC_AUTOSUSPEND_DELAY   50
 
@@ -38,8 +40,6 @@ struct stm32_crc {
         struct device    *dev;
         void __iomem     *regs;
         struct clk       *clk;
-        u8               pending_data[sizeof(u32)];
-        size_t           nb_pending_bytes;
 };
 
 struct stm32_crc_list {
@@ -59,7 +59,6 @@ struct stm32_crc_ctx {
 
 struct stm32_crc_desc_ctx {
         u32    partial; /* crc32c: partial in first 4 bytes of that struct */
-        struct stm32_crc *crc;
 };
 
 static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
@@ -101,25 +100,22 @@ static int stm32_crc_init(struct shash_desc *desc)
         struct stm32_crc *crc;
 
         spin_lock_bh(&crc_list.lock);
-        list_for_each_entry(crc, &crc_list.dev_list, list) {
-                ctx->crc = crc;
-                break;
-        }
+        crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
         spin_unlock_bh(&crc_list.lock);
 
-        pm_runtime_get_sync(ctx->crc->dev);
+        pm_runtime_get_sync(crc->dev);
 
         /* Reset, set key, poly and configure in bit reverse mode */
-        writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
-        writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
-        writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+        writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
+        writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+        writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+                       crc->regs + CRC_CR);
 
         /* Store partial result */
-        ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
-        ctx->crc->nb_pending_bytes = 0;
+        ctx->partial = readl_relaxed(crc->regs + CRC_DR);
 
-        pm_runtime_mark_last_busy(ctx->crc->dev);
-        pm_runtime_put_autosuspend(ctx->crc->dev);
+        pm_runtime_mark_last_busy(crc->dev);
+        pm_runtime_put_autosuspend(crc->dev);
 
         return 0;
 }
@@ -128,31 +124,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
                             unsigned int length)
 {
         struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
-        struct stm32_crc *crc = ctx->crc;
-        u32 *d32;
-        unsigned int i;
+        struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+        struct stm32_crc *crc;
+
+        spin_lock_bh(&crc_list.lock);
+        crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+        spin_unlock_bh(&crc_list.lock);
 
         pm_runtime_get_sync(crc->dev);
 
-        if (unlikely(crc->nb_pending_bytes)) {
-                while (crc->nb_pending_bytes != sizeof(u32) && length) {
-                        /* Fill in pending data */
-                        crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+        /*
+         * Restore previously calculated CRC for this context as init value
+         * Restore polynomial configuration
+         * Configure in register for word input data,
+         * Configure out register in reversed bit mode data.
+         */
+        writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
+        writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
+        writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+                       crc->regs + CRC_CR);
+
+        if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
+                /* Configure for byte data */
+                writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+                               crc->regs + CRC_CR);
+                while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
+                        writeb_relaxed(*d8++, crc->regs + CRC_DR);
                         length--;
                 }
-
-                if (crc->nb_pending_bytes == sizeof(u32)) {
-                        /* Process completed pending data */
-                        writel_relaxed(*(u32 *)crc->pending_data,
-                                       crc->regs + CRC_DR);
-                        crc->nb_pending_bytes = 0;
-                }
+                /* Configure for word data */
+                writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
+                               crc->regs + CRC_CR);
         }
 
-        d32 = (u32 *)d8;
-        for (i = 0; i < length >> 2; i++)
-                /* Process 32 bits data */
-                writel_relaxed(*(d32++), crc->regs + CRC_DR);
+        for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
+                writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
+
+        if (length) {
+                /* Configure for byte data */
+                writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
+                               crc->regs + CRC_CR);
+                while (length--)
+                        writeb_relaxed(*d8++, crc->regs + CRC_DR);
+        }
 
         /* Store partial result */
         ctx->partial = readl_relaxed(crc->regs + CRC_DR);
@@ -160,22 +174,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
         pm_runtime_mark_last_busy(crc->dev);
         pm_runtime_put_autosuspend(crc->dev);
 
-        /* Check for pending data (non 32 bits) */
-        length &= 3;
-        if (likely(!length))
-                return 0;
-
-        if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
-                /* Shall not happen */
-                dev_err(crc->dev, "Pending data overflow\n");
-                return -EINVAL;
-        }
-
-        d8 = (const u8 *)d32;
-        for (i = 0; i < length; i++)
-                /* Store pending data */
-                crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
-
         return 0;
 }
 
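
The unaligned-buffer handling added to stm32_crc_update() above follows a byte/word/byte split: feed bytes until the pointer reaches 32-bit alignment, stream full words, then finish the tail as bytes, switching CRC_CR between CRC_CR_REV_IN_BYTE and CRC_CR_REV_IN_WORD so the hardware's input bit-reversal matches the access width. A standalone sketch of that split, with hypothetical feed_byte()/feed_word() helpers standing in for the driver's writeb_relaxed()/writel_relaxed() accesses to CRC_DR:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-ins for the byte and word writes to the data register. */
static void feed_byte(uint32_t *acc, uint8_t b)  { *acc = (*acc ^ b) * 0x01000193u; }
static void feed_word(uint32_t *acc, uint32_t w) { *acc = (*acc ^ w) * 0x01000193u; }

/* Split a buffer the way the patched stm32_crc_update() does. */
static void consume_buffer(uint32_t *acc, const uint8_t *d8, size_t length)
{
        /* Head: byte-wise until d8 reaches 32-bit alignment. */
        while (((uintptr_t)d8 & (sizeof(uint32_t) - 1)) && length) {
                feed_byte(acc, *d8++);
                length--;
        }

        /* Middle: full 32-bit words from the now-aligned pointer. */
        for (; length >= sizeof(uint32_t);
             d8 += sizeof(uint32_t), length -= sizeof(uint32_t))
                feed_word(acc, *(const uint32_t *)d8);

        /* Tail: leftover bytes, again byte-wise. */
        while (length--)
                feed_byte(acc, *d8++);
}

Note that the removed pending_data/nb_pending_bytes state lived in the shared struct stm32_crc device structure rather than in the per-request context, so dropping it in favour of byte-wise head and tail writes also removes a source of interference between concurrent users of the same CRC unit.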