
crypto: stm32/crc32 - protect from concurrent accesses

Protect the STM32 CRC device from concurrent accesses.

As this creates a spinlocked section whose duration grows with the
buffer size, provide a module parameter to relieve the pressure by
splitting the critical section into chunks.

The size of each chunk is set by the burst_size module parameter.
The default is burst_size=0, i.e. do not split the incoming buffer.
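
For readers skimming the patch, the idea can be sketched in plain
user-space C (the helper names below are hypothetical and the 32-bit
alignment handling of the real driver is omitted): the buffer is walked
in burst_size-byte pieces, so the lock protecting the CRC unit is only
held for one chunk at a time. The actual loop added by this patch, in
stm32_crc_update() below, additionally sizes the first chunk around the
buffer's 32-bit alignment.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int burst_size = 64;	/* 0 means "do not split" */

/* Stand-in for the spinlocked hardware update; the name is made up. */
static int locked_crc_update(const uint8_t *buf, size_t len)
{
	printf("locked update of %zu bytes\n", len);
	return 0;
}

static int crc_update(const uint8_t *buf, size_t len)
{
	size_t chunk;
	int ret;

	if (!burst_size)
		return locked_crc_update(buf, len);	/* one big critical section */

	while (len) {
		chunk = len < burst_size ? len : burst_size;
		ret = locked_crc_update(buf, chunk);	/* lock held per chunk only */
		if (ret)
			return ret;
		buf += chunk;
		len -= chunk;
	}
	return 0;
}

int main(void)
{
	uint8_t data[200] = { 0 };

	/* With burst_size = 64 this prints chunks of 64, 64, 64 and 8 bytes. */
	return crc_update(data, sizeof(data));
}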

Signed-off-by: Nicolas Toromanoff <nicolas.toromanoff@st.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Nicolas Toromanoff 2020-05-12 16:11:13 +02:00 committed by Herbert Xu
parent 100f84beee
commit 7795c0baf5
1 changed file with 45 additions and 2 deletions


@@ -35,11 +35,16 @@
#define CRC_AUTOSUSPEND_DELAY 50
static unsigned int burst_size;
module_param(burst_size, uint, 0644);
MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
struct stm32_crc {
struct list_head list;
struct device *dev;
void __iomem *regs;
struct clk *clk;
spinlock_t lock;
};
struct stm32_crc_list {
@@ -109,6 +114,7 @@ static int stm32_crc_init(struct shash_desc *desc)
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
unsigned long flags;
crc = stm32_crc_get_next_crc();
if (!crc)
@@ -116,6 +122,8 @@ static int stm32_crc_init(struct shash_desc *desc)
pm_runtime_get_sync(crc->dev);
spin_lock_irqsave(&crc->lock, flags);
/* Reset, set key, poly and configure in bit reverse mode */
writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
@@ -125,18 +133,21 @@ static int stm32_crc_init(struct shash_desc *desc)
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
spin_unlock_irqrestore(&crc->lock, flags);
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
return 0;
}
static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
unsigned int length)
static int burst_update(struct shash_desc *desc, const u8 *d8,
size_t length)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
unsigned long flags;
crc = stm32_crc_get_next_crc();
if (!crc)
@@ -144,6 +155,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
pm_runtime_get_sync(crc->dev);
spin_lock_irqsave(&crc->lock, flags);
/*
* Restore previously calculated CRC for this context as init value
* Restore polynomial configuration
@@ -182,12 +195,40 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
spin_unlock_irqrestore(&crc->lock, flags);
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
return 0;
}
static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
unsigned int length)
{
const unsigned int burst_sz = burst_size;
unsigned int rem_sz;
const u8 *cur;
size_t size;
int ret;
if (!burst_sz)
return burst_update(desc, d8, length);
/* Digest first bytes not 32bit aligned at first pass in the loop */
size = min(length,
burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
sizeof(u32)));
for (rem_sz = length, cur = d8; rem_sz;
rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
ret = burst_update(desc, cur, size);
if (ret)
return ret;
}
return 0;
}
static int stm32_crc_final(struct shash_desc *desc, u8 *out)
{
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
@@ -300,6 +341,8 @@ static int stm32_crc_probe(struct platform_device *pdev)
pm_runtime_irq_safe(dev);
pm_runtime_enable(dev);
spin_lock_init(&crc->lock);
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);