crypto: caam/qi - add support for TLS 1.0 record

TLS 1.0 descriptors run on SEC 4.x or higher.
For now, only the tls10(hmac(sha1),cbc(aes)) algorithm
is registered by the driver.

Known limitations:
 - when src == dst, no element of the src scatterlist may contain both
   associated data and message data.
 - when src != dst, associated data is not copied from the source into
   the destination.
 - for decryption with src != dst, the destination buffer must be large
   enough to hold the decrypted payload, ICV and padding.

Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Branch: 5.4-rM2-2.2.x-imx-squashed
Authored by Radu Alexe on 2017-05-25 15:51:50 +03:00; committed by Dong Aisheng
parent f20e83fe09
commit ba17612d5e
4 changed files with 932 additions and 0 deletions
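For context, a hedged usage sketch follows (not part of this commit): it shows how a kernel caller might drive the newly registered AEAD through the generic crypto API. The helper name tls_record_encrypt() and the buffer layout are illustrative assumptions; the key blob must use the rtattr-based authenc format parsed by crypto_authenc_extractkeys(), and error handling is trimmed. Note that AAD and payload are kept in separate S/G entries to honor the src == dst limitation above.

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only: encrypt one TLS 1.0 record in place.
 * hdr:  13-byte TLS record header (the associated data)
 * data: cryptlen bytes of plaintext, with room left for ICV + CBC padding
 */
static int tls_record_encrypt(const u8 *keyblob, unsigned int keylen,
			      u8 *hdr, u8 *data, unsigned int cryptlen,
			      u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg[2];
	int err;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* keyblob: rtattr-encoded authenc key (HMAC key || AES key) */
	err = crypto_aead_setkey(tfm, keyblob, keylen);
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	/* separate S/G entries for AAD and message data (see above) */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdr, 13);
	sg_set_buf(&sg[1], data, cryptlen + SHA1_DIGEST_SIZE +
		   AES_BLOCK_SIZE);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				  CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 13);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}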

drivers/crypto/caam/caamalg_desc.c

@@ -621,6 +621,420 @@ copy_iv:
}
EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
/**
* cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
* with OP_ALG_AAI_CBC
* @adata: pointer to authentication transform definitions.
* A split key is required for SEC Era < 6; the size of the split key
* is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
* ANDed with OP_ALG_AAI_HMAC_PRECOMP.
* @assoclen: associated data length
* @ivsize: initialization vector size
* @authsize: authentication data size
* @blocksize: block cipher size
* @era: SEC Era
*/
void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int assoclen,
unsigned int ivsize, unsigned int authsize,
unsigned int blocksize, int era)
{
u32 *key_jump_cmd, *zero_payload_jump_cmd;
u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
/*
* Compute the index (in bytes) for the LOAD with destination of
* Class 1 Data Size Register and for the LOAD that generates padding
*/
if (adata->key_inline) {
idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
cdata->keylen - 4 * CAAM_CMD_SZ;
idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
cdata->keylen - 2 * CAAM_CMD_SZ;
} else {
idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
4 * CAAM_CMD_SZ;
idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2 * CAAM_CMD_SZ;
}
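/*
 * Worked example (assuming CAAM_CMD_SZ == 4): DESC_TLS10_ENC_LEN is
 * DESC_TLS_BASE + 29 commands = 132 bytes, so with both keys inline
 * the two patched LOAD commands sit at byte offsets
 * 132 + keylen_pad + keylen - 16 and 132 + keylen_pad + keylen - 8.
 */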
stidx = 1 << HDR_START_IDX_SHIFT;
init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
/* skip key loading if keys were already loaded due to sharing */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
if (era < 6) {
if (adata->key_inline)
append_key_as_imm(desc, adata->key_virt,
adata->keylen_pad, adata->keylen,
CLASS_2 | KEY_DEST_MDHA_SPLIT |
KEY_ENC);
else
append_key(desc, adata->key_dma, adata->keylen,
CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
} else {
append_proto_dkp(desc, adata);
}
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
else
append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
KEY_DEST_CLASS_REG);
set_jump_tgt_here(desc, key_jump_cmd);
/* class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
/* class 1 operation */
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
/* payloadlen = input data length - (assoclen + ivlen) */
append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
/* math1 = payloadlen + icvlen */
append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
/* padlen = block_size - math1 % block_size */
append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
/* cryptlen = payloadlen + icvlen + padlen */
append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
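/*
 * Note: when (payloadlen + icvlen) is already a multiple of the block
 * size this yields padlen == blocksize, so at least one padding byte
 * (the padding_length field) is always produced, as TLS 1.0 requires.
 */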
/*
* update immediate data with the padding length value
* for the LOAD in the class 1 data size register.
*/
append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
(idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
(idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
/* overwrite PL field for the padding Info FIFO entry */
append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
(idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
(idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
/* store encrypted payload, icv and padding */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
/* if payload length is zero, jump to zero-payload commands */
append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
JUMP_COND_MATH_Z);
/* load iv in context1 */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
LDST_CLASS_1_CCB | ivsize);
/* read assoc for authentication */
append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_MSG);
/* in-snoop payload: message data goes to both cipher and authentication */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
/* jump over the zero-payload commands */
append_jump(desc, JUMP_TEST_ALL | 3);
/* zero-payload commands */
set_jump_tgt_here(desc, zero_payload_jump_cmd);
/* load iv in context1 */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
LDST_CLASS_1_CCB | ivsize);
/* assoc data is the only data for authentication */
append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
/* send icv to encryption */
append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
authsize);
/* update class 1 data size register with padding length */
append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
/* generate padding and send it to encryption */
genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
}
EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
/**
* cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
* with OP_ALG_AAI_CBC
* @adata: pointer to authentication transform definitions.
* A split key is required for SEC Era < 6; the size of the split key
* is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
* ANDed with OP_ALG_AAI_HMAC_PRECOMP.
* @assoclen: associated data length
* @ivsize: initialization vector size
* @authsize: authentication data size
* @blocksize: block cipher size
* @era: SEC Era
*/
void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int assoclen,
unsigned int ivsize, unsigned int authsize,
unsigned int blocksize, int era)
{
u32 stidx, jumpback;
u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
/*
* Pointer Size bool determines the size of address pointers.
* false - Pointers fit in one 32-bit word.
* true - Pointers fit in two 32-bit words.
*/
bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
stidx = 1 << HDR_START_IDX_SHIFT;
init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
/* skip key loading if keys were already loaded due to sharing */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
if (era < 6)
append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC);
else
append_proto_dkp(desc, adata);
append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
KEY_DEST_CLASS_REG);
set_jump_tgt_here(desc, key_jump_cmd);
/* class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
/* class 1 operation */
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT);
/* VSIL = input data length - 2 * block_size */
append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
blocksize);
/*
* payloadlen + icvlen + padlen = input data length - (assoclen +
* ivsize)
*/
append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
/* skip data to the last but one cipher block */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
/* load iv for the last cipher block */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
LDST_CLASS_1_CCB | ivsize);
/* read last cipher block */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
FIFOLD_TYPE_LAST1 | blocksize);
/* move decrypted block into math0 and math1 */
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
blocksize);
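/*
 * The final ciphertext block is decrypted ahead of the bulk data so
 * that its last byte - the TLS padding_length field - is available in
 * the math registers before the variable-length loads and stores below
 * are configured.
 */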
/* reset AES CHA */
append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
/* rewind input sequence */
append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
/* key1 is in decryption form */
append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
/* load iv in context1 */
append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
LDST_SRCDST_WORD_CLASS_CTX | ivsize);
/* read sequence number */
append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
/* load Type, Version and Len fields in math0 */
append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
/* compute (padlen - 1) */
append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
/* math2 = icvlen + (padlen - 1) + 1 */
append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
/* VSOL = payloadlen + icvlen + padlen */
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
if (caam_little_end)
append_moveb(desc, MOVE_WAITCOMP |
MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
/* update Len field */
append_math_sub(desc, REG0, REG0, REG2, 8);
/* store decrypted payload, icv and padding */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
JUMP_COND_MATH_Z);
/* send Type, Version and Len(pre ICV) fields to authentication */
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
(3 << MOVE_OFFSET_SHIFT) | 5);
/* out-snoop payload: decrypted message data also goes to authentication */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
FIFOLDST_VLF);
skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
set_jump_tgt_here(desc, zero_payload_jump_cmd);
/* send Type, Version and Len(pre ICV) fields to authentication */
append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
(3 << MOVE_OFFSET_SHIFT) | 5);
set_jump_tgt_here(desc, skip_zero_jump_cmd);
append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
/* load icvlen and padlen */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
/*
* Start a new input sequence using the SEQ OUT PTR command options,
* pointer and length used when the current output sequence was defined.
*/
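/*
 * In other words, self-modifying descriptor code: copy the SEQ OUT PTR
 * command words into math registers, flip the command field into
 * SEQ IN PTR, append a backward JUMP, then write the patched words back
 * into the descriptor buffer and execute them.
 */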
if (ps) {
/*
* Move the lower 32 bits of Shared Descriptor address, the
* SEQ OUT PTR command, Output Pointer (2 words) and
* Output Length into math registers.
*/
if (caam_little_end)
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
MOVE_DEST_MATH0 |
(55 * 4 << MOVE_OFFSET_SHIFT) | 20);
else
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
MOVE_DEST_MATH0 |
(54 * 4 << MOVE_OFFSET_SHIFT) | 20);
/* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
append_math_and_imm_u32(desc, REG0, REG0, IMM,
~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
/* Append a JUMP command after the copied fields */
jumpback = CMD_JUMP | (char)-9;
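/*
 * The low byte of a JUMP command holds a signed relative offset in
 * 4-byte words, so (char)-9 jumps back nine commands.
 */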
append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
LDST_SRCDST_WORD_DECO_MATH2 |
(4 << LDST_OFFSET_SHIFT));
append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
/* Move the updated fields back to the Job Descriptor */
if (caam_little_end)
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
MOVE_DEST_DESCBUF |
(55 * 4 << MOVE_OFFSET_SHIFT) | 24);
else
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
MOVE_DEST_DESCBUF |
(54 * 4 << MOVE_OFFSET_SHIFT) | 24);
/*
* Read the new SEQ IN PTR command, Input Pointer, Input Length
* and then jump back to the next command from the
* Shared Descriptor.
*/
append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
} else {
/*
* Move the SEQ OUT PTR command, Output Pointer (1 word) and
* Output Length into math registers.
*/
if (caam_little_end)
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
MOVE_DEST_MATH0 |
(54 * 4 << MOVE_OFFSET_SHIFT) | 12);
else
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
MOVE_DEST_MATH0 |
(53 * 4 << MOVE_OFFSET_SHIFT) | 12);
/* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
append_math_and_imm_u64(desc, REG0, REG0, IMM,
~(((u64)(CMD_SEQ_IN_PTR ^
CMD_SEQ_OUT_PTR)) << 32));
/* Append a JUMP command after the copied fields */
jumpback = CMD_JUMP | (char)-7;
append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
LDST_SRCDST_WORD_DECO_MATH1 |
(4 << LDST_OFFSET_SHIFT));
append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
/* Move the updated fields back to the Job Descriptor */
if (caam_little_end)
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
MOVE_DEST_DESCBUF |
(54 * 4 << MOVE_OFFSET_SHIFT) | 16);
else
append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
MOVE_DEST_DESCBUF |
(53 * 4 << MOVE_OFFSET_SHIFT) | 16);
/*
* Read the new SEQ IN PTR command, Input Pointer, Input Length
* and then jump back to the next command from the
* Shared Descriptor.
*/
append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
}
/* skip payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
/* check icv */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
FIFOLD_TYPE_LAST2 | authsize);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
}
EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
/**
* cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction

drivers/crypto/caam/caamalg_desc.h

@@ -17,6 +17,9 @@
#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
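/* Note: TLS 1.0 encap descriptor is base + 29 commands; inlined keys are counted separately */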
/* Note: Nonce is counted in cdata.keylen */
#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
@@ -72,6 +75,16 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
u32 *nonce, const u32 ctx1_iv_off,
const bool is_qi, int era);
void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int assoclen,
unsigned int ivsize, unsigned int authsize,
unsigned int blocksize, int era);
void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int assoclen,
unsigned int ivsize, unsigned int authsize,
unsigned int blocksize, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);

drivers/crypto/caam/caamalg_qi.c

@@ -290,6 +290,167 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
return err;
}
static int tls_set_sh_desc(struct crypto_aead *tls)
{
struct caam_ctx *ctx = crypto_aead_ctx(tls);
unsigned int ivsize = crypto_aead_ivsize(tls);
unsigned int blocksize = crypto_aead_blocksize(tls);
unsigned int assoclen = 13; /* always 13 bytes for TLS */
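/* 8-byte sequence number + 1-byte type + 2-byte version + 2-byte length */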
unsigned int data_len[2];
u32 inl_mask;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
* TLS 1.0 encrypt shared descriptor
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
&inl_mask, ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
assoclen, ivsize, ctx->authsize, blocksize,
ctrlpriv->era);
/*
* TLS 1.0 decrypt shared descriptor
* Keys do not fit inline, regardless of algorithms used
*/
ctx->adata.key_inline = false;
ctx->adata.key_dma = ctx->key_dma;
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
assoclen, ivsize, ctx->authsize, blocksize,
ctrlpriv->era);
return 0;
}
static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(tls);
ctx->authsize = authsize;
tls_set_sh_desc(tls);
return 0;
}
static int tls_setkey(struct crypto_aead *tls, const u8 *key,
unsigned int keylen)
{
struct caam_ctx *ctx = crypto_aead_ctx(tls);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
#ifdef DEBUG
dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
/*
* If DKP is supported, use it in the shared descriptor to generate
* the split key.
*/
if (ctrlpriv->era >= 6) {
ctx->adata.keylen = keys.authkeylen;
ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
OP_ALG_ALGSEL_MASK);
if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
goto badkey;
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma,
ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
goto skip_split_key;
}
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
if (ret)
goto badkey;
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
#ifdef DEBUG
dev_err(jrdev, "split keylen %d split keylen padded %d\n",
ctx->adata.keylen, ctx->adata.keylen_pad);
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
ret = tls_set_sh_desc(tls);
if (ret)
goto badkey;
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
goto badkey;
}
}
if (ctx->drv_ctx[DECRYPT]) {
ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
goto badkey;
}
}
memzero_explicit(&keys, sizeof(keys));
return ret;
badkey:
crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -808,6 +969,29 @@ struct aead_edesc {
struct qm_sg_entry sgt[0];
};
/*
* tls_edesc - s/w-extended tls descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
* @qm_sg_bytes: length of dma mapped h/w link table
* @tmp: array of scatterlists used by 'scatterwalk_ffwd'
* @dst: pointer to output scatterlist, usable for unmapping
* @qm_sg_dma: bus physical mapped address of h/w link table
* @drv_req: driver-specific request structure
* @sgt: the h/w link table, followed by IV
*/
struct tls_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct scatterlist tmp[2];
struct scatterlist *dst;
struct caam_drv_req drv_req;
struct qm_sg_entry sgt[0];
};
/*
* skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input scatterlist
@@ -900,6 +1084,18 @@ static void aead_unmap(struct device *dev,
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
static void tls_unmap(struct device *dev,
struct tls_edesc *edesc,
struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
edesc->dst_nents, edesc->iv_dma, ivsize, DMA_TO_DEVICE,
edesc->qm_sg_dma, edesc->qm_sg_bytes);
}
static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
struct skcipher_request *req)
{
@@ -1192,6 +1388,243 @@ static int aead_decrypt(struct aead_request *req)
return aead_crypt(req, false);
}
static void tls_done(struct caam_drv_req *drv_req, u32 status)
{
struct device *qidev;
struct tls_edesc *edesc;
struct aead_request *aead_req = drv_req->app_ctx;
struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
int ecode = 0;
qidev = caam_ctx->qidev;
if (unlikely(status)) {
caam_jr_strstatus(qidev, status);
ecode = -EIO;
}
edesc = container_of(drv_req, typeof(*edesc), drv_req);
tls_unmap(qidev, edesc, aead_req);
aead_request_complete(aead_req, ecode);
qi_cache_free(edesc);
}
/*
* allocate and map the tls extended descriptor
*/
static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
unsigned int blocksize = crypto_aead_blocksize(aead);
unsigned int padsize, authsize;
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
typeof(*alg), aead);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct tls_edesc *edesc;
dma_addr_t qm_sg_dma, iv_dma = 0;
int ivsize = 0;
u8 *iv;
int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
int in_len, out_len;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
struct scatterlist *dst;
if (encrypt) {
padsize = blocksize - ((req->cryptlen + ctx->authsize) %
blocksize);
authsize = ctx->authsize + padsize;
} else {
authsize = ctx->authsize;
}
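/*
 * For encryption, authsize now covers both ICV and TLS padding, so the
 * output length computed below reserves room for them.
 */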
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
return (struct tls_edesc *)drv_ctx;
/* allocate space for base edesc, link tables and IV */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
}
if (likely(req->src == req->dst)) {
src_nents = sg_nents_for_len(req->src, req->assoclen +
req->cryptlen +
(encrypt ? authsize : 0));
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
req->assoclen + req->cryptlen +
(encrypt ? authsize : 0));
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
DMA_BIDIRECTIONAL);
if (unlikely(!mapped_src_nents)) {
dev_err(qidev, "unable to map source\n");
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
dst = req->dst;
} else {
src_nents = sg_nents_for_len(req->src, req->assoclen +
req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
req->assoclen + req->cryptlen);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
dst_nents = sg_nents_for_len(dst, req->cryptlen +
(encrypt ? authsize : 0));
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
req->cryptlen +
(encrypt ? authsize : 0));
qi_cache_free(edesc);
return ERR_PTR(dst_nents);
}
if (src_nents) {
mapped_src_nents = dma_map_sg(qidev, req->src,
src_nents, DMA_TO_DEVICE);
if (unlikely(!mapped_src_nents)) {
dev_err(qidev, "unable to map source\n");
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
} else {
mapped_src_nents = 0;
}
mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
DMA_FROM_DEVICE);
if (unlikely(!mapped_dst_nents)) {
dev_err(qidev, "unable to map destination\n");
dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
}
/*
* Create S/G table: IV, src, dst.
* Input is not contiguous.
*/
qm_sg_ents = 1 + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
ivsize = crypto_aead_ivsize(aead);
iv = (u8 *)(sg_table + qm_sg_ents);
/* Make sure IV is located in a DMAable area */
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->dst = dst;
edesc->iv_dma = iv_dma;
edesc->drv_req.app_ctx = req;
edesc->drv_req.cbk = tls_done;
edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
qm_sg_index = 1;
sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
qm_sg_index += mapped_src_nents;
if (mapped_dst_nents > 1)
sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
qm_sg_index, 0);
qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->qm_sg_dma = qm_sg_dma;
edesc->qm_sg_bytes = qm_sg_bytes;
out_len = req->cryptlen + (encrypt ? authsize : 0);
in_len = ivsize + req->assoclen + req->cryptlen;
fd_sgt = &edesc->drv_req.fd_sgt[0];
dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
if (req->dst == req->src)
dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
(sg_nents_for_len(req->src, req->assoclen) +
1) * sizeof(*sg_table), out_len, 0);
else if (mapped_dst_nents == 1)
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
else
dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
qm_sg_index, out_len, 0);
return edesc;
}
static int tls_crypt(struct aead_request *req, bool encrypt)
{
struct tls_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
int ret;
if (unlikely(caam_congested))
return -EAGAIN;
edesc = tls_edesc_alloc(req, encrypt);
if (IS_ERR_OR_NULL(edesc))
return PTR_ERR(edesc);
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
} else {
tls_unmap(ctx->qidev, edesc, req);
qi_cache_free(edesc);
}
return ret;
}
static int tls_encrypt(struct aead_request *req)
{
return tls_crypt(req, true);
}
static int tls_decrypt(struct aead_request *req)
{
return tls_crypt(req, false);
}
static int ipsec_gcm_encrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
@@ -2411,6 +2844,26 @@ static struct caam_aead_alg driver_aeads[] = {
.geniv = true,
}
},
{
.aead = {
.base = {
.cra_name = "tls10(hmac(sha1),cbc(aes))",
.cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = tls_setkey,
.setauthsize = tls_setauthsize,
.encrypt = tls_encrypt,
.decrypt = tls_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
}
}
};
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
@@ -2418,6 +2871,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
{
struct caam_drv_private *priv;
struct device *dev;
/* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
static const u8 digest_size[] = {
MD5_DIGEST_SIZE,
SHA1_DIGEST_SIZE,
SHA224_DIGEST_SIZE,
SHA256_DIGEST_SIZE,
SHA384_DIGEST_SIZE,
SHA512_DIGEST_SIZE
};
u8 op_id;
/*
* distribute tfms across job rings to ensure in-order
@@ -2449,6 +2912,21 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
ctx->qidev = dev;
if (ctx->adata.algtype) {
op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
>> OP_ALG_ALGSEL_SHIFT;
if (op_id < ARRAY_SIZE(digest_size)) {
ctx->authsize = digest_size[op_id];
} else {
dev_err(ctx->jrdev,
"incorrect op_id %d; must be less than %zu\n",
op_id, ARRAY_SIZE(digest_size));
caam_jr_free(ctx->jrdev);
return -EINVAL;
}
} else {
ctx->authsize = 0;
}
spin_lock_init(&ctx->lock);
ctx->drv_ctx[ENCRYPT] = NULL;

drivers/crypto/caam/desc.h

@@ -1704,4 +1704,31 @@
/* Frame Descriptor Command for Replacement Job Descriptor */
#define FD_CMD_REPLACE_JOB_DESC 0x20000000
/* CHA Control Register bits */
#define CCTRL_RESET_CHA_ALL 0x1
#define CCTRL_RESET_CHA_AESA 0x2
#define CCTRL_RESET_CHA_DESA 0x4
#define CCTRL_RESET_CHA_AFHA 0x8
#define CCTRL_RESET_CHA_KFHA 0x10
#define CCTRL_RESET_CHA_SF8A 0x20
#define CCTRL_RESET_CHA_PKHA 0x40
#define CCTRL_RESET_CHA_MDHA 0x80
#define CCTRL_RESET_CHA_CRCA 0x100
#define CCTRL_RESET_CHA_RNG 0x200
#define CCTRL_RESET_CHA_SF9A 0x400
#define CCTRL_RESET_CHA_ZUCE 0x800
#define CCTRL_RESET_CHA_ZUCA 0x1000
#define CCTRL_UNLOAD_PK_A0 0x10000
#define CCTRL_UNLOAD_PK_A1 0x20000
#define CCTRL_UNLOAD_PK_A2 0x40000
#define CCTRL_UNLOAD_PK_A3 0x80000
#define CCTRL_UNLOAD_PK_B0 0x100000
#define CCTRL_UNLOAD_PK_B1 0x200000
#define CCTRL_UNLOAD_PK_B2 0x400000
#define CCTRL_UNLOAD_PK_B3 0x800000
#define CCTRL_UNLOAD_PK_N 0x1000000
#define CCTRL_UNLOAD_PK_A 0x4000000
#define CCTRL_UNLOAD_PK_B 0x8000000
#define CCTRL_UNLOAD_SBOX 0x10000000
#endif /* DESC_H */