Lin Jinhan 629aef9788 crypto: rockchip: set CRYPTO_ALG_INTERNAL for cra_flags
The Rockchip crypto driver now sets the CRYPTO_ALG_INTERNAL flag, which
prevents its algorithms from being selected by other users of the kernel
crypto API; they are reserved for use through librkcrypto.
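
A minimal sketch of the flag's two sides (identifiers are illustrative,
not taken from the driver):

    /* Driver side: mark the algorithm as internal-only. */
    static struct skcipher_alg rk_cbc_aes_alg = {
            .base.cra_name  = "cbc(aes)",
            .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
            /* ... */
    };

    /* Consumer side (e.g. cryptodev on behalf of librkcrypto): request
     * internal algorithms explicitly by setting CRYPTO_ALG_INTERNAL in
     * both the type and mask arguments. Ordinary callers pass type = 0,
     * so they never match this driver. */
    tfm = crypto_alloc_skcipher("cbc(aes)", CRYPTO_ALG_INTERNAL,
                                CRYPTO_ALG_INTERNAL);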

This fixes a kernel panic triggered by calling hmac(sha256) during the
Android CTS test:

[  234.124644][    C0] ------------[ cut here ]------------
[  234.124694][    C0] kernel BUG at arch/arm64/kernel/fpsimd.c:1832!
[  234.124708][    C0] Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP
[  234.165910][    C0] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G           OE      6.1.78-android14-11-g55b024554aae-ab11965736 #1
[  234.166912][    C0] Hardware name: Rockchip RK3576 EVB1 V10 Board (DT)
[  234.167486][    C0] pstate: 404000c5 (nZcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[  234.168160][    C0] pc : kernel_neon_begin+0xe8/0x14c
[  234.168623][    C0] lr : cbc_decrypt+0x94/0x104
[  234.169037][    C0] sp : ffffffc008003ce0
[  234.169393][    C0] x29: ffffffc008003ce0 x28: ffffffc009faed80 x27: 00000000000000e0
[  234.170093][    C0] x26: ffffffc00a13e000 x25: 0000000000000001 x24: ffffff800dd85dc0
[  234.170792][    C0] x23: ffffff80f17e5540 x22: ffffff80f17e5600 x21: 0000000000000002
[  234.171492][    C0] x20: ffffff80c3720d70 x19: ffffffc009faed80 x18: ffffffc008005060
[  234.172192][    C0] x17: 000000000000003c x16: 000000000000003c x15: 000000000000003c
[  234.172891][    C0] x14: 0000000000000000 x13: fffffffe030dc9c0 x12: 000000000000003c
[  234.173590][    C0] x11: 000000000000003c x10: 0000000000000008 x9 : 0000000000000080
[  234.174289][    C0] x8 : 00000000000000c0 x7 : 0000000000000000 x6 : 189055e08898eccd
[  234.174988][    C0] x5 : 000000000000003c x4 : 0000000000000fc4 x3 : 0000000000000020
[  234.175687][    C0] x2 : 0000000000000030 x1 : 0000000000000020 x0 : 0000000000000000
[  234.176387][    C0] Call trace:
[  234.176667][    C0]  kernel_neon_begin+0xe8/0x14c
[  234.177092][    C0]  cbc_decrypt+0x94/0x104
[  234.177472][    C0]  crypto_skcipher_decrypt+0x3c/0x54
[  234.177932][    C0]  crypto_authenc_decrypt_tail+0xd8/0xf4
[  234.178423][    C0]  authenc_verify_ahash_done+0x5c/0x6c
[  234.178902][    C0]  rk_ahash_crypto_complete+0x10c/0x204 [rk_crypto]
[  234.179522][    C0]  rk_complete_op+0x78/0x100 [rk_crypto]
[  234.180049][    C0]  rk_crypto_done_task_cb+0xc8/0x100 [rk_crypto]
[  234.180641][    C0]  tasklet_action_common+0x260/0x4bc
[  234.181100][    C0]  tasklet_action+0x24/0x34
[  234.181492][    C0]  __do_softirq+0x11c/0x418
[  234.181883][    C0]  ____do_softirq+0x10/0x20
[  234.182274][    C0]  call_on_irq_stack+0x3c/0x74
[  234.182687][    C0]  do_softirq_own_stack+0x1c/0x2c
[  234.183121][    C0]  __irq_exit_rcu+0x54/0xb4
[  234.183513][    C0]  irq_exit_rcu+0x10/0x1c
[  234.183893][    C0]  el1_interrupt+0xa4/0xd8
[  234.184276][    C0]  el1h_64_irq_handler+0x18/0x24
[  234.184710][    C0]  el1h_64_irq+0x68/0x6c
[  234.185079][    C0]  cpuidle_enter_state+0x1d0/0x5b4
[  234.185526][    C0]  cpuidle_enter+0x38/0x54
[  234.185905][    C0]  do_idle+0x1d4/0x294
[  234.186268][    C0]  cpu_startup_entry+0x34/0x3c
[  234.186682][    C0]  rest_init+0xe0/0xe4
[  234.187042][    C0]  arch_call_rest_init+0x10/0x14
[  234.187477][    C0]  start_kernel+0x384/0x478
[  234.187866][    C0]  __primary_switched+0xc8/0xd4
[  234.188295][    C0] Code: f85f8e5e d65f03c0 943f17c6 34fffcd5 (d4210000)
[  234.188891][    C0] ---[ end trace 0000000000000000 ]---
[  234.204980][    C0] Kernel panic - not syncing: Oops - BUG: Fatal exception in interrupt

Signed-off-by: Lin Jinhan <troy.lin@rock-chips.com>
Change-Id: I9bace812173c232f16fd8cb72466d37fae98a5b6
2024-09-11 16:08:44 +08:00

/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Portions Copyright (c) 2010 Michael Weiser
 * Portions Copyright (c) 2010 Phil Sutter
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/aead.h>
#include <linux/rtnetlink.h>
#include <crypto/authenc.h>
#include "cryptodev.h"
#include "cipherapi.h"

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
extern const struct crypto_type crypto_givcipher_type;
#endif

static void cryptodev_complete(struct crypto_async_request *req, int err)
{
	struct cryptodev_result *res = req->data;

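	/*
	 * With CRYPTO_TFM_REQ_MAY_BACKLOG set, a backlogged request first
	 * completes with -EINPROGRESS when it is moved onto the hardware
	 * queue; only the second callback carries the final status.
	 */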
	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

int cryptodev_get_cipher_keylen(unsigned int *keylen, struct session_op *sop,
		int aead)
{
	/*
	 * For blockciphers (AES-CBC) or non-composite aead ciphers (like
	 * AES-GCM), the key length is simply the cipher keylen obtained
	 * from userspace. If the cipher is composite aead, the keylen is
	 * the sum of cipher keylen, hmac keylen and a key header length.
	 * This key format is the one used in the Linux kernel for composite
	 * aead ciphers (crypto/authenc.c).
	 */
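	/*
	 * Illustrative example (key sizes assumed, not taken from this
	 * file): authenc(hmac(sha256),cbc(aes)) with a 32-byte AES key and
	 * a 32-byte HMAC key needs 32 + 32 + RTA_SPACE(4) = 72 bytes on
	 * common configurations, since the authenc key header is an 8-byte
	 * rtattr carrying the 4-byte enckeylen parameter.
	 */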
	unsigned int klen = sop->keylen;

	if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN))
		return -EINVAL;

	if (aead && sop->mackeylen) {
		if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN))
			return -EINVAL;
		klen += sop->mackeylen;
		klen += RTA_SPACE(sizeof(struct crypto_authenc_key_param));
	}

	*keylen = klen;
	return 0;
}

int cryptodev_get_cipher_key(uint8_t *key, struct session_op *sop, int aead)
{
	/*
	 * Get cipher key from user-space. For blockciphers just copy it from
	 * user-space. For composite aead ciphers combine it with the hmac key
	 * in the format used by the Linux kernel in crypto/authenc.c:
	 *
	 * [[AUTHENC_KEY_HEADER + CIPHER_KEYLEN] [AUTHENTICATION KEY] [CIPHER KEY]]
	 */
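	/*
	 * Byte layout of the composite key (offsets assume the common
	 * 4-byte-aligned rtattr layout):
	 *   [0..3]  struct rtattr { rta_len = 8,
	 *                           rta_type = CRYPTO_AUTHENC_KEYA_PARAM }
	 *   [4..7]  __be32 enckeylen (cipher key length, big-endian)
	 *   [8..]   HMAC key, followed immediately by the cipher key
	 */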
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	int ret = 0;

	if (aead && sop->mackeylen) {
		/*
		 * Composite aead ciphers. The first four bytes are the header
		 * type and header length for aead keys.
		 */
		rta = (void *)key;
		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));

		/* The next four bytes hold the length of the encryption key */
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(sop->keylen);

		/* Advance key pointer eight bytes and copy the hmac key */
		key += RTA_SPACE(sizeof(*param));
		if (unlikely(copy_from_user(key, sop->mackey, sop->mackeylen))) {
			ret = -EFAULT;
			goto error;
		}

		/* Advance key pointer past the hmac key */
		key += sop->mackeylen;
	}

	/* now copy the blockcipher key */
	if (unlikely(copy_from_user(key, sop->key, sop->keylen)))
		ret = -EFAULT;

error:
	return ret;
}

/* Was correct key length supplied? */
static int check_key_size(size_t keylen, const char *alg_name,
			  unsigned int min_keysize, unsigned int max_keysize)
{
	if (max_keysize > 0 && unlikely((keylen < min_keysize) ||
					(keylen > max_keysize))) {
		ddebug(1, "Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.",
		       keylen, alg_name, min_keysize, max_keysize);
		return -EINVAL;
	}

	return 0;
}

int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
			  uint8_t *keyp, size_t keylen, int stream, int aead)
{
	int ret;

	if (aead == 0) {
		unsigned int min_keysize, max_keysize;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
		struct crypto_tfm *tfm;
#else
		struct ablkcipher_alg *alg;
#endif
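		/*
		 * Passing CRYPTO_ALG_INTERNAL as both type and mask restricts
		 * the lookup to algorithms that have CRYPTO_ALG_INTERNAL set
		 * in cra_flags, i.e. the hardware implementations that are
		 * hidden from ordinary in-kernel users (see the commit
		 * message above).
		 */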
		out->async.s = cryptodev_crypto_alloc_blkcipher(alg_name,
						CRYPTO_ALG_INTERNAL,
						CRYPTO_ALG_INTERNAL);
		if (unlikely(IS_ERR(out->async.s))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return PTR_ERR(out->async.s);
		}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
		tfm = crypto_skcipher_tfm(out->async.s);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 4, 0))
		if ((tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
		    || (tfm->__crt_alg->cra_type == &crypto_givcipher_type)
#endif
		    ) {
			struct ablkcipher_alg *alg;

			alg = &tfm->__crt_alg->cra_ablkcipher;
			min_keysize = alg->min_keysize;
			max_keysize = alg->max_keysize;
		} else
#endif
		{
			struct skcipher_alg *alg;

			alg = crypto_skcipher_alg(out->async.s);
			min_keysize = alg->min_keysize;
			max_keysize = alg->max_keysize;
		}
#else
		alg = crypto_ablkcipher_alg(out->async.s);
		min_keysize = alg->min_keysize;
		max_keysize = alg->max_keysize;
#endif
		ret = check_key_size(keylen, alg_name, min_keysize,
				     max_keysize);
		if (ret)
			goto error;

		out->blocksize = cryptodev_crypto_blkcipher_blocksize(out->async.s);
		out->ivsize = cryptodev_crypto_blkcipher_ivsize(out->async.s);
		out->alignmask = cryptodev_crypto_blkcipher_alignmask(out->async.s);

		ret = cryptodev_crypto_blkcipher_setkey(out->async.s, keyp, keylen);
	} else {
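		/* Same internal-only lookup as for the skcipher case above. */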
		out->async.as = crypto_alloc_aead(alg_name,
						  CRYPTO_ALG_INTERNAL,
						  CRYPTO_ALG_INTERNAL);
		if (unlikely(IS_ERR(out->async.as))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return PTR_ERR(out->async.as);
		}

		out->blocksize = crypto_aead_blocksize(out->async.as);
		out->ivsize = crypto_aead_ivsize(out->async.as);
		out->alignmask = crypto_aead_alignmask(out->async.as);
		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
	}

	if (unlikely(ret)) {
		ddebug(1, "Setting key failed for %s-%zu.", alg_name, keylen * 8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->aead = aead;

	init_completion(&out->async.result.completion);

	if (aead == 0) {
		out->async.request = cryptodev_blkcipher_request_alloc(out->async.s,
								       GFP_KERNEL);
		if (unlikely(!out->async.request)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		cryptodev_blkcipher_request_set_callback(out->async.request,
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					cryptodev_complete, &out->async.result);
	} else {
		out->async.arequest = aead_request_alloc(out->async.as, GFP_KERNEL);
		if (unlikely(!out->async.arequest)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		aead_request_set_callback(out->async.arequest,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  cryptodev_complete, &out->async.result);
	}

	out->init = 1;
	return 0;

error:
	if (aead == 0) {
		cryptodev_blkcipher_request_free(out->async.request);
		cryptodev_crypto_free_blkcipher(out->async.s);
	} else {
		if (out->async.arequest)
			aead_request_free(out->async.arequest);
		if (out->async.as)
			crypto_free_aead(out->async.as);
	}

	return ret;
}

void cryptodev_cipher_deinit(struct cipher_data *cdata)
{
	if (cdata->init) {
		if (cdata->aead == 0) {
			cryptodev_blkcipher_request_free(cdata->async.request);
			cryptodev_crypto_free_blkcipher(cdata->async.s);
		} else {
			if (cdata->async.arequest)
				aead_request_free(cdata->async.arequest);
			if (cdata->async.as)
				crypto_free_aead(cdata->async.as);
		}
		cdata->init = 0;
	}
}

static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
{
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
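		/*
		 * -EINPROGRESS means the request was queued; -EBUSY, with
		 * CRYPTO_TFM_REQ_MAY_BACKLOG set on the request, means it
		 * was placed on the backlog. In both cases the result is
		 * delivered through cryptodev_complete().
		 */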
		wait_for_completion(&cr->completion);
		/*
		 * At this point we know for sure the request has finished,
		 * because wait_for_completion above was not interruptible.
		 * This is important because otherwise hardware or driver
		 * might try to access memory which will be freed or reused
		 * for another request.
		 */
		if (unlikely(cr->err)) {
			derr(0, "error from async request: %d", cr->err);
			return cr->err;
		}
		break;
	default:
		return ret;
	}

	return 0;
}

ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
		const struct scatterlist *src, struct scatterlist *dst,
		size_t len)
{
	int ret;

	reinit_completion(&cdata->async.result.completion);

	if (cdata->aead == 0) {
		cryptodev_blkcipher_request_set_crypt(cdata->async.request,
				(struct scatterlist *)src, dst,
				len, cdata->async.iv);
		ret = cryptodev_crypto_blkcipher_encrypt(cdata->async.request);
	} else {
		aead_request_set_crypt(cdata->async.arequest,
				(struct scatterlist *)src, dst,
				len, cdata->async.iv);
		ret = crypto_aead_encrypt(cdata->async.arequest);
	}

	return waitfor(&cdata->async.result, ret);
}

ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
		const struct scatterlist *src, struct scatterlist *dst,
		size_t len)
{
	int ret;

	reinit_completion(&cdata->async.result.completion);

	if (cdata->aead == 0) {
		cryptodev_blkcipher_request_set_crypt(cdata->async.request,
				(struct scatterlist *)src, dst,
				len, cdata->async.iv);
		ret = cryptodev_crypto_blkcipher_decrypt(cdata->async.request);
	} else {
		aead_request_set_crypt(cdata->async.arequest,
				(struct scatterlist *)src, dst,
				len, cdata->async.iv);
		ret = crypto_aead_decrypt(cdata->async.arequest);
	}

	return waitfor(&cdata->async.result, ret);
}

/* Hash functions */
int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
		int hmac_mode, void *mackey, size_t mackeylen)
{
	int ret;

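	/* As above, request only CRYPTO_ALG_INTERNAL implementations. */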
	hdata->async.s = crypto_alloc_ahash(alg_name, CRYPTO_ALG_INTERNAL,
					    CRYPTO_ALG_INTERNAL);
	if (unlikely(IS_ERR(hdata->async.s))) {
		ddebug(1, "Failed to load transform for %s", alg_name);
		return PTR_ERR(hdata->async.s);
	}

	/* Copy the key from user and set to TFM. */
	if (hmac_mode != 0) {
		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
		if (unlikely(ret)) {
			ddebug(1, "Setting hmac key failed for %s-%zu.",
			       alg_name, mackeylen * 8);
			ret = -EINVAL;
			goto error;
		}
	}

	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
	hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

	init_completion(&hdata->async.result.completion);

	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
	if (unlikely(!hdata->async.request)) {
		derr(0, "error allocating async crypto request");
		ret = -ENOMEM;
		goto error;
	}

	ahash_request_set_callback(hdata->async.request,
				   CRYPTO_TFM_REQ_MAY_BACKLOG,
				   cryptodev_complete, &hdata->async.result);

	hdata->init = 1;
	return 0;

error:
	crypto_free_ahash(hdata->async.s);
	return ret;
}

void cryptodev_hash_deinit(struct hash_data *hdata)
{
	if (hdata->init) {
		ahash_request_free(hdata->async.request);
		crypto_free_ahash(hdata->async.s);
		hdata->init = 0;
	}
}

int cryptodev_hash_reset(struct hash_data *hdata)
{
	int ret;

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		derr(0, "error in crypto_ahash_init()");
		return ret;
	}

	return 0;
}

ssize_t cryptodev_hash_update(struct hash_data *hdata,
		struct scatterlist *sg, size_t len)
{
	int ret;

	reinit_completion(&hdata->async.result.completion);
	ahash_request_set_crypt(hdata->async.request, sg, NULL, len);

	ret = crypto_ahash_update(hdata->async.request);

	return waitfor(&hdata->async.result, ret);
}

int cryptodev_hash_final(struct hash_data *hdata, void *output)
{
	int ret;

	reinit_completion(&hdata->async.result.completion);
	ahash_request_set_crypt(hdata->async.request, NULL, output, 0);

	ret = crypto_ahash_final(hdata->async.request);

	return waitfor(&hdata->async.result, ret);
}

#ifdef CIOCCPHASH
/* import the current hash state of src to dst */
int cryptodev_hash_copy(struct hash_data *dst, struct hash_data *src)
{
	int ret, statesize;
	void *statedata = NULL;
	struct crypto_tfm *tfm;

	if (unlikely(src == NULL || !src->init ||
		     dst == NULL || !dst->init)) {
		return -EINVAL;
	}

	reinit_completion(&src->async.result.completion);

	statesize = crypto_ahash_statesize(src->async.s);
	if (unlikely(statesize <= 0))
		return -EINVAL;

	statedata = kzalloc(statesize, GFP_KERNEL);
	if (unlikely(statedata == NULL))
		return -ENOMEM;

	ret = crypto_ahash_export(src->async.request, statedata);
	if (unlikely(ret < 0)) {
		if (unlikely(ret == -ENOSYS)) {
			tfm = crypto_ahash_tfm(src->async.s);
			derr(0, "cryptodev_hash_copy: crypto_ahash_export not implemented for "
				"alg='%s', driver='%s'", crypto_tfm_alg_name(tfm),
				crypto_tfm_alg_driver_name(tfm));
		}
		goto out;
	}

	ret = crypto_ahash_import(dst->async.request, statedata);
	if (unlikely(ret == -ENOSYS)) {
		tfm = crypto_ahash_tfm(dst->async.s);
		derr(0, "cryptodev_hash_copy: crypto_ahash_import not implemented for "
			"alg='%s', driver='%s'", crypto_tfm_alg_name(tfm),
			crypto_tfm_alg_driver_name(tfm));
	}

out:
	kfree(statedata);
	return ret;
}
#endif /* CIOCCPHASH */