Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Algorithms:
   - add private key generation to ecdh

  Drivers:
   - add generic gcm(aes) to aesni-intel
   - add SafeXcel EIP197 crypto engine driver
   - add ecb(aes), cfb(aes) and ecb(des3_ede) to cavium
   - add support for CNN55XX adapters in cavium
   - add ctr mode to chcr
   - add support for gcm(aes) to omap"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (140 commits)
  crypto: testmgr - Reenable sha1/aes in FIPS mode
  crypto: ccp - Release locks before returning
  crypto: cavium/nitrox - dma_mapping_error() returns bool
  crypto: doc - fix typo in docs
  Documentation/bindings: Document the SafeXel cryptographic engine driver
  crypto: caam - fix gfp allocation flags (part II)
  crypto: caam - fix gfp allocation flags (part I)
  crypto: drbg - Fixes panic in wait_for_completion call
  crypto: caam - make of_device_ids const.
  crypto: vmx - remove unnecessary check
  crypto: n2 - make of_device_ids const
  crypto: inside-secure - use the base_end pointer in ring rollback
  crypto: inside-secure - increase the batch size
  crypto: inside-secure - only dequeue when needed
  crypto: inside-secure - get the backlog before dequeueing the request
  crypto: inside-secure - stop requeueing failed requests
  crypto: inside-secure - use one queue per hw ring
  crypto: inside-secure - update the context and request later
  crypto: inside-secure - align the cipher and hash send functions
  crypto: inside-secure - optimize DSE bufferability control
  ...
@@ -327,6 +327,15 @@ config HW_RANDOM_PPC4XX
 	  This option provides the kernel-side support for the TRNG hardware
 	  found in the security function of some PowerPC 4xx SoCs.
 
+config CRYPTO_DEV_OMAP
+	tristate "Support for OMAP crypto HW accelerators"
+	depends on ARCH_OMAP2PLUS
+	help
+	  OMAP processors have various crypto HW accelerators. Select this if
+	  you want to use the OMAP modules for any of the crypto algorithms.
+
+if CRYPTO_DEV_OMAP
+
 config CRYPTO_DEV_OMAP_SHAM
 	tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
 	depends on ARCH_OMAP2PLUS
@@ -348,6 +357,7 @@ config CRYPTO_DEV_OMAP_AES
 	select CRYPTO_CBC
 	select CRYPTO_ECB
 	select CRYPTO_CTR
+	select CRYPTO_AEAD
 	help
 	  OMAP processors have AES module accelerator. Select this if you
 	  want to use the OMAP module for AES algorithms.
@@ -364,6 +374,8 @@ config CRYPTO_DEV_OMAP_DES
 	  the ECB and CBC modes of operation are supported by the driver. Also
 	  accesses made on unaligned boundaries are supported.
 
+endif # CRYPTO_DEV_OMAP
+
 config CRYPTO_DEV_PICOXCELL
 	tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
 	depends on (ARCH_PICOXCELL || COMPILE_TEST) && HAVE_CLK
@@ -542,6 +554,7 @@ config CRYPTO_DEV_MXS_DCP
 
 source "drivers/crypto/qat/Kconfig"
 source "drivers/crypto/cavium/cpt/Kconfig"
+source "drivers/crypto/cavium/nitrox/Kconfig"
 
 config CRYPTO_DEV_CAVIUM_ZIP
 	tristate "Cavium ZIP driver"
@@ -656,4 +669,21 @@ config CRYPTO_DEV_BCM_SPU
 
 source "drivers/crypto/stm32/Kconfig"
 
+config CRYPTO_DEV_SAFEXCEL
+	tristate "Inside Secure's SafeXcel cryptographic engine driver"
+	depends on HAS_DMA && OF
+	depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT)
+	select CRYPTO_AES
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_HASH
+	select CRYPTO_HMAC
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	help
+	  This driver interfaces with the SafeXcel EIP-197 cryptographic engine
+	  designed by Inside Secure. Select this if you want to use CBC/ECB
+	  chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash
+	  algorithms.
+
 endif # CRYPTO_HW
@@ -6,6 +6,7 @@ obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
 obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
 obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
+obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
@@ -20,7 +21,9 @@ obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
 n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
-obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
+omap-aes-driver-objs := omap-aes.o omap-aes-gcm.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
@@ -39,3 +42,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
 obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
+obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
@@ -1179,6 +1179,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
 	dev_set_drvdata(dev, core_dev);
 	core_dev->ofdev = ofdev;
 	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
+	rc = -ENOMEM;
 	if (!core_dev->dev)
 		goto err_alloc_dev;
 
@@ -36,6 +36,7 @@
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
 #include <crypto/des.h>
+#include <crypto/hmac.h>
 #include <crypto/sha.h>
 #include <crypto/md5.h>
 #include <crypto/authenc.h>
@@ -2510,8 +2511,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
 	memcpy(ctx->opad, ctx->ipad, blocksize);
 
 	for (index = 0; index < blocksize; index++) {
-		ctx->ipad[index] ^= 0x36;
-		ctx->opad[index] ^= 0x5c;
+		ctx->ipad[index] ^= HMAC_IPAD_VALUE;
+		ctx->opad[index] ^= HMAC_OPAD_VALUE;
 	}
 
	flow_dump("  ipad: ", ctx->ipad, blocksize);
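
For context on the rename above: HMAC (RFC 2104) derives its inner and outer pads by XORing the block-sized key with the fixed bytes 0x36 and 0x5c, and this hunk merely swaps the magic numbers for the named constants from the newly included <crypto/hmac.h>. A minimal standalone sketch of the pad derivation (illustrative helper, not the driver's code):

#include <stdint.h>
#include <string.h>

#define HMAC_IPAD_VALUE 0x36	/* same values the kernel header provides */
#define HMAC_OPAD_VALUE 0x5c

/* Derive the HMAC ipad/opad blocks from a key that has already been
 * padded (or hashed) down to the hash block size. */
static void hmac_derive_pads(const uint8_t *key, size_t blocksize,
			     uint8_t *ipad, uint8_t *opad)
{
	size_t i;

	memcpy(ipad, key, blocksize);
	memcpy(opad, key, blocksize);
	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}
}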
@@ -2638,7 +2639,7 @@ static int aead_need_fallback(struct aead_request *req)
 	    (spu->spu_type == SPU_TYPE_SPUM) &&
 	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
 	    (ctx->digestsize != 16)) {
-		flow_log("%s() AES CCM needs fallbck for digest size %d\n",
+		flow_log("%s() AES CCM needs fallback for digest size %d\n",
 			 __func__, ctx->digestsize);
 		return 1;
 	}
@@ -1187,8 +1187,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct aead_edesc *edesc;
 	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
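
The recurring caam change in this and the following hunks drops CRYPTO_TFM_REQ_MAY_BACKLOG from the allocation-flag test: MAY_BACKLOG only says the request may be queued on a backlog, while MAY_SLEEP is the flag that actually licenses a sleeping GFP_KERNEL allocation. A hedged sketch of the idiom as a helper (hypothetical name; the driver open-codes the expression at each call site):

#include <linux/crypto.h>
#include <linux/gfp.h>

/* Choose allocation flags from the crypto request flags: sleep only
 * when the caller explicitly allowed it via CRYPTO_TFM_REQ_MAY_SLEEP. */
static inline gfp_t req_gfp_flags(u32 req_flags)
{
	return (req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
	       GFP_KERNEL : GFP_ATOMIC;
}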
@@ -1475,8 +1475,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct ablkcipher_edesc *edesc;
@@ -1681,8 +1680,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
 	struct ablkcipher_edesc *edesc;
@@ -555,8 +555,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
 						 typeof(*alg), aead);
 	struct device *qidev = ctx->qidev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
@@ -808,8 +808,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *qidev = ctx->qidev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct ablkcipher_edesc *edesc;
@@ -953,8 +952,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *qidev = ctx->qidev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
 	struct ablkcipher_edesc *edesc;
@@ -719,8 +719,8 @@ static int ahash_update_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = current_buf(state);
 	int *buflen = current_buflen(state);
 	u8 *next_buf = alt_buf(state);
@@ -849,8 +849,8 @@ static int ahash_final_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	int buflen = *current_buflen(state);
 	u32 *desc;
 	int sec4_sg_bytes, sec4_sg_src_index;
@@ -926,8 +926,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	int buflen = *current_buflen(state);
 	u32 *desc;
 	int sec4_sg_src_index;
@@ -1013,8 +1013,8 @@ static int ahash_digest(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	u32 *desc;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	int src_nents, mapped_nents;
@@ -1093,8 +1093,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = current_buf(state);
 	int buflen = *current_buflen(state);
 	u32 *desc;
@@ -1154,8 +1154,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = current_buf(state);
 	int *buflen = current_buflen(state);
 	u8 *next_buf = alt_buf(state);
@@ -1280,8 +1280,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	int buflen = *current_buflen(state);
 	u32 *desc;
 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
@@ -1370,8 +1370,8 @@ static int ahash_update_first(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *next_buf = alt_buf(state);
 	int *next_buflen = alt_buflen(state);
 	int to_hash;
(+446 −26: file diff suppressed because it is too large)
@@ -12,22 +12,76 @@
 #include "compat.h"
 #include "pdb.h"
 
+/**
+ * caam_priv_key_form - CAAM RSA private key representation
+ * CAAM RSA private key may take any of three forms.
+ *
+ * 1. The first representation consists of the pair (n, d), where the
+ *    components have the following meanings:
+ *        n      the RSA modulus
+ *        d      the RSA private exponent
+ *
+ * 2. The second representation consists of the triplet (p, q, d), where the
+ *    components have the following meanings:
+ *        p      the first prime factor of the RSA modulus n
+ *        q      the second prime factor of the RSA modulus n
+ *        d      the RSA private exponent
+ *
+ * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
+ *    where the components have the following meanings:
+ *        p      the first prime factor of the RSA modulus n
+ *        q      the second prime factor of the RSA modulus n
+ *        dP     the first factor's CRT exponent
+ *        dQ     the second factor's CRT exponent
+ *        qInv   the (first) CRT coefficient
+ *
+ * The benefit of using the third or the second key form is lower computational
+ * cost for the decryption and signature operations.
+ */
+enum caam_priv_key_form {
+	FORM1,
+	FORM2,
+	FORM3
+};
+
 /**
  * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
  * @n           : RSA modulus raw byte stream
  * @e           : RSA public exponent raw byte stream
  * @d           : RSA private exponent raw byte stream
+ * @p           : RSA prime factor p of RSA modulus n
+ * @q           : RSA prime factor q of RSA modulus n
+ * @dp          : RSA CRT exponent of p
+ * @dq          : RSA CRT exponent of q
+ * @qinv        : RSA CRT coefficient
+ * @tmp1        : CAAM uses this temporary buffer as internal state buffer.
+ *                It is assumed to be as long as p.
+ * @tmp2        : CAAM uses this temporary buffer as internal state buffer.
+ *                It is assumed to be as long as q.
 * @n_sz        : length in bytes of RSA modulus n
 * @e_sz        : length in bytes of RSA public exponent
 * @d_sz        : length in bytes of RSA private exponent
+ * @p_sz        : length in bytes of RSA prime factor p of RSA modulus n
+ * @q_sz        : length in bytes of RSA prime factor q of RSA modulus n
+ * @priv_form   : CAAM RSA private key representation
 */
 struct caam_rsa_key {
 	u8 *n;
 	u8 *e;
 	u8 *d;
+	u8 *p;
+	u8 *q;
+	u8 *dp;
+	u8 *dq;
+	u8 *qinv;
+	u8 *tmp1;
+	u8 *tmp2;
 	size_t n_sz;
 	size_t e_sz;
 	size_t d_sz;
+	size_t p_sz;
+	size_t q_sz;
+	enum caam_priv_key_form priv_form;
 };
 
 /**
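
An aside on why forms 2 and 3 are cheaper (standard RSA-CRT arithmetic, not taken from the patch itself): with the quantities named in the comment above, decryption of a ciphertext c reduces to two half-size modular exponentiations,

    m_1 = c^{d_P} \bmod p
    m_2 = c^{d_Q} \bmod q
    h   = q_{inv} \cdot (m_1 - m_2) \bmod p
    m   = m_2 + h \cdot q

where d_P = d \bmod (p-1), d_Q = d \bmod (q-1) and q_{inv} = q^{-1} \bmod p. Each exponentiation runs modulo a prime half the size of n, roughly a fourfold speedup over computing c^d \bmod n directly; that is the "lower computational cost" the kernel-doc refers to.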
@@ -59,6 +113,8 @@ struct rsa_edesc {
 	union {
 		struct rsa_pub_pdb pub;
 		struct rsa_priv_f1_pdb priv_f1;
+		struct rsa_priv_f2_pdb priv_f2;
+		struct rsa_priv_f3_pdb priv_f3;
 	} pdb;
 	u32 hw_desc[];
 };
@@ -66,5 +122,7 @@ struct rsa_edesc {
 /* Descriptor construction primitives. */
 void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
 void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
+void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
+void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
 
 #endif
@@ -536,7 +536,7 @@ static int caam_jr_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static struct of_device_id caam_jr_match[] = {
+static const struct of_device_id caam_jr_match[] = {
 	{
 		.compatible = "fsl,sec-v4.0-job-ring",
 	},
@@ -483,6 +483,8 @@ struct dsa_verify_pdb {
 #define RSA_PDB_E_MASK          (0xFFF << RSA_PDB_E_SHIFT)
 #define RSA_PDB_D_SHIFT         12
 #define RSA_PDB_D_MASK          (0xFFF << RSA_PDB_D_SHIFT)
+#define RSA_PDB_Q_SHIFT         12
+#define RSA_PDB_Q_MASK          (0xFFF << RSA_PDB_Q_SHIFT)
 
 #define RSA_PDB_SGF_F           (0x8 << RSA_PDB_SGF_SHIFT)
 #define RSA_PDB_SGF_G           (0x4 << RSA_PDB_SGF_SHIFT)
@@ -490,6 +492,8 @@ struct dsa_verify_pdb {
 #define RSA_PRIV_PDB_SGF_G      (0x8 << RSA_PDB_SGF_SHIFT)
 
 #define RSA_PRIV_KEY_FRM_1      0
+#define RSA_PRIV_KEY_FRM_2      1
+#define RSA_PRIV_KEY_FRM_3      2
 
 /**
  * RSA Encrypt Protocol Data Block
@@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
 	dma_addr_t d_dma;
 } __packed;
 
+/**
+ * RSA Decrypt PDB - Private Key Form #2
+ * @sgf     : scatter-gather field
+ * @g_dma   : dma address of encrypted input data
+ * @f_dma   : dma address of output data
+ * @d_dma   : dma address of RSA private exponent
+ * @p_dma   : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma   : dma address of RSA prime factor q of RSA modulus n
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f2_pdb {
+	u32 sgf;
+	dma_addr_t g_dma;
+	dma_addr_t f_dma;
+	dma_addr_t d_dma;
+	dma_addr_t p_dma;
+	dma_addr_t q_dma;
+	dma_addr_t tmp1_dma;
+	dma_addr_t tmp2_dma;
+	u32 p_q_len;
+} __packed;
+
+/**
+ * RSA Decrypt PDB - Private Key Form #3
+ * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors
+ * of the RSA modulus.
+ * @sgf     : scatter-gather field
+ * @g_dma   : dma address of encrypted input data
+ * @f_dma   : dma address of output data
+ * @c_dma   : dma address of RSA CRT coefficient
+ * @p_dma   : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma   : dma address of RSA prime factor q of RSA modulus n
+ * @dp_dma  : dma address of RSA CRT exponent of RSA prime factor p
+ * @dq_dma  : dma address of RSA CRT exponent of RSA prime factor q
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f3_pdb {
+	u32 sgf;
+	dma_addr_t g_dma;
+	dma_addr_t f_dma;
+	dma_addr_t c_dma;
+	dma_addr_t p_dma;
+	dma_addr_t q_dma;
+	dma_addr_t dp_dma;
+	dma_addr_t dq_dma;
+	dma_addr_t tmp1_dma;
+	dma_addr_t tmp2_dma;
+	u32 p_q_len;
+} __packed;
+
 #endif
@@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
 	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
 			 RSA_PRIV_KEY_FRM_1);
 }
+
+/* Descriptor for RSA Private operation - Private Key Form #2 */
+void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->d_dma);
+	append_ptr(desc, pdb->p_dma);
+	append_ptr(desc, pdb->q_dma);
+	append_ptr(desc, pdb->tmp1_dma);
+	append_ptr(desc, pdb->tmp2_dma);
+	append_cmd(desc, pdb->p_q_len);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+			 RSA_PRIV_KEY_FRM_2);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #3 */
+void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->c_dma);
+	append_ptr(desc, pdb->p_dma);
+	append_ptr(desc, pdb->q_dma);
+	append_ptr(desc, pdb->dp_dma);
+	append_ptr(desc, pdb->dq_dma);
+	append_ptr(desc, pdb->tmp1_dma);
+	append_ptr(desc, pdb->tmp2_dma);
+	append_cmd(desc, pdb->p_q_len);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+			 RSA_PRIV_KEY_FRM_3);
+}
@@ -98,7 +98,6 @@ static inline void update_output_data(struct cpt_request_info *req_info,
 }
 
 static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
-				 u32 cipher_type, u32 aes_key_type,
 				 u32 *argcnt)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
@@ -124,11 +123,11 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
 	req_info->req.param1 = req->nbytes; /* Encryption Data length */
 	req_info->req.param2 = 0; /*Auth data length */
 
-	fctx->enc.enc_ctrl.e.enc_cipher = cipher_type;
-	fctx->enc.enc_ctrl.e.aes_key = aes_key_type;
+	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
 	fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;
 
-	if (cipher_type == AES_XTS)
+	if (ctx->cipher_type == AES_XTS)
 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
 	else
 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
@@ -154,14 +153,13 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
 }
 
 static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc,
-				    u32 cipher_type, u32 aes_key_type,
 				    u32 enc_iv_len)
 {
 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
 	struct cpt_request_info *req_info = &rctx->cpt_req;
 	u32 argcnt = 0;
 
-	create_ctx_hdr(req, enc, cipher_type, aes_key_type, &argcnt);
+	create_ctx_hdr(req, enc, &argcnt);
 	update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
 	update_input_data(req_info, req->src, req->nbytes, &argcnt);
 	req_info->incnt = argcnt;
@@ -177,7 +175,6 @@ static inline void store_cb_info(struct ablkcipher_request *req,
 }
 
 static inline void create_output_list(struct ablkcipher_request *req,
-				      u32 cipher_type,
 				      u32 enc_iv_len)
 {
 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
@@ -197,12 +194,9 @@ static inline void create_output_list(struct ablkcipher_request *req,
 	req_info->outcnt = argcnt;
 }
 
-static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc,
-			      u32 cipher_type)
+static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	u32 key_type = AES_128_BIT;
 	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
 	u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
 	struct fc_context *fctx = &rctx->fctx;
@@ -210,36 +204,10 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc,
 	void *cdev = NULL;
 	int status;
 
-	switch (ctx->key_len) {
-	case 16:
-		key_type = AES_128_BIT;
-		break;
-	case 24:
-		key_type = AES_192_BIT;
-		break;
-	case 32:
-		if (cipher_type == AES_XTS)
-			key_type = AES_128_BIT;
-		else
-			key_type = AES_256_BIT;
-		break;
-	case 64:
-		if (cipher_type == AES_XTS)
-			key_type = AES_256_BIT;
-		else
-			return -EINVAL;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (cipher_type == DES3_CBC)
-		key_type = 0;
-
 	memset(req_info, 0, sizeof(struct cpt_request_info));
 	memset(fctx, 0, sizeof(struct fc_context));
-	create_input_list(req, enc, cipher_type, key_type, enc_iv_len);
-	create_output_list(req, cipher_type, enc_iv_len);
+	create_input_list(req, enc, enc_iv_len);
+	create_output_list(req, enc_iv_len);
 	store_cb_info(req, req_info);
 	cdev = dev_handle.cdev[smp_processor_id()];
 	status = cptvf_do_request(cdev, req_info);
@@ -254,34 +222,14 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc,
 	return -EINPROGRESS;
 }
 
-int cvm_des3_encrypt_cbc(struct ablkcipher_request *req)
+int cvm_encrypt(struct ablkcipher_request *req)
 {
-	return cvm_enc_dec(req, true, DES3_CBC);
+	return cvm_enc_dec(req, true);
 }
 
-int cvm_des3_decrypt_cbc(struct ablkcipher_request *req)
+int cvm_decrypt(struct ablkcipher_request *req)
 {
-	return cvm_enc_dec(req, false, DES3_CBC);
-}
-
-int cvm_aes_encrypt_xts(struct ablkcipher_request *req)
-{
-	return cvm_enc_dec(req, true, AES_XTS);
-}
-
-int cvm_aes_decrypt_xts(struct ablkcipher_request *req)
-{
-	return cvm_enc_dec(req, false, AES_XTS);
-}
-
-int cvm_aes_encrypt_cbc(struct ablkcipher_request *req)
-{
-	return cvm_enc_dec(req, true, AES_CBC);
-}
-
-int cvm_aes_decrypt_cbc(struct ablkcipher_request *req)
-{
-	return cvm_enc_dec(req, false, AES_CBC);
+	return cvm_enc_dec(req, false);
 }
 
 int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
@@ -299,24 +247,93 @@ int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	ctx->key_len = keylen;
 	memcpy(ctx->enc_key, key1, keylen / 2);
 	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
+	ctx->cipher_type = AES_XTS;
+	switch (ctx->key_len) {
+	case 32:
+		ctx->key_type = AES_128_BIT;
+		break;
+	case 64:
+		ctx->key_type = AES_256_BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	return 0;
 }
 
-int cvm_enc_dec_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
-		       u32 keylen)
+static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
 {
 	if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
 		ctx->key_len = keylen;
+		switch (ctx->key_len) {
+		case 16:
+			ctx->key_type = AES_128_BIT;
+			break;
+		case 24:
+			ctx->key_type = AES_192_BIT;
+			break;
+		case 32:
+			ctx->key_type = AES_256_BIT;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (ctx->cipher_type == DES3_CBC)
+			ctx->key_type = 0;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+		      u32 keylen, u8 cipher_type)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->cipher_type = cipher_type;
+	if (!cvm_validate_keylen(ctx, keylen)) {
 		memcpy(ctx->enc_key, key, keylen);
 		return 0;
+	} else {
+		crypto_ablkcipher_set_flags(cipher,
+					    CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
 	}
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
-	return -EINVAL;
 }
+
+static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			      u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, AES_CBC);
+}
+
+static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			      u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, AES_ECB);
+}
+
+static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			      u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, AES_CFB);
+}
+
+static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			       u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, DES3_CBC);
+}
+
+static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			       u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, DES3_ECB);
+}
 
 int cvm_enc_dec_init(struct crypto_tfm *tfm)
@@ -349,8 +366,8 @@ struct crypto_alg algs[] = { {
 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
 			.setkey = cvm_xts_setkey,
-			.encrypt = cvm_aes_encrypt_xts,
-			.decrypt = cvm_aes_decrypt_xts,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
 		},
 	},
 	.cra_init = cvm_enc_dec_init,
@@ -369,9 +386,51 @@ struct crypto_alg algs[] = { {
 			.ivsize = AES_BLOCK_SIZE,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
-			.setkey = cvm_enc_dec_setkey,
-			.encrypt = cvm_aes_encrypt_cbc,
-			.decrypt = cvm_aes_decrypt_cbc,
+			.setkey = cvm_cbc_aes_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
 		},
 	},
 	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+}, {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "ecb(aes)",
+	.cra_driver_name = "cavium-ecb-aes",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.ivsize = AES_BLOCK_SIZE,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = cvm_ecb_aes_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+}, {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "cfb(aes)",
+	.cra_driver_name = "cavium-cfb-aes",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.ivsize = AES_BLOCK_SIZE,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = cvm_cfb_aes_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
@@ -390,9 +449,30 @@ struct crypto_alg algs[] = { {
 			.min_keysize = DES3_EDE_KEY_SIZE,
 			.max_keysize = DES3_EDE_KEY_SIZE,
 			.ivsize = DES_BLOCK_SIZE,
-			.setkey = cvm_enc_dec_setkey,
-			.encrypt = cvm_des3_encrypt_cbc,
-			.decrypt = cvm_des3_decrypt_cbc,
+			.setkey = cvm_cbc_des3_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
 		},
 	},
 	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+}, {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "ecb(des3_ede)",
+	.cra_driver_name = "cavium-ecb-des3_ede",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			.setkey = cvm_ecb_des3_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
@@ -77,6 +77,11 @@ union encr_ctrl {
 	} e;
 };
 
+struct cvm_cipher {
+	const char *name;
+	u8 value;
+};
+
 struct enc_context {
 	union encr_ctrl enc_ctrl;
 	u8 encr_key[32];
@@ -96,6 +101,8 @@ struct fc_context {
 struct cvm_enc_ctx {
 	u32 key_len;
 	u8 enc_key[MAX_KEY_SIZE];
+	u8 cipher_type:4;
+	u8 key_type:2;
 };
 
 struct cvm_des3_ctx {
@@ -525,7 +525,7 @@ static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
 	intr = cptvf_read_vf_misc_intr_status(cptvf);
 	/*Check for MISC interrupt types*/
 	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
-		dev_err(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
+		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
 			intr, cptvf->vfid);
 		cptvf_handle_mbox_intr(cptvf);
 		cptvf_clear_mbox_intr(cptvf);
@@ -0,0 +1,21 @@
+#
+# Cavium NITROX Crypto Device configuration
+#
+config CRYPTO_DEV_NITROX
+	tristate
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select FW_LOADER
+
+config CRYPTO_DEV_NITROX_CNN55XX
+	tristate "Support for Cavium CNN55XX driver"
+	depends on PCI_MSI && 64BIT
+	select CRYPTO_DEV_NITROX
+	default m
+	help
+	  Support for Cavium NITROX family CNN55XX driver
+	  for accelerating crypto workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called n5pf.
@@ -0,0 +1,8 @@
+obj-$(CONFIG_CRYPTO_DEV_NITROX_CNN55XX) += n5pf.o
+
+n5pf-objs := nitrox_main.o \
+	nitrox_isr.o \
+	nitrox_lib.o \
+	nitrox_hal.o \
+	nitrox_reqmgr.o \
+	nitrox_algs.o
@@ -0,0 +1,457 @@
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_common.h"
+#include "nitrox_req.h"
+
+#define PRIO 4001
+
+struct nitrox_cipher {
+	const char *name;
+	enum flexi_cipher value;
+};
+
+/**
+ * supported cipher list
+ */
+static const struct nitrox_cipher flexi_cipher_table[] = {
+	{ "null", CIPHER_NULL },
+	{ "cbc(des3_ede)", CIPHER_3DES_CBC },
+	{ "ecb(des3_ede)", CIPHER_3DES_ECB },
+	{ "cbc(aes)", CIPHER_AES_CBC },
+	{ "ecb(aes)", CIPHER_AES_ECB },
+	{ "cfb(aes)", CIPHER_AES_CFB },
+	{ "rfc3686(ctr(aes))", CIPHER_AES_CTR },
+	{ "xts(aes)", CIPHER_AES_XTS },
+	{ "cts(cbc(aes))", CIPHER_AES_CBC_CTS },
+	{ NULL, CIPHER_INVALID }
+};
+
+static enum flexi_cipher flexi_cipher_type(const char *name)
+{
+	const struct nitrox_cipher *cipher = flexi_cipher_table;
+
+	while (cipher->name) {
+		if (!strcmp(cipher->name, name))
+			break;
+		cipher++;
+	}
+	return cipher->value;
+}
+
+static int flexi_aes_keylen(int keylen)
+{
+	int aes_keylen;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		aes_keylen = 1;
+		break;
+	case AES_KEYSIZE_192:
+		aes_keylen = 2;
+		break;
+	case AES_KEYSIZE_256:
+		aes_keylen = 3;
+		break;
+	default:
+		aes_keylen = -EINVAL;
+		break;
+	}
+	return aes_keylen;
+}
+
+static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
+{
+	struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+	void *fctx;
+
+	/* get the first device */
+	nctx->ndev = nitrox_get_first_device();
+	if (!nctx->ndev)
+		return -ENODEV;
+
+	/* allocate nitrox crypto context */
+	fctx = crypto_alloc_context(nctx->ndev);
+	if (!fctx) {
+		nitrox_put_device(nctx->ndev);
+		return -ENOMEM;
+	}
+	nctx->u.ctx_handle = (uintptr_t)fctx;
+	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
+				    sizeof(struct nitrox_kcrypt_request));
+	return 0;
+}
+
+static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
+{
+	struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+
+	/* free the nitrox crypto context */
+	if (nctx->u.ctx_handle) {
+		struct flexi_crypto_context *fctx = nctx->u.fctx;
+
+		memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
+		memset(&fctx->auth, 0, sizeof(struct auth_keys));
+		crypto_free_context((void *)fctx);
+	}
+	nitrox_put_device(nctx->ndev);
+
+	nctx->u.ctx_handle = 0;
+	nctx->ndev = NULL;
+}
+
+static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher,
+					 int aes_keylen, const u8 *key,
+					 unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+	struct flexi_crypto_context *fctx;
+	enum flexi_cipher cipher_type;
+	const char *name;
+
+	name = crypto_tfm_alg_name(tfm);
+	cipher_type = flexi_cipher_type(name);
+	if (unlikely(cipher_type == CIPHER_INVALID)) {
+		pr_err("unsupported cipher: %s\n", name);
+		return -EINVAL;
+	}
+
+	/* fill crypto context */
+	fctx = nctx->u.fctx;
+	fctx->flags = 0;
+	fctx->w0.cipher_type = cipher_type;
+	fctx->w0.aes_keylen = aes_keylen;
+	fctx->w0.iv_source = IV_FROM_DPTR;
+	fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0);
+	/* copy the key to context */
+	memcpy(fctx->crypto.u.key, key, keylen);
+
+	return 0;
+}
+
+static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			     unsigned int keylen)
+{
+	int aes_keylen;
+
+	aes_keylen = flexi_aes_keylen(keylen);
+	if (aes_keylen < 0) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static void nitrox_skcipher_callback(struct skcipher_request *skreq,
+				     int err)
+{
+	if (err) {
+		pr_err_ratelimited("request failed status 0x%0x\n", err);
+		err = -EINVAL;
+	}
+	skcipher_request_complete(skreq, err);
+}
+
+static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+	struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
+	struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+	int ivsize = crypto_skcipher_ivsize(cipher);
+	struct se_crypto_request *creq;
+
+	creq = &nkreq->creq;
+	creq->flags = skreq->base.flags;
+	creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		     GFP_KERNEL : GFP_ATOMIC;
+
+	/* fill the request */
+	creq->ctrl.value = 0;
+	creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+	creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT);
+	/* param0: length of the data to be encrypted */
+	creq->gph.param0 = cpu_to_be16(skreq->cryptlen);
+	creq->gph.param1 = 0;
+	/* param2: encryption data offset */
+	creq->gph.param2 = cpu_to_be16(ivsize);
+	creq->gph.param3 = 0;
+
+	creq->ctx_handle = nctx->u.ctx_handle;
+	creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
+
+	/* copy the iv */
+	memcpy(creq->iv, skreq->iv, ivsize);
+	creq->ivsize = ivsize;
+	creq->src = skreq->src;
+	creq->dst = skreq->dst;
+
+	nkreq->nctx = nctx;
+	nkreq->skreq = skreq;
+
+	/* send the crypto request */
+	return nitrox_process_se_request(nctx->ndev, creq,
+					 nitrox_skcipher_callback, skreq);
+}
+
+static int nitrox_aes_encrypt(struct skcipher_request *skreq)
+{
+	return nitrox_skcipher_crypt(skreq, true);
+}
+
+static int nitrox_aes_decrypt(struct skcipher_request *skreq)
+{
+	return nitrox_skcipher_crypt(skreq, false);
+}
+
+static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
+			      const u8 *key, unsigned int keylen)
+{
+	if (keylen != DES3_EDE_KEY_SIZE) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	return nitrox_skcipher_setkey(cipher, 0, key, keylen);
+}
+
+static int nitrox_3des_encrypt(struct skcipher_request *skreq)
+{
+	return nitrox_skcipher_crypt(skreq, true);
+}
+
+static int nitrox_3des_decrypt(struct skcipher_request *skreq)
+{
+	return nitrox_skcipher_crypt(skreq, false);
+}
+
+static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+	struct flexi_crypto_context *fctx;
+	int aes_keylen, ret;
+
+	ret = xts_check_key(tfm, key, keylen);
+	if (ret)
+		return ret;
+
+	keylen /= 2;
+
+	aes_keylen = flexi_aes_keylen(keylen);
+	if (aes_keylen < 0) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	fctx = nctx->u.fctx;
+	/* copy KEY2 */
+	memcpy(fctx->auth.u.key2, (key + keylen), keylen);
+
+	return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
+					 const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+	struct flexi_crypto_context *fctx;
+	int aes_keylen;
+
+	if (keylen < CTR_RFC3686_NONCE_SIZE)
+		return -EINVAL;
+
+	fctx = nctx->u.fctx;
+
+	memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE),
+	       CTR_RFC3686_NONCE_SIZE);
+
+	keylen -= CTR_RFC3686_NONCE_SIZE;
+
+	aes_keylen = flexi_aes_keylen(keylen);
+	if (aes_keylen < 0) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static struct skcipher_alg nitrox_skciphers[] = { {
+	.base = {
+		.cra_name = "cbc(aes)",
+		.cra_driver_name = "n5_cbc(aes)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "ecb(aes)",
+		.cra_driver_name = "n5_ecb(aes)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "cfb(aes)",
+		.cra_driver_name = "n5_cfb(aes)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "xts(aes)",
+		.cra_driver_name = "n5_xts(aes)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = 2 * AES_MIN_KEY_SIZE,
+	.max_keysize = 2 * AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_xts_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "rfc3686(ctr(aes))",
+		.cra_driver_name = "n5_rfc3686(ctr(aes))",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 1,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+	.ivsize = CTR_RFC3686_IV_SIZE,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+	.setkey = nitrox_aes_ctr_rfc3686_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+}, {
+	.base = {
+		.cra_name = "cts(cbc(aes))",
+		.cra_driver_name = "n5_cts(cbc(aes))",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "cbc(des3_ede)",
+		.cra_driver_name = "n5_cbc(des3_ede)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = DES3_EDE_KEY_SIZE,
+	.max_keysize = DES3_EDE_KEY_SIZE,
+	.ivsize = DES3_EDE_BLOCK_SIZE,
+	.setkey = nitrox_3des_setkey,
+	.encrypt = nitrox_3des_encrypt,
+	.decrypt = nitrox_3des_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "ecb(des3_ede)",
+		.cra_driver_name = "n5_ecb(des3_ede)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = DES3_EDE_KEY_SIZE,
+	.max_keysize = DES3_EDE_KEY_SIZE,
+	.ivsize = DES3_EDE_BLOCK_SIZE,
+	.setkey = nitrox_3des_setkey,
+	.encrypt = nitrox_3des_encrypt,
+	.decrypt = nitrox_3des_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}
+
+};
+
+int nitrox_crypto_register(void)
+{
+	return crypto_register_skciphers(nitrox_skciphers,
+					 ARRAY_SIZE(nitrox_skciphers));
+}
+
+void nitrox_crypto_unregister(void)
+{
+	crypto_unregister_skciphers(nitrox_skciphers,
+				    ARRAY_SIZE(nitrox_skciphers));
+}
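
For orientation, the skcipher algorithms registered above are driven through the kernel's generic crypto API; nothing calls the nitrox entry points directly. A hedged in-kernel usage sketch (the demo function and buffer names are placeholders, and async completion handling is trimmed):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt one contiguous buffer with cbc(aes); the crypto core may pick
 * n5_cbc(aes) when the NITROX driver is loaded and its priority wins. */
static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
				const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	/* an ASYNC tfm such as these may return -EINPROGRESS here; a real
	 * caller sets a completion callback or uses the wait helpers */
	err = crypto_skcipher_encrypt(req);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}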
@@ -0,0 +1,42 @@
+#ifndef __NITROX_COMMON_H
+#define __NITROX_COMMON_H
+
+#include "nitrox_dev.h"
+#include "nitrox_req.h"
+
+int nitrox_crypto_register(void);
+void nitrox_crypto_unregister(void);
+void *crypto_alloc_context(struct nitrox_device *ndev);
+void crypto_free_context(void *ctx);
+struct nitrox_device *nitrox_get_first_device(void);
+void nitrox_put_device(struct nitrox_device *ndev);
+
+void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
+int nitrox_pf_init_isr(struct nitrox_device *ndev);
+
+int nitrox_common_sw_init(struct nitrox_device *ndev);
+void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
+
+void pkt_slc_resp_handler(unsigned long data);
+int nitrox_process_se_request(struct nitrox_device *ndev,
+			      struct se_crypto_request *req,
+			      completion_t cb,
+			      struct skcipher_request *skreq);
+void backlog_qflush_work(struct work_struct *work);
+
+void nitrox_config_emu_unit(struct nitrox_device *ndev);
+void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
+void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
+void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
+void nitrox_config_nps_unit(struct nitrox_device *ndev);
+void nitrox_config_pom_unit(struct nitrox_device *ndev);
+void nitrox_config_rand_unit(struct nitrox_device *ndev);
+void nitrox_config_efl_unit(struct nitrox_device *ndev);
+void nitrox_config_bmi_unit(struct nitrox_device *ndev);
+void nitrox_config_bmo_unit(struct nitrox_device *ndev);
+void nitrox_config_lbc_unit(struct nitrox_device *ndev);
+void invalidate_lbc(struct nitrox_device *ndev);
+void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
+void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
+
+#endif /* __NITROX_COMMON_H */
(file diff suppressed because it is too large; some files were not shown because too many files have changed in this diff)