Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.9:

  API:
   - The crypto engine code now supports hashes.

  Algorithms:
   - Allow keys >= 2048 bits in FIPS mode for RSA.

  Drivers:
   - Memory overwrite fix for vmx ghash.
   - Add support for building ARM sha1-neon in Thumb2 mode.
   - Reenable ARM ghash-ce code by adding import/export.
   - Reenable img-hash by adding import/export.
   - Add support for multiple cores in omap-aes.
   - Add little-endian support for sha1-powerpc.
   - Add Cavium HWRNG driver for ThunderX SoC"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (137 commits)
  crypto: caam - treat SGT address pointer as u64
  crypto: ccp - Make syslog errors human-readable
  crypto: ccp - clean up data structure
  crypto: vmx - Ensure ghash-generic is enabled
  crypto: testmgr - add guard to dst buffer for ahash_export
  crypto: caam - Unmap region obtained by of_iomap
  crypto: sha1-powerpc - little-endian support
  crypto: gcm - Fix IV buffer size in crypto_gcm_setkey
  crypto: vmx - Fix memory corruption caused by p8_ghash
  crypto: ghash-generic - move common definitions to a new header file
  crypto: caam - fix sg dump
  hwrng: omap - Only fail if pm_runtime_get_sync returns < 0
  crypto: omap-sham - shrink the internal buffer size
  crypto: omap-sham - add support for export/import
  crypto: omap-sham - convert driver logic to use sgs for data xmit
  crypto: omap-sham - change the DMA threshold value to a define
  crypto: omap-sham - add support functions for sg based data handling
  crypto: omap-sham - rename sgl to sgl_tmp for deprecation
  crypto: omap-sham - align algorithms on word offset
  crypto: omap-sham - add context export/import stubs
  ...
@@ -797,7 +797,8 @@ kernel crypto API            |   Caller
    include/linux/crypto.h and their definition can be seen below.
    The former function registers a single transformation, while
    the latter works on an array of transformation descriptions.
    The latter is useful when registering transformations in bulk.
    The latter is useful when registering transformations in bulk,
    for example when a driver implements multiple transformations.
   </para>

   <programlisting>
@@ -822,18 +823,31 @@ kernel crypto API            |   Caller
   </para>

   <para>
    The bulk registration / unregistration functions require
    that struct crypto_alg is an array of count size. These
    functions simply loop over that array and register /
    unregister each individual algorithm. If an error occurs,
    the loop is terminated at the offending algorithm definition.
    That means, the algorithms prior to the offending algorithm
    are successfully registered. Note, the caller has no way of
    knowing which cipher implementations have successfully
    registered. If this is important to know, the caller should
    loop through the different implementations using the single
    instance *_alg functions for each individual implementation.
    The bulk registration/unregistration functions
    register/unregister each transformation in the given array of
    length count. They handle errors as follows:
   </para>
   <itemizedlist>
    <listitem>
     <para>
      crypto_register_algs() succeeds if and only if it
      successfully registers all the given transformations. If an
      error occurs partway through, then it rolls back successful
      registrations before returning the error code. Note that if
      a driver needs to handle registration errors for individual
      transformations, then it will need to use the non-bulk
      function crypto_register_alg() instead.
     </para>
    </listitem>
    <listitem>
     <para>
      crypto_unregister_algs() tries to unregister all the given
      transformations, continuing on error. It logs errors and
      always returns zero.
     </para>
    </listitem>
   </itemizedlist>

 </sect1>

 <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
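The rollback semantics documented in the new text above are easy to see from a caller's side. A minimal sketch of a module using the bulk interface (hypothetical code, not from this merge; the two algorithm definitions are incomplete placeholders):

#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Placeholder definitions; a real driver would also fill in cra_flags,
 * cra_ctxsize, cra_module and the type-specific callbacks. */
static struct crypto_alg my_algs[2] = { {
    .cra_name        = "alg-a",
    .cra_driver_name = "alg-a-mydrv",
    .cra_priority    = 300,
}, {
    .cra_name        = "alg-b",
    .cra_driver_name = "alg-b-mydrv",
    .cra_priority    = 300,
} };

static int __init mydrv_init(void)
{
    /* Either both algorithms register, or neither does: on a partial
     * failure crypto_register_algs() unwinds the earlier successes
     * and returns the error. */
    return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
}

static void __exit mydrv_exit(void)
{
    /* Keeps going past individual failures; errors are only logged. */
    crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");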
@@ -138,7 +138,7 @@ static struct shash_alg ghash_alg = {
    .setkey     = ghash_setkey,
    .descsize   = sizeof(struct ghash_desc_ctx),
    .base       = {
        .cra_name        = "ghash",
        .cra_name        = "__ghash",
        .cra_driver_name = "__driver-ghash-ce",
        .cra_priority    = 0,
        .cra_flags       = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
@@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req)
    }
}

static int ghash_async_import(struct ahash_request *req, const void *in)
{
    struct ahash_request *cryptd_req = ahash_request_ctx(req);
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
    struct shash_desc *desc = cryptd_shash_desc(cryptd_req);

    desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
    desc->flags = req->base.flags;

    return crypto_shash_import(desc, in);
}

static int ghash_async_export(struct ahash_request *req, void *out)
{
    struct ahash_request *cryptd_req = ahash_request_ctx(req);
    struct shash_desc *desc = cryptd_shash_desc(cryptd_req);

    return crypto_shash_export(desc, out);
}

static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
                              unsigned int keylen)
{
@@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = {
    .final           = ghash_async_final,
    .setkey          = ghash_async_setkey,
    .digest          = ghash_async_digest,
    .import          = ghash_async_import,
    .export          = ghash_async_export,
    .halg.digestsize = GHASH_DIGEST_SIZE,
    .halg.statesize  = sizeof(struct ghash_desc_ctx),
    .halg.base       = {
        .cra_name        = "ghash",
        .cra_driver_name = "ghash-ce",
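These two hunks are the "Reenable ARM ghash-ce code by adding import/export" item from the pull message: the cryptd-backed async wrapper gains import()/export() callbacks (and a halg.statesize) that simply delegate to the underlying shash, so callers that snapshot and restore partial hash state can use the driver again. The internal-only shash is also renamed from "ghash" to "__ghash", matching its "__driver-ghash-ce" driver name.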
@@ -12,7 +12,6 @@
#include <asm/assembler.h>

.syntax unified
.code 32
.fpu neon

.text
@@ -7,6 +7,15 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

#ifdef __BIG_ENDIAN__
#define LWZ(rt, d, ra)  \
    lwz rt,d(ra)
#else
#define LWZ(rt, d, ra)  \
    li rt,d;            \
    lwbrx rt,rt,ra
#endif

/*
 * We roll the registers for T, A, B, C, D, E around on each
 * iteration; T on iteration t is A on iteration t+1, and so on.
@@ -23,7 +32,7 @@
#define W(t)    (((t)%16)+16)

#define LOADW(t)                    \
    lwz W(t),(t)*4(r4)
    LWZ(W(t),(t)*4,r4)

#define STEPD0_LOAD(t)              \
    andc r0,RD(t),RB(t);            \
@@ -33,7 +42,7 @@
    add r0,RE(t),r15;               \
    add RT(t),RT(t),r6;             \
    add r14,r0,W(t);                \
    lwz W((t)+4),((t)+4)*4(r4);     \
    LWZ(W((t)+4),((t)+4)*4,r4);     \
    rotlwi RB(t),RB(t),30;          \
    add RT(t),RT(t),r14
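The LWZ macro above is the entire little-endian change: SHA-1 is defined on big-endian 32-bit words, so on little-endian PowerPC the plain lwz load is swapped for li plus lwbrx (load word byte-reversed indexed). A rough C analogue of the macro, for illustration only (not code from this diff):

#include <stdint.h>
#include <string.h>

/* Load the 32-bit word at byte offset d from p as a big-endian value,
 * which is what LWZ(rt, d, ra) produces on either endianness. */
static inline uint32_t sha1_load_word(const unsigned char *p, int d)
{
    uint32_t w;

    memcpy(&w, p + d, sizeof(w));   /* lwz: native-order load */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    w = __builtin_bswap32(w);       /* the byte reversal lwbrx does */
#endif
    return w;
}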
@@ -39,6 +39,37 @@ struct algif_hash_tfm {
    bool has_key;
};

static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
    unsigned ds;

    if (ctx->result)
        return 0;

    ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));

    ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
    if (!ctx->result)
        return -ENOMEM;

    memset(ctx->result, 0, ds);

    return 0;
}

static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
{
    unsigned ds;

    if (!ctx->result)
        return;

    ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));

    sock_kzfree_s(sk, ctx->result, ds);
    ctx->result = NULL;
}

static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
                        size_t ignored)
{
@@ -54,6 +85,9 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,

    lock_sock(sk);
    if (!ctx->more) {
        if ((msg->msg_flags & MSG_MORE))
            hash_free_result(sk, ctx);

        err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
                                         &ctx->completion);
        if (err)
@@ -90,6 +124,10 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,

    ctx->more = msg->msg_flags & MSG_MORE;
    if (!ctx->more) {
        err = hash_alloc_result(sk, ctx);
        if (err)
            goto unlock;

        ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
                                         &ctx->completion);
@@ -116,6 +154,13 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
    sg_init_table(ctx->sgl.sg, 1);
    sg_set_page(ctx->sgl.sg, page, size, offset);

    if (!(flags & MSG_MORE)) {
        err = hash_alloc_result(sk, ctx);
        if (err)
            goto unlock;
    } else if (!ctx->more)
        hash_free_result(sk, ctx);

    ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);

    if (!(flags & MSG_MORE)) {
@@ -153,6 +198,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;
    unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
    bool result;
    int err;

    if (len > ds)
@@ -161,17 +207,29 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        msg->msg_flags |= MSG_TRUNC;

    lock_sock(sk);
    result = ctx->result;
    err = hash_alloc_result(sk, ctx);
    if (err)
        goto unlock;

    ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);

    if (ctx->more) {
        ctx->more = 0;
        ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
                                         &ctx->completion);
        if (err)
            goto unlock;
    } else if (!result) {
        err = af_alg_wait_for_completion(
                crypto_ahash_digest(&ctx->req),
                &ctx->completion);
    }

    err = memcpy_to_msg(msg, ctx->result, len);

    hash_free_result(sk, ctx);

unlock:
    release_sock(sk);

@@ -394,8 +452,7 @@ static void hash_sock_destruct(struct sock *sk)
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;

    sock_kzfree_s(sk, ctx->result,
                  crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
    hash_free_result(sk, ctx);
    sock_kfree_s(sk, ctx, ctx->len);
    af_alg_release_parent(sk);
}
@@ -407,20 +464,12 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
    struct algif_hash_tfm *tfm = private;
    struct crypto_ahash *hash = tfm->hash;
    unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
    unsigned ds = crypto_ahash_digestsize(hash);

    ctx = sock_kmalloc(sk, len, GFP_KERNEL);
    if (!ctx)
        return -ENOMEM;

    ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
    if (!ctx->result) {
        sock_kfree_s(sk, ctx, len);
        return -ENOMEM;
    }

    memset(ctx->result, 0, ds);

    ctx->result = NULL;
    ctx->len = len;
    ctx->more = 0;
    af_alg_init_completion(&ctx->completion);
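For context, algif_hash is the "hash" type of the AF_ALG userspace interface, and the hunks above make its digest buffer lazily allocated rather than allocated at accept() time. A minimal user of the interface looks like this (a sketch with error checking omitted, assuming AF_ALG and a sha256 implementation are available in the running kernel):

#include <linux/if_alg.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_alg sa = {
        .salg_family = AF_ALG,
        .salg_type   = "hash",
        .salg_name   = "sha256",
    };
    unsigned char digest[32];
    int tfmfd, opfd, i;

    tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
    bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
    opfd = accept(tfmfd, NULL, 0);

    /* MSG_MORE keeps ctx->more set, so no result buffer exists yet. */
    send(opfd, "hello ", 6, MSG_MORE);
    send(opfd, "world", 5, 0);

    /* hash_recvmsg() finalizes the hash and copies out the digest. */
    read(opfd, digest, sizeof(digest));

    for (i = 0; i < 32; i++)
        printf("%02x", digest[i]);
    printf("\n");

    close(opfd);
    close(tfmfd);
    return 0;
}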
@@ -107,10 +107,7 @@ static struct shash_alg alg = {

static int __init crct10dif_mod_init(void)
{
    int ret;

    ret = crypto_register_shash(&alg);
    return ret;
    return crypto_register_shash(&alg);
}

static void __exit crct10dif_mod_fini(void)
@@ -14,13 +14,12 @@

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

void crypto_finalize_request(struct crypto_engine *engine,
                             struct ablkcipher_request *req, int err);

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
@@ -34,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
    struct crypto_async_request *async_req, *backlog;
    struct ablkcipher_request *req;
    struct ahash_request *hreq;
    struct ablkcipher_request *breq;
    unsigned long flags;
    bool was_busy = false;
    int ret;
    int ret, rtype;

    spin_lock_irqsave(&engine->queue_lock, flags);

@@ -82,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
    if (!async_req)
        goto out;

    req = ablkcipher_request_cast(async_req);

    engine->cur_req = req;
    engine->cur_req = async_req;
    if (backlog)
        backlog->complete(backlog, -EINPROGRESS);

@@ -95,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,

    spin_unlock_irqrestore(&engine->queue_lock, flags);

    rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
    /* Until here we get the request need to be encrypted successfully */
    if (!was_busy && engine->prepare_crypt_hardware) {
        ret = engine->prepare_crypt_hardware(engine);
@@ -104,24 +103,55 @@ static void crypto_pump_requests(struct crypto_engine *engine,
        }
    }

    if (engine->prepare_request) {
        ret = engine->prepare_request(engine, engine->cur_req);
    switch (rtype) {
    case CRYPTO_ALG_TYPE_AHASH:
        hreq = ahash_request_cast(engine->cur_req);
        if (engine->prepare_hash_request) {
            ret = engine->prepare_hash_request(engine, hreq);
            if (ret) {
                pr_err("failed to prepare request: %d\n", ret);
                goto req_err;
            }
            engine->cur_req_prepared = true;
        }
        ret = engine->hash_one_request(engine, hreq);
        if (ret) {
            pr_err("failed to prepare request: %d\n", ret);
            pr_err("failed to hash one request from queue\n");
            goto req_err;
        }
        engine->cur_req_prepared = true;
        return;
    case CRYPTO_ALG_TYPE_ABLKCIPHER:
        breq = ablkcipher_request_cast(engine->cur_req);
        if (engine->prepare_cipher_request) {
            ret = engine->prepare_cipher_request(engine, breq);
            if (ret) {
                pr_err("failed to prepare request: %d\n", ret);
                goto req_err;
            }
            engine->cur_req_prepared = true;
        }
        ret = engine->cipher_one_request(engine, breq);
        if (ret) {
            pr_err("failed to cipher one request from queue\n");
            goto req_err;
        }
        return;
    default:
        pr_err("failed to prepare request of unknown type\n");
        return;
    }

    ret = engine->crypt_one_request(engine, engine->cur_req);
    if (ret) {
        pr_err("failed to crypt one request from queue\n");
        goto req_err;
    }
    return;

req_err:
    crypto_finalize_request(engine, engine->cur_req, ret);
    switch (rtype) {
    case CRYPTO_ALG_TYPE_AHASH:
        hreq = ahash_request_cast(engine->cur_req);
        crypto_finalize_hash_request(engine, hreq, ret);
        break;
    case CRYPTO_ALG_TYPE_ABLKCIPHER:
        breq = ablkcipher_request_cast(engine->cur_req);
        crypto_finalize_cipher_request(engine, breq, ret);
        break;
    }
    return;

out:
@@ -137,12 +167,14 @@ static void crypto_pump_work(struct kthread_work *work)
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * crypto_transfer_cipher_request - transfer the new request into the
 * enginequeue
 * @engine: the hardware engine
 * @req: the request need to be listed into the engine queue
 */
int crypto_transfer_request(struct crypto_engine *engine,
                            struct ablkcipher_request *req, bool need_pump)
int crypto_transfer_cipher_request(struct crypto_engine *engine,
                                   struct ablkcipher_request *req,
                                   bool need_pump)
{
    unsigned long flags;
    int ret;
@@ -162,46 +194,88 @@ int crypto_transfer_request(struct crypto_engine *engine,
    spin_unlock_irqrestore(&engine->queue_lock, flags);
    return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_request_to_engine - transfer one request to list into the
 * engine queue
 * crypto_transfer_cipher_request_to_engine - transfer one request to list
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request need to be listed into the engine queue
 */
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                      struct ablkcipher_request *req)
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
                                             struct ablkcipher_request *req)
{
    return crypto_transfer_request(engine, req, true);
    return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);

/**
 * crypto_finalize_request - finalize one request if the request is done
 * crypto_transfer_hash_request - transfer the new request into the
 * enginequeue
 * @engine: the hardware engine
 * @req: the request need to be listed into the engine queue
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
                                 struct ahash_request *req, bool need_pump)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&engine->queue_lock, flags);

    if (!engine->running) {
        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return -ESHUTDOWN;
    }

    ret = ahash_enqueue_request(&engine->queue, req);

    if (!engine->busy && need_pump)
        queue_kthread_work(&engine->kworker, &engine->pump_requests);

    spin_unlock_irqrestore(&engine->queue_lock, flags);
    return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request to list
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request need to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
    return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request need to be finalized
 * @err: error number
 */
void crypto_finalize_request(struct crypto_engine *engine,
                             struct ablkcipher_request *req, int err)
void crypto_finalize_cipher_request(struct crypto_engine *engine,
                                    struct ablkcipher_request *req, int err)
{
    unsigned long flags;
    bool finalize_cur_req = false;
    int ret;

    spin_lock_irqsave(&engine->queue_lock, flags);
    if (engine->cur_req == req)
    if (engine->cur_req == &req->base)
        finalize_cur_req = true;
    spin_unlock_irqrestore(&engine->queue_lock, flags);

    if (finalize_cur_req) {
        if (engine->cur_req_prepared && engine->unprepare_request) {
            ret = engine->unprepare_request(engine, req);
        if (engine->cur_req_prepared &&
            engine->unprepare_cipher_request) {
            ret = engine->unprepare_cipher_request(engine, req);
            if (ret)
                pr_err("failed to unprepare request\n");
        }

        spin_lock_irqsave(&engine->queue_lock, flags);
        engine->cur_req = NULL;
        engine->cur_req_prepared = false;
@@ -212,7 +286,44 @@ void crypto_finalize_request(struct crypto_engine *engine,

    queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);

/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request need to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
    unsigned long flags;
    bool finalize_cur_req = false;
    int ret;

    spin_lock_irqsave(&engine->queue_lock, flags);
    if (engine->cur_req == &req->base)
        finalize_cur_req = true;
    spin_unlock_irqrestore(&engine->queue_lock, flags);

    if (finalize_cur_req) {
        if (engine->cur_req_prepared &&
            engine->unprepare_hash_request) {
            ret = engine->unprepare_hash_request(engine, req);
            if (ret)
                pr_err("failed to unprepare request\n");
        }
        spin_lock_irqsave(&engine->queue_lock, flags);
        engine->cur_req = NULL;
        engine->cur_req_prepared = false;
        spin_unlock_irqrestore(&engine->queue_lock, flags);
    }

    req->base.complete(&req->base, err);

    queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_engine_start - start the hardware engine
@@ -249,7 +360,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_start);
int crypto_engine_stop(struct crypto_engine *engine)
{
    unsigned long flags;
    unsigned limit = 500;
    unsigned int limit = 500;
    int ret = 0;

    spin_lock_irqsave(&engine->queue_lock, flags);
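A driver consumes the new hash side of the engine through the type-specific entry points above. The following is a compressed sketch, not a real driver: the mydrv names are hypothetical, while crypto_engine_alloc_init(), crypto_engine_start(), the hash_one_request callback, crypto_transfer_hash_request_to_engine() and crypto_finalize_hash_request() are the interfaces shown or kept by this diff:

#include <crypto/engine.h>
#include <crypto/hash.h>

/* Hypothetical driver state; only what the sketch needs. */
struct mydrv {
    struct device *dev;
    struct crypto_engine *engine;
};

/* Called from the engine kthread for each dequeued hash request. */
static int mydrv_hash_one_request(struct crypto_engine *engine,
                                  struct ahash_request *req)
{
    /* Program the hardware here; once it signals completion (e.g. in
     * an IRQ handler), hand the request back to the engine: */
    crypto_finalize_hash_request(engine, req, 0);
    return 0;
}

/* An ahash entry point would just queue the request on the engine
 * (a real .digest callback receives only req; dd is passed explicitly
 * here to keep the sketch self-contained). */
static int mydrv_queue_hash(struct mydrv *dd, struct ahash_request *req)
{
    return crypto_transfer_hash_request_to_engine(dd->engine, req);
}

static int mydrv_setup_engine(struct mydrv *dd)
{
    dd->engine = crypto_engine_alloc_init(dd->dev, true);
    if (!dd->engine)
        return -ENOMEM;
    dd->engine->hash_one_request = mydrv_hash_one_request;
    return crypto_engine_start(dd->engine);
}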
@@ -1178,12 +1178,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
        goto err;

    drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
    if (!drbg->Vbuf)
    if (!drbg->Vbuf) {
        ret = -ENOMEM;
        goto fini;
    }
    drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
    drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
    if (!drbg->Cbuf)
    if (!drbg->Cbuf) {
        ret = -ENOMEM;
        goto fini;
    }
    drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
    /* scratchpad is only generated for CTR and Hash */
    if (drbg->core->flags & DRBG_HMAC)
@@ -1199,8 +1203,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)

    if (0 < sb_size) {
        drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
        if (!drbg->scratchpadbuf)
        if (!drbg->scratchpadbuf) {
            ret = -ENOMEM;
            goto fini;
        }
        drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
    }

@@ -1917,6 +1923,8 @@ static inline int __init drbg_healthcheck_sanity(void)
        return -ENOMEM;

    mutex_init(&drbg->drbg_mutex);
    drbg->core = &drbg_cores[coreref];
    drbg->reseed_threshold = drbg_max_requests(drbg);

    /*
     * if the following tests fail, it is likely that there is a buffer
@@ -1926,12 +1934,6 @@ static inline int __init drbg_healthcheck_sanity(void)
     * grave bug.
     */

    /* get a valid instance of DRBG for following tests */
    ret = drbg_instantiate(drbg, NULL, coreref, pr);
    if (ret) {
        rc = ret;
        goto outbuf;
    }
    max_addtllen = drbg_max_addtl(drbg);
    max_request_bytes = drbg_max_request_bytes(drbg);
    drbg_string_fill(&addtl, buf, max_addtllen + 1);
@@ -1941,10 +1943,9 @@ static inline int __init drbg_healthcheck_sanity(void)
    /* overflow max_bits */
    len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
    BUG_ON(0 < len);
    drbg_uninstantiate(drbg);

    /* overflow max addtllen with personalization string */
    ret = drbg_instantiate(drbg, &addtl, coreref, pr);
    ret = drbg_seed(drbg, &addtl, false);
    BUG_ON(0 == ret);
    /* all tests passed */
    rc = 0;
@@ -1952,9 +1953,7 @@ static inline int __init drbg_healthcheck_sanity(void)
    pr_devel("DRBG: Sanity tests for failure code paths successfully "
             "completed\n");

    drbg_uninstantiate(drbg);
outbuf:
    kzfree(drbg);
    kfree(drbg);
    return rc;
}

@@ -2006,7 +2005,7 @@ static int __init drbg_init(void)
{
    unsigned int i = 0; /* pointer to drbg_algs */
    unsigned int j = 0; /* pointer to drbg_cores */
    int ret = -EFAULT;
    int ret;

    ret = drbg_healthcheck_sanity();
    if (ret)
@@ -2016,7 +2015,7 @@ static int __init drbg_init(void)
        pr_info("DRBG: Cannot register all DRBG types"
                "(slots needed: %zu, slots available: %zu)\n",
                ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
        return ret;
        return -EFAULT;
    }

    /*
@@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
    struct crypto_skcipher *ctr = ctx->ctr;
    struct {
        be128 hash;
        u8 iv[8];
        u8 iv[16];

        struct crypto_gcm_setkey_result result;
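For the gcm hunk above: crypto_gcm_setkey() derives the GHASH key by running the underlying CTR skcipher over a zero block, and the IV it hands that skcipher is a full 16-byte AES counter block, so the on-stack iv[8] was undersized and the cipher could write past it into adjacent stack data; widening the field to iv[16] fixes the overflow.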
@@ -14,24 +14,13 @@

#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

#define GHASH_BLOCK_SIZE    16
#define GHASH_DIGEST_SIZE   16

struct ghash_ctx {
    struct gf128mul_4k *gf128;
};

struct ghash_desc_ctx {
    u8 buffer[GHASH_BLOCK_SIZE];
    u32 bytes;
};

static int ghash_init(struct shash_desc *desc)
{
    struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
@@ -612,12 +612,7 @@ EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

int ahash_mcryptd_digest(struct ahash_request *desc)
{
    int err;

    err = crypto_ahash_init(desc) ?:
          ahash_mcryptd_finup(desc);

    return err;
    return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}

int ahash_mcryptd_update(struct ahash_request *desc)
@@ -35,8 +35,8 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
        n_sz--;
    }

    /* In FIPS mode only allow key size 2K & 3K */
    if (n_sz != 256 && n_sz != 384) {
    /* In FIPS mode only allow key size 2K and higher */
    if (n_sz < 256) {
        pr_err("RSA: key size not allowed in FIPS mode\n");
        return -EINVAL;
    }
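Here n_sz is the RSA modulus length in bytes: 256 bytes = 2048 bits and 384 bytes = 3072 bits. The old test therefore whitelisted exactly those two sizes, while the new n_sz < 256 check rejects only moduli shorter than 2048 bits and accepts anything larger, matching the "Allow keys >= 2048 bits in FIPS mode for RSA" item in the pull message.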
@@ -209,16 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
    char *state;
    struct ahash_request *req;
    int statesize, ret = -EINVAL;
    const char guard[] = { 0x00, 0xba, 0xad, 0x00 };

    req = *preq;
    statesize = crypto_ahash_statesize(
                    crypto_ahash_reqtfm(req));
    state = kmalloc(statesize, GFP_KERNEL);
    state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
    if (!state) {
        pr_err("alt: hash: Failed to alloc state for %s\n", algo);
        goto out_nostate;
    }
    memcpy(state + statesize, guard, sizeof(guard));
    ret = crypto_ahash_export(req, state);
    WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
    if (ret) {
        pr_err("alt: hash: Failed to export() for %s\n", algo);
        goto out;
@@ -665,7 +668,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
    memcpy(key, template[i].key, template[i].klen);

    ret = crypto_aead_setkey(tfm, key, template[i].klen);
    if (!ret == template[i].fail) {
    if (template[i].fail == !ret) {
        pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
               d, j, algo, crypto_aead_get_flags(tfm));
        goto out;
@@ -770,7 +773,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
    memcpy(key, template[i].key, template[i].klen);

    ret = crypto_aead_setkey(tfm, key, template[i].klen);
    if (!ret == template[i].fail) {
    if (template[i].fail == !ret) {
        pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
               d, j, algo, crypto_aead_get_flags(tfm));
        goto out;
@@ -1008,6 +1011,9 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
    if (template[i].np)
        continue;

    if (fips_enabled && template[i].fips_skip)
        continue;

    j++;

    ret = -EINVAL;
@@ -1023,7 +1029,7 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,

    ret = crypto_cipher_setkey(tfm, template[i].key,
                               template[i].klen);
    if (!ret == template[i].fail) {
    if (template[i].fail == !ret) {
        printk(KERN_ERR "alg: cipher: setkey failed "
               "on test %d for %s: flags=%x\n", j,
               algo, crypto_cipher_get_flags(tfm));
@@ -1112,6 +1118,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
    if (template[i].np && !template[i].also_non_np)
        continue;

    if (fips_enabled && template[i].fips_skip)
        continue;

    if (template[i].iv)
        memcpy(iv, template[i].iv, ivsize);
    else
@@ -1133,7 +1142,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,

    ret = crypto_skcipher_setkey(tfm, template[i].key,
                                 template[i].klen);
    if (!ret == template[i].fail) {
    if (template[i].fail == !ret) {
        pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
               d, j, algo, crypto_skcipher_get_flags(tfm));
        goto out;
@@ -1198,6 +1207,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
    if (!template[i].np)
        continue;

    if (fips_enabled && template[i].fips_skip)
        continue;

    if (template[i].iv)
        memcpy(iv, template[i].iv, ivsize);
    else
@@ -1211,7 +1223,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,

    ret = crypto_skcipher_setkey(tfm, template[i].key,
                                 template[i].klen);
    if (!ret == template[i].fail) {
    if (template[i].fail == !ret) {
        pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
               d, j, algo, crypto_skcipher_get_flags(tfm));
        goto out;

@@ -59,6 +59,7 @@ struct hash_testvec {
 * @tap: How to distribute data in @np SGs
 * @also_non_np: if set to 1, the test will be also done without
 *               splitting data in @np SGs
 * @fips_skip: Skip the test vector in FIPS mode
 */

struct cipher_testvec {
@@ -75,6 +76,7 @@ struct cipher_testvec {
    unsigned char klen;
    unsigned short ilen;
    unsigned short rlen;
    bool fips_skip;
};

struct aead_testvec {
@@ -18224,6 +18226,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
              "\x00\x00\x00\x00\x00\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .klen   = 32,
        .fips_skip = 1,
        .iv     = "\x00\x00\x00\x00\x00\x00\x00\x00"
                  "\x00\x00\x00\x00\x00\x00\x00\x00",
        .input  = "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -18566,6 +18569,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
              "\x00\x00\x00\x00\x00\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .klen   = 32,
        .fips_skip = 1,
        .iv     = "\x00\x00\x00\x00\x00\x00\x00\x00"
                  "\x00\x00\x00\x00\x00\x00\x00\x00",
        .input  = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
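The ahash_partial_update() hunk above is a plain canary check: the export buffer is over-allocated by sizeof(guard), the guard bytes are planted just past the declared statesize, and any export() implementation that writes beyond its statesize trips the WARN_ON. The same pattern in isolation (a generic userspace sketch, not kernel code):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Run cb(buf) and verify it stayed within the first payload bytes. */
static int check_no_overrun(size_t payload, void (*cb)(void *buf))
{
    static const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
    char *buf = malloc(payload + sizeof(guard));

    if (!buf)
        return -1;
    memcpy(buf + payload, guard, sizeof(guard));    /* plant canary */
    cb(buf);
    assert(!memcmp(buf + payload, guard, sizeof(guard)));
    free(buf);
    return 0;
}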
@@ -24,6 +24,10 @@
#include <linux/preempt.h>
#include <asm/xor.h>

#ifndef XOR_SELECT_TEMPLATE
#define XOR_SELECT_TEMPLATE(x) (x)
#endif

/* The xor routines to use.  */
static struct xor_block_template *active_template;

@@ -109,6 +113,15 @@ calibrate_xor_blocks(void)
    void *b1, *b2;
    struct xor_block_template *f, *fastest;

    fastest = XOR_SELECT_TEMPLATE(NULL);

    if (fastest) {
        printk(KERN_INFO "xor: automatically using best "
               "checksumming function   %-10s\n",
               fastest->name);
        goto out;
    }

    /*
     * Note: Since the memory is not actually used for _anything_ but to
     * test the XOR speed, we don't really want kmemcheck to warn about
@@ -126,36 +139,22 @@ calibrate_xor_blocks(void)
     * all the possible functions, just test the best one
     */

    fastest = NULL;

#ifdef XOR_SELECT_TEMPLATE
    fastest = XOR_SELECT_TEMPLATE(fastest);
#endif

#define xor_speed(templ)    do_xor_speed((templ), b1, b2)

    if (fastest) {
        printk(KERN_INFO "xor: automatically using best "
               "checksumming function:\n");
        xor_speed(fastest);
        goto out;
    } else {
        printk(KERN_INFO "xor: measuring software checksum speed\n");
        XOR_TRY_TEMPLATES;
        fastest = template_list;
        for (f = fastest; f; f = f->next)
            if (f->speed > fastest->speed)
                fastest = f;
    }
    printk(KERN_INFO "xor: measuring software checksum speed\n");
    XOR_TRY_TEMPLATES;
    fastest = template_list;
    for (f = fastest; f; f = f->next)
        if (f->speed > fastest->speed)
            fastest = f;

    printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
           fastest->name, fastest->speed / 1000, fastest->speed % 1000);

#undef xor_speed

out:
    free_pages((unsigned long)b1, 2);

out:
    active_template = fastest;
    return 0;
}
@@ -5,7 +5,7 @@
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based om ecb.c
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
@@ -410,6 +410,19 @@ config HW_RANDOM_MESON

	  If unsure, say Y.

config HW_RANDOM_CAVIUM
	tristate "Cavium ThunderX Random Number Generator support"
	depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
	default HW_RANDOM
	---help---
	  This driver provides kernel-side support for the Random Number
	  Generator hardware found on Cavium SoCs.

	  To compile this driver as a module, choose M here: the
	  module will be called cavium_rng.

	  If unsure, say Y.

endif # HW_RANDOM

config UML_RANDOM
@@ -35,3 +35,4 @@ obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
@@ -24,16 +24,18 @@
 * warranty of any kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#define DRV_NAME "AMD768-HWRNG"

#define PFX KBUILD_MODNAME ": "

#define RNGDATA  0x00
#define RNGDONE  0x04
#define PMBASE_OFFSET 0xF0
#define PMBASE_SIZE 8

/*
 * Data for PCI driver interface
@@ -50,72 +52,84 @@ static const struct pci_device_id pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, pci_tbl);

static struct pci_dev *amd_pdev;
struct amd768_priv {
    void __iomem *iobase;
    struct pci_dev *pcidev;
};

static int amd_rng_data_present(struct hwrng *rng, int wait)
static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
    u32 pmbase = (u32)rng->priv;
    int data, i;
    u32 *data = buf;
    struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
    size_t read = 0;
    /* We will wait at maximum one time per read */
    int timeout = max / 4 + 1;

    for (i = 0; i < 20; i++) {
        data = !!(inl(pmbase + 0xF4) & 1);
        if (data || !wait)
            break;
        udelay(10);
    /*
     * RNG data is available when RNGDONE is set to 1
     * New random numbers are generated approximately 128 microseconds
     * after RNGDATA is read
     */
    while (read < max) {
        if (ioread32(priv->iobase + RNGDONE) == 0) {
            if (wait) {
                /* Delay given by datasheet */
                usleep_range(128, 196);
                if (timeout-- == 0)
                    return read;
            } else {
                return 0;
            }
        } else {
            *data = ioread32(priv->iobase + RNGDATA);
            data++;
            read += 4;
        }
    }
    return data;
}

static int amd_rng_data_read(struct hwrng *rng, u32 *data)
{
    u32 pmbase = (u32)rng->priv;

    *data = inl(pmbase + 0xF0);

    return 4;
    return read;
}

static int amd_rng_init(struct hwrng *rng)
{
    struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
    u8 rnen;

    pci_read_config_byte(amd_pdev, 0x40, &rnen);
    rnen |= (1 << 7);	/* RNG on */
    pci_write_config_byte(amd_pdev, 0x40, rnen);
    pci_read_config_byte(priv->pcidev, 0x40, &rnen);
    rnen |= BIT(7);	/* RNG on */
    pci_write_config_byte(priv->pcidev, 0x40, rnen);

    pci_read_config_byte(amd_pdev, 0x41, &rnen);
    rnen |= (1 << 7);	/* PMIO enable */
    pci_write_config_byte(amd_pdev, 0x41, rnen);
    pci_read_config_byte(priv->pcidev, 0x41, &rnen);
    rnen |= BIT(7);	/* PMIO enable */
    pci_write_config_byte(priv->pcidev, 0x41, rnen);

    return 0;
}

static void amd_rng_cleanup(struct hwrng *rng)
{
    struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
    u8 rnen;

    pci_read_config_byte(amd_pdev, 0x40, &rnen);
    rnen &= ~(1 << 7);	/* RNG off */
    pci_write_config_byte(amd_pdev, 0x40, rnen);
    pci_read_config_byte(priv->pcidev, 0x40, &rnen);
    rnen &= ~BIT(7);	/* RNG off */
    pci_write_config_byte(priv->pcidev, 0x40, rnen);
}

static struct hwrng amd_rng = {
    .name		= "amd",
    .init		= amd_rng_init,
    .cleanup		= amd_rng_cleanup,
    .data_present	= amd_rng_data_present,
    .data_read		= amd_rng_data_read,
    .read		= amd_rng_read,
};

static int __init mod_init(void)
{
    int err = -ENODEV;
    struct pci_dev *pdev = NULL;
    const struct pci_device_id *ent;
    u32 pmbase;
    struct amd768_priv *priv;

    for_each_pci_dev(pdev) {
        ent = pci_match_id(pci_tbl, pdev);
@@ -123,42 +137,44 @@ static int __init mod_init(void)
            goto found;
    }
    /* Device not found. */
    goto out;
    return -ENODEV;

found:
    err = pci_read_config_dword(pdev, 0x58, &pmbase);
    if (err)
        goto out;
    err = -EIO;
        return err;

    pmbase &= 0x0000FF00;
    if (pmbase == 0)
        goto out;
    if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
        dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
                pmbase + 0xF0);
        err = -EBUSY;
        goto out;
    }
    amd_rng.priv = (unsigned long)pmbase;
    amd_pdev = pdev;
        return -EIO;

    pr_info("AMD768 RNG detected\n");
    err = hwrng_register(&amd_rng);
    if (err) {
        pr_err(PFX "RNG registering failed (%d)\n",
               err);
        release_region(pmbase + 0xF0, 8);
        goto out;
    priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;

    if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
                             PMBASE_SIZE, DRV_NAME)) {
        dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
                pmbase + 0xF0);
        return -EBUSY;
    }
out:
    return err;

    priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
                                   PMBASE_SIZE);
    if (!priv->iobase) {
        pr_err(DRV_NAME "Cannot map ioport\n");
        return -ENOMEM;
    }

    amd_rng.priv = (unsigned long)priv;
    priv->pcidev = pdev;

    pr_info(DRV_NAME " detected\n");
    return devm_hwrng_register(&pdev->dev, &amd_rng);
}

static void __exit mod_exit(void)
{
    u32 pmbase = (unsigned long)amd_rng.priv;
    release_region(pmbase + 0xF0, 8);
    hwrng_unregister(&amd_rng);
}

module_init(mod_init);
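The conversion above moves amd-rng from the legacy data_present/data_read pair to the single .read hook, which returns up to max bytes directly into the caller's buffer. The smallest possible hwrng driver using that interface looks roughly like this (hypothetical, for illustration only):

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/string.h>

/* Return up to max bytes of "entropy" in buf; a negative value or 0
 * signals failure / no data (honoring wait is up to the driver). */
static int dummy_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
    memset(buf, 0x42, max);    /* not random - illustration only */
    return max;
}

static struct hwrng dummy_rng = {
    .name = "dummy",
    .read = dummy_rng_read,
};

static int __init dummy_rng_init(void)
{
    return hwrng_register(&dummy_rng);
}

static void __exit dummy_rng_exit(void)
{
    hwrng_unregister(&dummy_rng);
}

module_init(dummy_rng_init);
module_exit(dummy_rng_exit);
MODULE_LICENSE("GPL");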
@@ -92,9 +92,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
    bcm2835_rng_ops.priv = (unsigned long)rng_base;

    rng_id = of_match_node(bcm2835_rng_of_match, np);
    if (!rng_id)
    if (!rng_id) {
        iounmap(rng_base);
        return -EINVAL;

    }
    /* Check for rng init function, execute it */
    rng_setup = rng_id->data;
    if (rng_setup)
Some files were not shown because too many files have changed in this diff.