You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (48 commits) [NETFILTER]: Fix non-ANSI func. decl. [TG3]: Identify Serdes devices more clearly. [TG3]: Use msleep. [TG3]: Use netif_msg_*. [TG3]: Allow partial speed advertisement. [TG3]: Add TG3_FLG2_IS_NIC flag. [TG3]: Add 5787F device ID. [TG3]: Fix Phy loopback. [WANROUTER]: Kill kmalloc debugging code. [TCP] inet_twdr_hangman: Delete unnecessary memory barrier(). [NET]: Memory barrier cleanups [IPSEC]: Fix inetpeer leak in ipv4 xfrm dst entries. audit: disable ipsec auditing when CONFIG_AUDITSYSCALL=n audit: Add auditing to ipsec [IRDA] irlan: Fix compile warning when CONFIG_PROC_FS=n [IrDA]: Incorrect TTP header reservation [IrDA]: PXA FIR code device model conversion [GENETLINK]: Fix misplaced command flags. [NETLIK]: Add a pointer to the Generic Netlink wiki page. [IPV6] RAW: Don't release unlocked sock. ...
This commit is contained in:
@@ -58,6 +58,8 @@ fore200e.txt
|
||||
- FORE Systems PCA-200E/SBA-200E ATM NIC driver info.
|
||||
framerelay.txt
|
||||
- info on using Frame Relay/Data Link Connection Identifier (DLCI).
|
||||
generic_netlink.txt
|
||||
- info on Generic Netlink
|
||||
ip-sysctl.txt
|
||||
- /proc/sys/net/ipv4/* variables
|
||||
ip_dynaddr.txt
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
A wiki document on how to use Generic Netlink can be found here:
|
||||
|
||||
* http://linux-net.osdl.org/index.php/Generic_Netlink_HOWTO
|
||||
@@ -39,6 +39,17 @@ config CRYPTO_HMAC
|
||||
HMAC: Keyed-Hashing for Message Authentication (RFC2104).
|
||||
This is required for IPSec.
|
||||
|
||||
config CRYPTO_XCBC
|
||||
tristate "XCBC support"
|
||||
depends on EXPERIMENTAL
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
XCBC: Keyed-Hashing with encryption algorithm
|
||||
http://www.ietf.org/rfc/rfc3566.txt
|
||||
http://csrc.nist.gov/encryption/modes/proposedmodes/
|
||||
xcbc-mac/xcbc-mac-spec.pdf
|
||||
|
||||
config CRYPTO_NULL
|
||||
tristate "Null algorithms"
|
||||
select CRYPTO_ALGAPI
|
||||
@@ -128,6 +139,16 @@ config CRYPTO_TGR192
|
||||
See also:
|
||||
<http://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
|
||||
|
||||
config CRYPTO_GF128MUL
|
||||
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL
|
||||
help
|
||||
Efficient table driven implementation of multiplications in the
|
||||
field GF(2^128). This is needed by some cypher modes. This
|
||||
option will be selected automatically if you select such a
|
||||
cipher mode. Only select this option by hand if you expect to load
|
||||
an external module that requires these functions.
|
||||
|
||||
config CRYPTO_ECB
|
||||
tristate "ECB support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
@@ -147,6 +168,19 @@ config CRYPTO_CBC
|
||||
CBC: Cipher Block Chaining mode
|
||||
This block cipher algorithm is required for IPSec.
|
||||
|
||||
config CRYPTO_LRW
|
||||
tristate "LRW support (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
select CRYPTO_GF128MUL
|
||||
help
|
||||
LRW: Liskov Rivest Wagner, a tweakable, non malleable, non movable
|
||||
narrow block cipher mode for dm-crypt. Use it with cipher
|
||||
specification string aes-lrw-benbi, the key must be 256, 320 or 384.
|
||||
The first 128, 192 or 256 bits in the key are used for AES and the
|
||||
rest is used to tie each cipher block to its logical position.
|
||||
|
||||
config CRYPTO_DES
|
||||
tristate "DES and Triple DES EDE cipher algorithms"
|
||||
select CRYPTO_ALGAPI
|
||||
|
||||
@@ -15,6 +15,7 @@ obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
|
||||
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
|
||||
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
|
||||
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
|
||||
obj-$(CONFIG_CRYPTO_MD4) += md4.o
|
||||
obj-$(CONFIG_CRYPTO_MD5) += md5.o
|
||||
@@ -23,8 +24,10 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
|
||||
obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
|
||||
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
|
||||
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
|
||||
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
|
||||
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
|
||||
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
|
||||
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
|
||||
obj-$(CONFIG_CRYPTO_DES) += des.o
|
||||
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
|
||||
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
|
||||
|
||||
@@ -466,23 +466,8 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
|
||||
kfree(tfm);
|
||||
}
|
||||
|
||||
/*
 * crypto_alg_available - probe whether an algorithm is usable.
 * @name: algorithm name to look up.
 * @flags: unused (kept for ABI compatibility with existing callers).
 *
 * Returns 1 when the lookup succeeds, 0 otherwise.  The temporary
 * module reference taken by the lookup is dropped before returning.
 */
int crypto_alg_available(const char *name, u32 flags)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(alg))
		return 0;

	crypto_mod_put(alg);
	return 1;
}

EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
EXPORT_SYMBOL_GPL(crypto_free_tfm);
EXPORT_SYMBOL_GPL(crypto_alg_available);
|
||||
|
||||
int crypto_has_alg(const char *name, u32 type, u32 mask)
|
||||
{
|
||||
|
||||
@@ -21,54 +21,6 @@
|
||||
#include "internal.h"
|
||||
#include "scatterwalk.h"
|
||||
|
||||
void crypto_digest_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_hash *hash = crypto_hash_cast(tfm);
|
||||
struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
|
||||
|
||||
crypto_hash_init(&desc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_digest_init);
|
||||
|
||||
void crypto_digest_update(struct crypto_tfm *tfm,
|
||||
struct scatterlist *sg, unsigned int nsg)
|
||||
{
|
||||
struct crypto_hash *hash = crypto_hash_cast(tfm);
|
||||
struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
|
||||
unsigned int nbytes = 0;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < nsg; i++)
|
||||
nbytes += sg[i].length;
|
||||
|
||||
crypto_hash_update(&desc, sg, nbytes);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_digest_update);
|
||||
|
||||
/* Legacy digest API: finalise the hash and write the digest to @out. */
void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
{
	struct hash_desc desc = {
		.tfm = crypto_hash_cast(tfm),
		.flags = tfm->crt_flags,
	};

	crypto_hash_final(&desc, out);
}
EXPORT_SYMBOL_GPL(crypto_digest_final);
|
||||
|
||||
/* Legacy digest API: one-shot hash of @nsg scatterlist entries into @out. */
void crypto_digest_digest(struct crypto_tfm *tfm,
			  struct scatterlist *sg, unsigned int nsg, u8 *out)
{
	struct hash_desc desc = {
		.tfm = crypto_hash_cast(tfm),
		.flags = tfm->crt_flags,
	};
	unsigned int i, nbytes;

	for (i = 0, nbytes = 0; i < nsg; i++)
		nbytes += sg[i].length;

	crypto_hash_digest(&desc, sg, nbytes, out);
}
EXPORT_SYMBOL_GPL(crypto_digest_digest);
|
||||
|
||||
static int init(struct hash_desc *desc)
|
||||
{
|
||||
struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
|
||||
|
||||
@@ -0,0 +1,466 @@
|
||||
/* gf128mul.c - GF(2^128) multiplication functions
|
||||
*
|
||||
* Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
|
||||
* Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
|
||||
*
|
||||
* Based on Dr Brian Gladman's (GPL'd) work published at
|
||||
* http://fp.gladman.plus.com/cryptography_technology/index.htm
|
||||
* See the original copyright notice below.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*/
|
||||
|
||||
/*
|
||||
---------------------------------------------------------------------------
|
||||
Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
|
||||
|
||||
LICENSE TERMS
|
||||
|
||||
The free distribution and use of this software in both source and binary
|
||||
form is allowed (with or without changes) provided that:
|
||||
|
||||
1. distributions of this source code include the above copyright
|
||||
notice, this list of conditions and the following disclaimer;
|
||||
|
||||
2. distributions in binary form include the above copyright
|
||||
notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other associated materials;
|
||||
|
||||
3. the copyright holder's name is not used to endorse products
|
||||
built using this software without specific written permission.
|
||||
|
||||
ALTERNATIVELY, provided that this notice is retained in full, this product
|
||||
may be distributed under the terms of the GNU General Public License (GPL),
|
||||
in which case the provisions of the GPL apply INSTEAD OF those given above.
|
||||
|
||||
DISCLAIMER
|
||||
|
||||
This software is provided 'as is' with no explicit or implied warranties
|
||||
in respect of its properties, including, but not limited to, correctness
|
||||
and/or fitness for purpose.
|
||||
---------------------------------------------------------------------------
|
||||
Issue 31/01/2006
|
||||
|
||||
This file provides fast multiplication in GF(128) as required by several
|
||||
cryptographic authentication modes
|
||||
*/
|
||||
|
||||
#include <crypto/gf128mul.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#define gf128mul_dat(q) { \
|
||||
q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
|
||||
q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
|
||||
q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
|
||||
q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
|
||||
q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
|
||||
q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
|
||||
q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
|
||||
q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
|
||||
q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
|
||||
q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
|
||||
q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
|
||||
q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
|
||||
q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
|
||||
q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
|
||||
q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
|
||||
q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
|
||||
q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
|
||||
q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
|
||||
q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
|
||||
q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
|
||||
q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
|
||||
q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
|
||||
q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
|
||||
q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
|
||||
q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
|
||||
q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
|
||||
q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
|
||||
q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
|
||||
q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
|
||||
q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
|
||||
q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
|
||||
q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
|
||||
}
|
||||
|
||||
/* Given the value i in 0..255 as the byte overflow when a field element
|
||||
   in GHASH is multiplied by x^8, this function will return the values that
|
||||
are generated in the lo 16-bit word of the field value by applying the
|
||||
modular polynomial. The values lo_byte and hi_byte are returned via the
|
||||
macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into
|
||||
memory as required by a suitable definition of this macro operating on
|
||||
the table above
|
||||
*/
|
||||
|
||||
#define xx(p, q) 0x##p##q
|
||||
|
||||
#define xda_bbe(i) ( \
|
||||
(i & 0x80 ? xx(43, 80) : 0) ^ (i & 0x40 ? xx(21, c0) : 0) ^ \
|
||||
(i & 0x20 ? xx(10, e0) : 0) ^ (i & 0x10 ? xx(08, 70) : 0) ^ \
|
||||
(i & 0x08 ? xx(04, 38) : 0) ^ (i & 0x04 ? xx(02, 1c) : 0) ^ \
|
||||
(i & 0x02 ? xx(01, 0e) : 0) ^ (i & 0x01 ? xx(00, 87) : 0) \
|
||||
)
|
||||
|
||||
#define xda_lle(i) ( \
|
||||
(i & 0x80 ? xx(e1, 00) : 0) ^ (i & 0x40 ? xx(70, 80) : 0) ^ \
|
||||
(i & 0x20 ? xx(38, 40) : 0) ^ (i & 0x10 ? xx(1c, 20) : 0) ^ \
|
||||
(i & 0x08 ? xx(0e, 10) : 0) ^ (i & 0x04 ? xx(07, 08) : 0) ^ \
|
||||
(i & 0x02 ? xx(03, 84) : 0) ^ (i & 0x01 ? xx(01, c2) : 0) \
|
||||
)
|
||||
|
||||
static const u16 gf128mul_table_lle[256] = gf128mul_dat(xda_lle);
|
||||
static const u16 gf128mul_table_bbe[256] = gf128mul_dat(xda_bbe);
|
||||
|
||||
/* These functions multiply a field element by x, by x^4 and by x^8
|
||||
* in the polynomial field representation. It uses 32-bit word operations
|
||||
 * to gain speed but compensates for machine endianness and hence works
|
||||
* correctly on both styles of machine.
|
||||
*/
|
||||
|
||||
/* r <- x * x (i.e. shift by one bit) in the lle field convention,
 * folding the bit that falls off back in via the reduction table. */
static void gf128mul_x_lle(be128 *r, const be128 *x)
{
	u64 w0 = be64_to_cpu(x->a);
	u64 w1 = be64_to_cpu(x->b);
	u64 red = gf128mul_table_lle[(w1 << 7) & 0xff];

	r->b = cpu_to_be64((w1 >> 1) | (w0 << 63));
	r->a = cpu_to_be64((w0 >> 1) ^ (red << 48));
}
|
||||
|
||||
/* r <- x * x (one-bit shift) in the bbe field convention. */
static void gf128mul_x_bbe(be128 *r, const be128 *x)
{
	u64 w0 = be64_to_cpu(x->a);
	u64 w1 = be64_to_cpu(x->b);
	u64 red = gf128mul_table_bbe[w0 >> 63];

	r->a = cpu_to_be64((w0 << 1) | (w1 >> 63));
	r->b = cpu_to_be64((w1 << 1) ^ red);
}
|
||||
|
||||
/* x <- x * x^8 (byte shift) in place, lle convention. */
static void gf128mul_x8_lle(be128 *x)
{
	u64 w0 = be64_to_cpu(x->a);
	u64 w1 = be64_to_cpu(x->b);
	u64 red = gf128mul_table_lle[w1 & 0xff];

	x->b = cpu_to_be64((w1 >> 8) | (w0 << 56));
	x->a = cpu_to_be64((w0 >> 8) ^ (red << 48));
}
|
||||
|
||||
/* x <- x * x^8 (byte shift) in place, bbe convention. */
static void gf128mul_x8_bbe(be128 *x)
{
	u64 w0 = be64_to_cpu(x->a);
	u64 w1 = be64_to_cpu(x->b);
	u64 red = gf128mul_table_bbe[w0 >> 56];

	x->a = cpu_to_be64((w0 << 8) | (w1 >> 56));
	x->b = cpu_to_be64((w1 << 8) ^ red);
}
|
||||
|
||||
/* r <- r * b in the lle convention.  p[i] holds r * x^i so each byte of
 * @b can be handled with up to eight xors; between bytes the accumulator
 * is multiplied by x^8. */
void gf128mul_lle(be128 *r, const be128 *b)
{
	be128 p[8];
	int i;

	p[0] = *r;
	for (i = 0; i < 7; ++i)
		gf128mul_x_lle(&p[i + 1], &p[i]);

	/* Fix: clear the whole 128-bit accumulator.  The original used
	 * sizeof(r) -- the size of the *pointer* -- leaving part of *r
	 * uninitialised and corrupting the product. */
	memset(r, 0, sizeof(*r));
	for (i = 0;;) {
		u8 ch = ((u8 *)b)[15 - i];

		if (ch & 0x80)
			be128_xor(r, r, &p[0]);
		if (ch & 0x40)
			be128_xor(r, r, &p[1]);
		if (ch & 0x20)
			be128_xor(r, r, &p[2]);
		if (ch & 0x10)
			be128_xor(r, r, &p[3]);
		if (ch & 0x08)
			be128_xor(r, r, &p[4]);
		if (ch & 0x04)
			be128_xor(r, r, &p[5]);
		if (ch & 0x02)
			be128_xor(r, r, &p[6]);
		if (ch & 0x01)
			be128_xor(r, r, &p[7]);

		if (++i >= 16)
			break;

		gf128mul_x8_lle(r);
	}
}
EXPORT_SYMBOL(gf128mul_lle);
|
||||
|
||||
/* r <- r * b in the bbe convention; mirror of gf128mul_lle with the
 * byte order and bit-to-power mapping reversed. */
void gf128mul_bbe(be128 *r, const be128 *b)
{
	be128 p[8];
	int i;

	p[0] = *r;
	for (i = 0; i < 7; ++i)
		gf128mul_x_bbe(&p[i + 1], &p[i]);

	/* Fix: clear the whole 128-bit accumulator; sizeof(r) only
	 * covered the pointer, not the pointed-to be128. */
	memset(r, 0, sizeof(*r));
	for (i = 0;;) {
		u8 ch = ((u8 *)b)[i];

		if (ch & 0x80)
			be128_xor(r, r, &p[7]);
		if (ch & 0x40)
			be128_xor(r, r, &p[6]);
		if (ch & 0x20)
			be128_xor(r, r, &p[5]);
		if (ch & 0x10)
			be128_xor(r, r, &p[4]);
		if (ch & 0x08)
			be128_xor(r, r, &p[3]);
		if (ch & 0x04)
			be128_xor(r, r, &p[2]);
		if (ch & 0x02)
			be128_xor(r, r, &p[1]);
		if (ch & 0x01)
			be128_xor(r, r, &p[0]);

		if (++i >= 16)
			break;

		gf128mul_x8_bbe(r);
	}
}
EXPORT_SYMBOL(gf128mul_bbe);
|
||||
|
||||
/* This version uses 64k bytes of table space.
|
||||
A 16 byte buffer has to be multiplied by a 16 byte key
|
||||
value in GF(128). If we consider a GF(128) value in
|
||||
the buffer's lowest byte, we can construct a table of
|
||||
the 256 16 byte values that result from the 256 values
|
||||
of this byte. This requires 4096 bytes. But we also
|
||||
need tables for each of the 16 higher bytes in the
|
||||
buffer as well, which makes 64 kbytes in total.
|
||||
*/
|
||||
/* additional explanation
|
||||
* t[0][BYTE] contains g*BYTE
|
||||
* t[1][BYTE] contains g*x^8*BYTE
|
||||
* ..
|
||||
* t[15][BYTE] contains g*x^120*BYTE */
|
||||
struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g)
|
||||
{
|
||||
struct gf128mul_64k *t;
|
||||
int i, j, k;
|
||||
|
||||
t = kzalloc(sizeof(*t), GFP_KERNEL);
|
||||
if (!t)
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
|
||||
if (!t->t[i]) {
|
||||
gf128mul_free_64k(t);
|
||||
t = NULL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
t->t[0]->t[128] = *g;
|
||||
for (j = 64; j > 0; j >>= 1)
|
||||
gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]);
|
||||
|
||||
for (i = 0;;) {
|
||||
for (j = 2; j < 256; j += j)
|
||||
for (k = 1; k < j; ++k)
|
||||
be128_xor(&t->t[i]->t[j + k],
|
||||
&t->t[i]->t[j], &t->t[i]->t[k]);
|
||||
|
||||
if (++i >= 16)
|
||||
break;
|
||||
|
||||
for (j = 128; j > 0; j >>= 1) {
|
||||
t->t[i]->t[j] = t->t[i - 1]->t[j];
|
||||
gf128mul_x8_lle(&t->t[i]->t[j]);
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
return t;
|
||||
}
|
||||
EXPORT_SYMBOL(gf128mul_init_64k_lle);
|
||||
|
||||
struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
|
||||
{
|
||||
struct gf128mul_64k *t;
|
||||
int i, j, k;
|
||||
|
||||
t = kzalloc(sizeof(*t), GFP_KERNEL);
|
||||
if (!t)
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
|
||||
if (!t->t[i]) {
|
||||
gf128mul_free_64k(t);
|
||||
t = NULL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
t->t[0]->t[1] = *g;
|
||||
for (j = 1; j <= 64; j <<= 1)
|
||||
gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
|
||||
|
||||
for (i = 0;;) {
|
||||
for (j = 2; j < 256; j += j)
|
||||
for (k = 1; k < j; ++k)
|
||||
be128_xor(&t->t[i]->t[j + k],
|
||||
&t->t[i]->t[j], &t->t[i]->t[k]);
|
||||
|
||||
if (++i >= 16)
|
||||
break;
|
||||
|
||||
for (j = 128; j > 0; j >>= 1) {
|
||||
t->t[i]->t[j] = t->t[i - 1]->t[j];
|
||||
gf128mul_x8_bbe(&t->t[i]->t[j]);
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
return t;
|
||||
}
|
||||
EXPORT_SYMBOL(gf128mul_init_64k_bbe);
|
||||
|
||||
void gf128mul_free_64k(struct gf128mul_64k *t)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 16; i++)
|
||||
kfree(t->t[i]);
|
||||
kfree(t);
|
||||
}
|
||||
EXPORT_SYMBOL(gf128mul_free_64k);
|
||||
|
||||
/* a <- a * g in place, using the 64k table built for g (lle). */
void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t)
{
	u8 *bytes = (u8 *)a;
	be128 acc;
	int i;

	acc = t->t[0]->t[bytes[0]];
	for (i = 1; i < 16; ++i)
		be128_xor(&acc, &acc, &t->t[i]->t[bytes[i]]);
	*a = acc;
}
EXPORT_SYMBOL(gf128mul_64k_lle);
|
||||
|
||||
/* a <- a * g in place, using the 64k table built for g (bbe);
 * bytes are consumed in the opposite order to the lle variant. */
void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t)
{
	u8 *bytes = (u8 *)a;
	be128 acc;
	int i;

	acc = t->t[0]->t[bytes[15]];
	for (i = 1; i < 16; ++i)
		be128_xor(&acc, &acc, &t->t[i]->t[bytes[15 - i]]);
	*a = acc;
}
EXPORT_SYMBOL(gf128mul_64k_bbe);
|
||||
|
||||
/* This version uses 4k bytes of table space.
|
||||
A 16 byte buffer has to be multiplied by a 16 byte key
|
||||
value in GF(128). If we consider a GF(128) value in a
|
||||
single byte, we can construct a table of the 256 16 byte
|
||||
values that result from the 256 values of this byte.
|
||||
This requires 4096 bytes. If we take the highest byte in
|
||||
the buffer and use this table to get the result, we then
|
||||
have to multiply by x^120 to get the final value. For the
|
||||
next highest byte the result has to be multiplied by x^112
|
||||
and so on. But we can do this by accumulating the result
|
||||
in an accumulator starting with the result for the top
|
||||
byte. We repeatedly multiply the accumulator value by
|
||||
x^8 and then add in (i.e. xor) the 16 bytes of the next
|
||||
lower byte in the buffer, stopping when we reach the
|
||||
lowest byte. This requires a 4096 byte table.
|
||||
*/
|
||||
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
|
||||
{
|
||||
struct gf128mul_4k *t;
|
||||
int j, k;
|
||||
|
||||
t = kzalloc(sizeof(*t), GFP_KERNEL);
|
||||
if (!t)
|
||||
goto out;
|
||||
|
||||
t->t[128] = *g;
|
||||
for (j = 64; j > 0; j >>= 1)
|
||||
gf128mul_x_lle(&t->t[j], &t->t[j+j]);
|
||||
|
||||
for (j = 2; j < 256; j += j)
|
||||
for (k = 1; k < j; ++k)
|
||||
be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
|
||||
|
||||
out:
|
||||
return t;
|
||||
}
|
||||
EXPORT_SYMBOL(gf128mul_init_4k_lle);
|
||||
|
||||
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
|
||||
{
|
||||
struct gf128mul_4k *t;
|
||||
int j, k;
|
||||
|
||||
t = kzalloc(sizeof(*t), GFP_KERNEL);
|
||||
if (!t)
|
||||
goto out;
|
||||
|
||||
t->t[1] = *g;
|
||||
for (j = 1; j <= 64; j <<= 1)
|
||||
gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
|
||||
|
||||
for (j = 2; j < 256; j += j)
|
||||
for (k = 1; k < j; ++k)
|
||||
be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
|
||||
|
||||
out:
|
||||
return t;
|
||||
}
|
||||
EXPORT_SYMBOL(gf128mul_init_4k_bbe);
|
||||
|
||||
/* a <- a * g in place using the 4k table (lle): start from the top
 * byte and fold in each lower byte after multiplying by x^8. */
void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t)
{
	u8 *bytes = (u8 *)a;
	be128 acc;
	int i;

	acc = t->t[bytes[15]];
	for (i = 14; i >= 0; i--) {
		gf128mul_x8_lle(&acc);
		be128_xor(&acc, &acc, &t->t[bytes[i]]);
	}
	*a = acc;
}
EXPORT_SYMBOL(gf128mul_4k_lle);
|
||||
|
||||
/* a <- a * g in place using the 4k table (bbe): bytes consumed in the
 * opposite order to the lle variant. */
void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t)
{
	u8 *bytes = (u8 *)a;
	be128 acc;
	int i;

	acc = t->t[bytes[0]];
	for (i = 1; i < 16; i++) {
		gf128mul_x8_bbe(&acc);
		be128_xor(&acc, &acc, &t->t[bytes[i]]);
	}
	*a = acc;
}
EXPORT_SYMBOL(gf128mul_4k_bbe);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
|
||||
+301
@@ -0,0 +1,301 @@
|
||||
/* LRW: as defined by Cyril Guyot in
|
||||
* http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
|
||||
*
|
||||
* Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
|
||||
*
|
||||
 * Based on ecb.c
|
||||
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*/
|
||||
/* This implementation is checked against the test vectors in the above
|
||||
* document and by a test vector provided by Ken Buchanan at
|
||||
* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
|
||||
*
|
||||
* The test vectors are included in the testing module tcrypt.[ch] */
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/gf128mul.h>
|
||||
|
||||
struct priv {
|
||||
struct crypto_cipher *child;
|
||||
/* optimizes multiplying a random (non incrementing, as at the
|
||||
* start of a new sector) value with key2, we could also have
|
||||
* used 4k optimization tables or no optimization at all. In the
|
||||
* latter case we would have to store key2 here */
|
||||
struct gf128mul_64k *table;
|
||||
/* stores:
|
||||
* key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
|
||||
* key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
|
||||
* key2*{ 0,0,...1,1,1,1,1 }, etc
|
||||
* needed for optimized multiplication of incrementing values
|
||||
* with key2 */
|
||||
be128 mulinc[128];
|
||||
};
|
||||
|
||||
/* Set bit @bit of a 128-bit value interpreted in the bbe convention.
 * The ^ 0x78 remaps the bbe bit index onto __set_bit's native bit
 * numbering (NOTE(review): presumably assumes 64-bit words -- kept
 * exactly as the original). */
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ 0x78, b);
}
|
||||
|
||||
static int setkey(struct crypto_tfm *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct priv *ctx = crypto_tfm_ctx(parent);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
int err, i;
|
||||
be128 tmp = { 0 };
|
||||
int bsize = crypto_cipher_blocksize(child);
|
||||
|
||||
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
if ((err = crypto_cipher_setkey(child, key, keylen - bsize)))
|
||||
return err;
|
||||
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
if (ctx->table)
|
||||
gf128mul_free_64k(ctx->table);
|
||||
|
||||
/* initialize multiplication table for Key2 */
|
||||
ctx->table = gf128mul_init_64k_bbe((be128 *)(key + keylen - bsize));
|
||||
if (!ctx->table)
|
||||
return -ENOMEM;
|
||||
|
||||
/* initialize optimization table */
|
||||
for (i = 0; i < 128; i++) {
|
||||
setbit128_bbe(&tmp, i);
|
||||
ctx->mulinc[i] = tmp;
|
||||
gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct sinfo {
|
||||
be128 t;
|
||||
struct crypto_tfm *tfm;
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
|
||||
};
|
||||
|
||||
/* 128-bit big-endian increment: bump the low word, carry into the high
 * word on wrap-around. */
static inline void inc(be128 *iv)
{
	u64 lo = be64_to_cpu(iv->b) + 1;

	iv->b = cpu_to_be64(lo);
	if (lo == 0)
		iv->a = cpu_to_be64(be64_to_cpu(iv->a) + 1);
}
|
||||
|
||||
static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
|
||||
{
|
||||
be128_xor(dst, &s->t, src); /* PP <- T xor P */
|
||||
s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */
|
||||
be128_xor(dst, dst, &s->t); /* C <- T xor CC */
|
||||
}
|
||||
|
||||
/* this returns the number of consequative 1 bits starting
|
||||
* from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
|
||||
static inline int get_index128(be128 *block)
|
||||
{
|
||||
int x;
|
||||
__be32 *p = (__be32 *) block;
|
||||
|
||||
for (p += 3, x = 0; x < 128; p--, x += 32) {
|
||||
u32 val = be32_to_cpup(p);
|
||||
|
||||
if (!~val)
|
||||
continue;
|
||||
|
||||
return x + ffz(val);
|
||||
}
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
static int crypt(struct blkcipher_desc *d,
|
||||
struct blkcipher_walk *w, struct priv *ctx,
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
|
||||
{
|
||||
int err;
|
||||
unsigned int avail;
|
||||
const int bs = crypto_cipher_blocksize(ctx->child);
|
||||
struct sinfo s = {
|
||||
.tfm = crypto_cipher_tfm(ctx->child),
|
||||
.fn = fn
|
||||
};
|
||||
be128 *iv;
|
||||
u8 *wsrc;
|
||||
u8 *wdst;
|
||||
|
||||
err = blkcipher_walk_virt(d, w);
|
||||
if (!(avail = w->nbytes))
|
||||
return err;
|
||||
|
||||
wsrc = w->src.virt.addr;
|
||||
wdst = w->dst.virt.addr;
|
||||
|
||||
/* calculate first value of T */
|
||||
iv = (be128 *)w->iv;
|
||||
s.t = *iv;
|
||||
|
||||
/* T <- I*Key2 */
|
||||
gf128mul_64k_bbe(&s.t, ctx->table);
|
||||
|
||||
goto first;
|
||||
|
||||
for (;;) {
|
||||
do {
|
||||
/* T <- I*Key2, using the optimization
|
||||
* discussed in the specification */
|
||||
be128_xor(&s.t, &s.t, &ctx->mulinc[get_index128(iv)]);
|
||||
inc(iv);
|
||||
|
||||
first:
|
||||
lrw_round(&s, wdst, wsrc);
|
||||
|
||||
wsrc += bs;
|
||||
wdst += bs;
|
||||
} while ((avail -= bs) >= bs);
|
||||
|
||||
err = blkcipher_walk_done(d, w, avail);
|
||||
if (!(avail = w->nbytes))
|
||||
break;
|
||||
|
||||
wsrc = w->src.virt.addr;
|
||||
wdst = w->dst.virt.addr;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk w;
|
||||
|
||||
blkcipher_walk_init(&w, dst, src, nbytes);
|
||||
return crypt(desc, &w, ctx,
|
||||
crypto_cipher_alg(ctx->child)->cia_encrypt);
|
||||
}
|
||||
|
||||
static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk w;
|
||||
|
||||
blkcipher_walk_init(&w, dst, src, nbytes);
|
||||
return crypt(desc, &w, ctx,
|
||||
crypto_cipher_alg(ctx->child)->cia_decrypt);
|
||||
}
|
||||
|
||||
static int init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
|
||||
struct priv *ctx = crypto_tfm_ctx(tfm);
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
|
||||
tfm = crypto_spawn_tfm(spawn);
|
||||
if (IS_ERR(tfm))
|
||||
return PTR_ERR(tfm);
|
||||
|
||||
if (crypto_tfm_alg_blocksize(tfm) != 16) {
|
||||
*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctx->child = crypto_cipher_cast(tfm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct priv *ctx = crypto_tfm_ctx(tfm);
|
||||
if (ctx->table)
|
||||
gf128mul_free_64k(ctx->table);
|
||||
crypto_free_cipher(ctx->child);
|
||||
}
|
||||
|
||||
static struct crypto_instance *alloc(void *param, unsigned int len)
|
||||
{
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *alg;
|
||||
|
||||
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
|
||||
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(alg))
|
||||
return ERR_PTR(PTR_ERR(alg));
|
||||
|
||||
inst = crypto_alloc_instance("lrw", alg);
|
||||
if (IS_ERR(inst))
|
||||
goto out_put_alg;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
|
||||
inst->alg.cra_priority = alg->cra_priority;
|
||||
inst->alg.cra_blocksize = alg->cra_blocksize;
|
||||
|
||||
if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7;
|
||||
else inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_blkcipher_type;
|
||||
|
||||
if (!(alg->cra_blocksize % 4))
|
||||
inst->alg.cra_alignmask |= 3;
|
||||
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
|
||||
inst->alg.cra_blkcipher.min_keysize =
|
||||
alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
|
||||
inst->alg.cra_blkcipher.max_keysize =
|
||||
alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct priv);
|
||||
|
||||
inst->alg.cra_init = init_tfm;
|
||||
inst->alg.cra_exit = exit_tfm;
|
||||
|
||||
inst->alg.cra_blkcipher.setkey = setkey;
|
||||
inst->alg.cra_blkcipher.encrypt = encrypt;
|
||||
inst->alg.cra_blkcipher.decrypt = decrypt;
|
||||
|
||||
out_put_alg:
|
||||
crypto_mod_put(alg);
|
||||
return inst;
|
||||
}
|
||||
|
||||
/* Template teardown: drop the spawned cipher reference and the
 * instance itself. */
static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
|
||||
|
||||
static struct crypto_template crypto_tmpl = {
|
||||
.name = "lrw",
|
||||
.alloc = alloc,
|
||||
.free = free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init crypto_module_init(void)
|
||||
{
|
||||
return crypto_register_template(&crypto_tmpl);
|
||||
}
|
||||
|
||||
static void __exit crypto_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&crypto_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_module_init);
|
||||
module_exit(crypto_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("LRW block cipher mode");
|
||||
@@ -906,6 +906,10 @@ static void do_test(void)
|
||||
AES_CBC_ENC_TEST_VECTORS);
|
||||
test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
|
||||
AES_CBC_DEC_TEST_VECTORS);
|
||||
test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template,
|
||||
AES_LRW_ENC_TEST_VECTORS);
|
||||
test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template,
|
||||
AES_LRW_DEC_TEST_VECTORS);
|
||||
|
||||
//CAST5
|
||||
test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
|
||||
@@ -977,6 +981,9 @@ static void do_test(void)
|
||||
test_hash("hmac(sha256)", hmac_sha256_tv_template,
|
||||
HMAC_SHA256_TEST_VECTORS);
|
||||
|
||||
test_hash("xcbc(aes)", aes_xcbc128_tv_template,
|
||||
XCBC_AES_TEST_VECTORS);
|
||||
|
||||
test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
@@ -1052,6 +1059,10 @@ static void do_test(void)
|
||||
AES_CBC_ENC_TEST_VECTORS);
|
||||
test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
|
||||
AES_CBC_DEC_TEST_VECTORS);
|
||||
test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template,
|
||||
AES_LRW_ENC_TEST_VECTORS);
|
||||
test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template,
|
||||
AES_LRW_DEC_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 11:
|
||||
@@ -1191,6 +1202,10 @@ static void do_test(void)
|
||||
aes_speed_template);
|
||||
test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
|
||||
aes_speed_template);
|
||||
test_cipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
|
||||
aes_lrw_speed_template);
|
||||
test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
|
||||
aes_lrw_speed_template);
|
||||
break;
|
||||
|
||||
case 201:
|
||||
|
||||
+598
-4
File diff suppressed because it is too large
Load Diff
+348
@@ -0,0 +1,348 @@
|
||||
/*
|
||||
* Copyright (C)2006 USAGI/WIDE Project
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
* Author:
|
||||
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
|
||||
*/
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include "internal.h"
|
||||
|
||||
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
|
||||
0x02020202, 0x02020202, 0x02020202, 0x02020202,
|
||||
0x03030303, 0x03030303, 0x03030303, 0x03030303};
|
||||
/*
|
||||
* +------------------------
|
||||
* | <parent tfm>
|
||||
* +------------------------
|
||||
* | crypto_xcbc_ctx
|
||||
* +------------------------
|
||||
* | odds (block size)
|
||||
* +------------------------
|
||||
* | prev (block size)
|
||||
* +------------------------
|
||||
* | key (block size)
|
||||
* +------------------------
|
||||
* | consts (block size * 3)
|
||||
* +------------------------
|
||||
*/
|
||||
struct crypto_xcbc_ctx {
|
||||
struct crypto_tfm *child;
|
||||
u8 *odds;
|
||||
u8 *prev;
|
||||
u8 *key;
|
||||
u8 *consts;
|
||||
void (*xor)(u8 *a, const u8 *b, unsigned int bs);
|
||||
unsigned int keylen;
|
||||
unsigned int len;
|
||||
};
|
||||
|
||||
static void xor_128(u8 *a, const u8 *b, unsigned int bs)
|
||||
{
|
||||
((u32 *)a)[0] ^= ((u32 *)b)[0];
|
||||
((u32 *)a)[1] ^= ((u32 *)b)[1];
|
||||
((u32 *)a)[2] ^= ((u32 *)b)[2];
|
||||
((u32 *)a)[3] ^= ((u32 *)b)[3];
|
||||
}
|
||||
|
||||
static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
|
||||
struct crypto_xcbc_ctx *ctx)
|
||||
{
|
||||
int bs = crypto_hash_blocksize(parent);
|
||||
int err = 0;
|
||||
u8 key1[bs];
|
||||
|
||||
if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
|
||||
return err;
|
||||
|
||||
ctx->child->__crt_alg->cra_cipher.cia_encrypt(ctx->child, key1,
|
||||
ctx->consts);
|
||||
|
||||
return crypto_cipher_setkey(ctx->child, key1, bs);
|
||||
}
|
||||
|
||||
static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
|
||||
const u8 *inkey, unsigned int keylen)
|
||||
{
|
||||
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
|
||||
|
||||
if (keylen != crypto_tfm_alg_blocksize(ctx->child))
|
||||
return -EINVAL;
|
||||
|
||||
ctx->keylen = keylen;
|
||||
memcpy(ctx->key, inkey, keylen);
|
||||
ctx->consts = (u8*)ks;
|
||||
|
||||
return _crypto_xcbc_digest_setkey(parent, ctx);
|
||||
}
|
||||
|
||||
static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
|
||||
{
|
||||
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
|
||||
int bs = crypto_hash_blocksize(pdesc->tfm);
|
||||
|
||||
ctx->len = 0;
|
||||
memset(ctx->odds, 0, bs);
|
||||
memset(ctx->prev, 0, bs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
|
||||
struct scatterlist *sg,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct crypto_hash *parent = pdesc->tfm;
|
||||
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
|
||||
struct crypto_tfm *tfm = ctx->child;
|
||||
int bs = crypto_hash_blocksize(parent);
|
||||
unsigned int i = 0;
|
||||
|
||||
do {
|
||||
|
||||
struct page *pg = sg[i].page;
|
||||
unsigned int offset = sg[i].offset;
|
||||
unsigned int slen = sg[i].length;
|
||||
|
||||
while (slen > 0) {
|
||||
unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
|
||||
char *p = crypto_kmap(pg, 0) + offset;
|
||||
|
||||
/* checking the data can fill the block */
|
||||
if ((ctx->len + len) <= bs) {
|
||||
memcpy(ctx->odds + ctx->len, p, len);
|
||||
ctx->len += len;
|
||||
slen -= len;
|
||||
|
||||
/* checking the rest of the page */
|
||||
if (len + offset >= PAGE_SIZE) {
|
||||
offset = 0;
|
||||
pg++;
|
||||
} else
|
||||
offset += len;
|
||||
|
||||
crypto_kunmap(p, 0);
|
||||
crypto_yield(tfm->crt_flags);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* filling odds with new data and encrypting it */
|
||||
memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
|
||||
len -= bs - ctx->len;
|
||||
p += bs - ctx->len;
|
||||
|
||||
ctx->xor(ctx->prev, ctx->odds, bs);
|
||||
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
|
||||
|
||||
/* clearing the length */
|
||||
ctx->len = 0;
|
||||
|
||||
/* encrypting the rest of data */
|
||||
while (len > bs) {
|
||||
ctx->xor(ctx->prev, p, bs);
|
||||
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
|
||||
p += bs;
|
||||
len -= bs;
|
||||
}
|
||||
|
||||
/* keeping the surplus of blocksize */
|
||||
if (len) {
|
||||
memcpy(ctx->odds, p, len);
|
||||
ctx->len = len;
|
||||
}
|
||||
crypto_kunmap(p, 0);
|
||||
crypto_yield(tfm->crt_flags);
|
||||
slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
|
||||
offset = 0;
|
||||
pg++;
|
||||
}
|
||||
nbytes-=sg[i].length;
|
||||
i++;
|
||||
} while (nbytes>0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
|
||||
{
|
||||
struct crypto_hash *parent = pdesc->tfm;
|
||||
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
|
||||
struct crypto_tfm *tfm = ctx->child;
|
||||
int bs = crypto_hash_blocksize(parent);
|
||||
int err = 0;
|
||||
|
||||
if (ctx->len == bs) {
|
||||
u8 key2[bs];
|
||||
|
||||
if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
|
||||
return err;
|
||||
|
||||
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key2, (const u8*)(ctx->consts+bs));
|
||||
|
||||
ctx->xor(ctx->prev, ctx->odds, bs);
|
||||
ctx->xor(ctx->prev, key2, bs);
|
||||
_crypto_xcbc_digest_setkey(parent, ctx);
|
||||
|
||||
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
|
||||
} else {
|
||||
u8 key3[bs];
|
||||
unsigned int rlen;
|
||||
u8 *p = ctx->odds + ctx->len;
|
||||
*p = 0x80;
|
||||
p++;
|
||||
|
||||
rlen = bs - ctx->len -1;
|
||||
if (rlen)
|
||||
memset(p, 0, rlen);
|
||||
|
||||
if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
|
||||
return err;
|
||||
|
||||
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key3, (const u8*)(ctx->consts+bs*2));
|
||||
|
||||
ctx->xor(ctx->prev, ctx->odds, bs);
|
||||
ctx->xor(ctx->prev, key3, bs);
|
||||
|
||||
_crypto_xcbc_digest_setkey(parent, ctx);
|
||||
|
||||
tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_xcbc_digest(struct hash_desc *pdesc,
|
||||
struct scatterlist *sg, unsigned int nbytes, u8 *out)
|
||||
{
|
||||
crypto_xcbc_digest_init(pdesc);
|
||||
crypto_xcbc_digest_update(pdesc, sg, nbytes);
|
||||
return crypto_xcbc_digest_final(pdesc, out);
|
||||
}
|
||||
|
||||
static int xcbc_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
|
||||
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
|
||||
int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
|
||||
|
||||
tfm = crypto_spawn_tfm(spawn);
|
||||
if (IS_ERR(tfm))
|
||||
return PTR_ERR(tfm);
|
||||
|
||||
switch(bs) {
|
||||
case 16:
|
||||
ctx->xor = xor_128;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctx->child = crypto_cipher_cast(tfm);
|
||||
ctx->odds = (u8*)(ctx+1);
|
||||
ctx->prev = ctx->odds + bs;
|
||||
ctx->key = ctx->prev + bs;
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
static void xcbc_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
|
||||
crypto_free_cipher(ctx->child);
|
||||
}
|
||||
|
||||
static struct crypto_instance *xcbc_alloc(void *param, unsigned int len)
|
||||
{
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *alg;
|
||||
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
|
||||
CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(alg))
|
||||
return ERR_PTR(PTR_ERR(alg));
|
||||
|
||||
switch(alg->cra_blocksize) {
|
||||
case 16:
|
||||
break;
|
||||
default:
|
||||
return ERR_PTR(PTR_ERR(alg));
|
||||
}
|
||||
|
||||
inst = crypto_alloc_instance("xcbc", alg);
|
||||
if (IS_ERR(inst))
|
||||
goto out_put_alg;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
|
||||
inst->alg.cra_priority = alg->cra_priority;
|
||||
inst->alg.cra_blocksize = alg->cra_blocksize;
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_hash_type;
|
||||
|
||||
inst->alg.cra_hash.digestsize =
|
||||
(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
|
||||
CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
|
||||
alg->cra_blocksize;
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
|
||||
ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
|
||||
inst->alg.cra_init = xcbc_init_tfm;
|
||||
inst->alg.cra_exit = xcbc_exit_tfm;
|
||||
|
||||
inst->alg.cra_hash.init = crypto_xcbc_digest_init;
|
||||
inst->alg.cra_hash.update = crypto_xcbc_digest_update;
|
||||
inst->alg.cra_hash.final = crypto_xcbc_digest_final;
|
||||
inst->alg.cra_hash.digest = crypto_xcbc_digest;
|
||||
inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey;
|
||||
|
||||
out_put_alg:
|
||||
crypto_mod_put(alg);
|
||||
return inst;
|
||||
}
|
||||
|
||||
static void xcbc_free(struct crypto_instance *inst)
|
||||
{
|
||||
crypto_drop_spawn(crypto_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_xcbc_tmpl = {
|
||||
.name = "xcbc",
|
||||
.alloc = xcbc_alloc,
|
||||
.free = xcbc_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init crypto_xcbc_module_init(void)
|
||||
{
|
||||
return crypto_register_template(&crypto_xcbc_tmpl);
|
||||
}
|
||||
|
||||
static void __exit crypto_xcbc_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&crypto_xcbc_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_xcbc_module_init);
|
||||
module_exit(crypto_xcbc_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
|
||||
@@ -41,7 +41,7 @@ ifeq ($(CONFIG_ATM_FORE200E_PCA),y)
|
||||
# guess the target endianess to choose the right PCA-200E firmware image
|
||||
ifeq ($(CONFIG_ATM_FORE200E_PCA_DEFAULT_FW),y)
|
||||
byteorder.h := include$(if $(patsubst $(srctree),,$(objtree)),2)/asm/byteorder.h
|
||||
CONFIG_ATM_FORE200E_PCA_FW := $(obj)/pca200e$(if $(shell $(CC) -E -dM $(byteorder.h) | grep ' __LITTLE_ENDIAN '),.bin,_ecd.bin2)
|
||||
CONFIG_ATM_FORE200E_PCA_FW := $(obj)/pca200e$(if $(shell $(CC) $(CPPFLAGS) -E -dM $(byteorder.h) | grep ' __LITTLE_ENDIAN '),.bin,_ecd.bin2)
|
||||
endif
|
||||
endif
|
||||
|
||||
|
||||
@@ -306,6 +306,7 @@ config BLK_DEV_LOOP
|
||||
config BLK_DEV_CRYPTOLOOP
|
||||
tristate "Cryptoloop Support"
|
||||
select CRYPTO
|
||||
select CRYPTO_CBC
|
||||
depends on BLK_DEV_LOOP
|
||||
---help---
|
||||
Say Y here if you want to be able to use the ciphers that are
|
||||
|
||||
@@ -51,4 +51,17 @@ config CRYPTO_DEV_PADLOCK_SHA
|
||||
If unsure say M. The compiled module will be
|
||||
called padlock-sha.ko
|
||||
|
||||
config CRYPTO_DEV_GEODE
|
||||
tristate "Support for the Geode LX AES engine"
|
||||
depends on CRYPTO && X86_32
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_BLKCIPHER
|
||||
default m
|
||||
help
|
||||
Say 'Y' here to use the AMD Geode LX processor on-board AES
|
||||
engine for the CryptoAPI AES alogrithm.
|
||||
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called geode-aes.
|
||||
|
||||
endmenu
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
|
||||
|
||||
@@ -0,0 +1,474 @@
|
||||
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci_ids.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <crypto/algapi.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/delay.h>
|
||||
|
||||
#include "geode-aes.h"
|
||||
|
||||
/* Register definitions */
|
||||
|
||||
#define AES_CTRLA_REG 0x0000
|
||||
|
||||
#define AES_CTRL_START 0x01
|
||||
#define AES_CTRL_DECRYPT 0x00
|
||||
#define AES_CTRL_ENCRYPT 0x02
|
||||
#define AES_CTRL_WRKEY 0x04
|
||||
#define AES_CTRL_DCA 0x08
|
||||
#define AES_CTRL_SCA 0x10
|
||||
#define AES_CTRL_CBC 0x20
|
||||
|
||||
#define AES_INTR_REG 0x0008
|
||||
|
||||
#define AES_INTRA_PENDING (1 << 16)
|
||||
#define AES_INTRB_PENDING (1 << 17)
|
||||
|
||||
#define AES_INTR_PENDING (AES_INTRA_PENDING | AES_INTRB_PENDING)
|
||||
#define AES_INTR_MASK 0x07
|
||||
|
||||
#define AES_SOURCEA_REG 0x0010
|
||||
#define AES_DSTA_REG 0x0014
|
||||
#define AES_LENA_REG 0x0018
|
||||
#define AES_WRITEKEY0_REG 0x0030
|
||||
#define AES_WRITEIV0_REG 0x0040
|
||||
|
||||
/* A very large counter that is used to gracefully bail out of an
|
||||
* operation in case of trouble
|
||||
*/
|
||||
|
||||
#define AES_OP_TIMEOUT 0x50000
|
||||
|
||||
/* Static structures */
|
||||
|
||||
static void __iomem * _iobase;
|
||||
static spinlock_t lock;
|
||||
|
||||
/* Write a 128 bit field (either a writable key or IV) */
|
||||
static inline void
|
||||
_writefield(u32 offset, void *value)
|
||||
{
|
||||
int i;
|
||||
for(i = 0; i < 4; i++)
|
||||
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
|
||||
}
|
||||
|
||||
/* Read a 128 bit field (either a writable key or IV) */
|
||||
static inline void
|
||||
_readfield(u32 offset, void *value)
|
||||
{
|
||||
int i;
|
||||
for(i = 0; i < 4; i++)
|
||||
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
|
||||
}
|
||||
|
||||
static int
|
||||
do_crypt(void *src, void *dst, int len, u32 flags)
|
||||
{
|
||||
u32 status;
|
||||
u32 counter = AES_OP_TIMEOUT;
|
||||
|
||||
iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
|
||||
iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
|
||||
iowrite32(len, _iobase + AES_LENA_REG);
|
||||
|
||||
/* Start the operation */
|
||||
iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
|
||||
|
||||
do
|
||||
status = ioread32(_iobase + AES_INTR_REG);
|
||||
while(!(status & AES_INTRA_PENDING) && --counter);
|
||||
|
||||
/* Clear the event */
|
||||
iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
|
||||
return counter ? 0 : 1;
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
geode_aes_crypt(struct geode_aes_op *op)
|
||||
{
|
||||
|
||||
u32 flags = 0;
|
||||
int iflags;
|
||||
|
||||
if (op->len == 0 || op->src == op->dst)
|
||||
return 0;
|
||||
|
||||
if (op->flags & AES_FLAGS_COHERENT)
|
||||
flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
|
||||
|
||||
if (op->dir == AES_DIR_ENCRYPT)
|
||||
flags |= AES_CTRL_ENCRYPT;
|
||||
|
||||
/* Start the critical section */
|
||||
|
||||
spin_lock_irqsave(&lock, iflags);
|
||||
|
||||
if (op->mode == AES_MODE_CBC) {
|
||||
flags |= AES_CTRL_CBC;
|
||||
_writefield(AES_WRITEIV0_REG, op->iv);
|
||||
}
|
||||
|
||||
if (op->flags & AES_FLAGS_USRKEY) {
|
||||
flags |= AES_CTRL_WRKEY;
|
||||
_writefield(AES_WRITEKEY0_REG, op->key);
|
||||
}
|
||||
|
||||
do_crypt(op->src, op->dst, op->len, flags);
|
||||
|
||||
if (op->mode == AES_MODE_CBC)
|
||||
_readfield(AES_WRITEIV0_REG, op->iv);
|
||||
|
||||
spin_unlock_irqrestore(&lock, iflags);
|
||||
|
||||
return op->len;
|
||||
}
|
||||
|
||||
/* CRYPTO-API Functions */
|
||||
|
||||
static int
|
||||
geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (len != AES_KEY_LENGTH) {
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(op->key, key, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
if ((out == NULL) || (in == NULL))
|
||||
return;
|
||||
|
||||
op->src = (void *) in;
|
||||
op->dst = (void *) out;
|
||||
op->mode = AES_MODE_ECB;
|
||||
op->flags = 0;
|
||||
op->len = AES_MIN_BLOCK_SIZE;
|
||||
op->dir = AES_DIR_ENCRYPT;
|
||||
|
||||
geode_aes_crypt(op);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
if ((out == NULL) || (in == NULL))
|
||||
return;
|
||||
|
||||
op->src = (void *) in;
|
||||
op->dst = (void *) out;
|
||||
op->mode = AES_MODE_ECB;
|
||||
op->flags = 0;
|
||||
op->len = AES_MIN_BLOCK_SIZE;
|
||||
op->dir = AES_DIR_DECRYPT;
|
||||
|
||||
geode_aes_crypt(op);
|
||||
}
|
||||
|
||||
|
||||
static struct crypto_alg geode_alg = {
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "geode-aes-128",
|
||||
.cra_priority = 300,
|
||||
.cra_alignmask = 15,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = AES_MIN_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct geode_aes_op),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_alg.cra_list),
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = AES_KEY_LENGTH,
|
||||
.cia_max_keysize = AES_KEY_LENGTH,
|
||||
.cia_setkey = geode_setkey,
|
||||
.cia_encrypt = geode_encrypt,
|
||||
.cia_decrypt = geode_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int
|
||||
geode_cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err, ret;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while((nbytes = walk.nbytes)) {
|
||||
op->src = walk.src.virt.addr,
|
||||
op->dst = walk.dst.virt.addr;
|
||||
op->mode = AES_MODE_CBC;
|
||||
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
|
||||
op->dir = AES_DIR_DECRYPT;
|
||||
|
||||
memcpy(op->iv, walk.iv, AES_IV_LENGTH);
|
||||
|
||||
ret = geode_aes_crypt(op);
|
||||
|
||||
memcpy(walk.iv, op->iv, AES_IV_LENGTH);
|
||||
nbytes -= ret;
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
geode_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err, ret;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while((nbytes = walk.nbytes)) {
|
||||
op->src = walk.src.virt.addr,
|
||||
op->dst = walk.dst.virt.addr;
|
||||
op->mode = AES_MODE_CBC;
|
||||
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
|
||||
op->dir = AES_DIR_ENCRYPT;
|
||||
|
||||
memcpy(op->iv, walk.iv, AES_IV_LENGTH);
|
||||
|
||||
ret = geode_aes_crypt(op);
|
||||
nbytes -= ret;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg geode_cbc_alg = {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "cbc-aes-geode-128",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_MIN_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct geode_aes_op),
|
||||
.cra_alignmask = 15,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_KEY_LENGTH,
|
||||
.max_keysize = AES_KEY_LENGTH,
|
||||
.setkey = geode_setkey,
|
||||
.encrypt = geode_cbc_encrypt,
|
||||
.decrypt = geode_cbc_decrypt,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int
|
||||
geode_ecb_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err, ret;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while((nbytes = walk.nbytes)) {
|
||||
op->src = walk.src.virt.addr,
|
||||
op->dst = walk.dst.virt.addr;
|
||||
op->mode = AES_MODE_ECB;
|
||||
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
|
||||
op->dir = AES_DIR_DECRYPT;
|
||||
|
||||
ret = geode_aes_crypt(op);
|
||||
nbytes -= ret;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
geode_ecb_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
int err, ret;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while((nbytes = walk.nbytes)) {
|
||||
op->src = walk.src.virt.addr,
|
||||
op->dst = walk.dst.virt.addr;
|
||||
op->mode = AES_MODE_ECB;
|
||||
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
|
||||
op->dir = AES_DIR_ENCRYPT;
|
||||
|
||||
ret = geode_aes_crypt(op);
|
||||
nbytes -= ret;
|
||||
ret = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg geode_ecb_alg = {
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb-aes-geode-128",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = AES_MIN_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct geode_aes_op),
|
||||
.cra_alignmask = 15,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_KEY_LENGTH,
|
||||
.max_keysize = AES_KEY_LENGTH,
|
||||
.setkey = geode_setkey,
|
||||
.encrypt = geode_ecb_encrypt,
|
||||
.decrypt = geode_ecb_decrypt,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static void
|
||||
geode_aes_remove(struct pci_dev *dev)
|
||||
{
|
||||
crypto_unregister_alg(&geode_alg);
|
||||
crypto_unregister_alg(&geode_ecb_alg);
|
||||
crypto_unregister_alg(&geode_cbc_alg);
|
||||
|
||||
pci_iounmap(dev, _iobase);
|
||||
_iobase = NULL;
|
||||
|
||||
pci_release_regions(dev);
|
||||
pci_disable_device(dev);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if ((ret = pci_enable_device(dev)))
|
||||
return ret;
|
||||
|
||||
if ((ret = pci_request_regions(dev, "geode-aes-128")))
|
||||
goto eenable;
|
||||
|
||||
_iobase = pci_iomap(dev, 0, 0);
|
||||
|
||||
if (_iobase == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto erequest;
|
||||
}
|
||||
|
||||
spin_lock_init(&lock);
|
||||
|
||||
/* Clear any pending activity */
|
||||
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
|
||||
|
||||
if ((ret = crypto_register_alg(&geode_alg)))
|
||||
goto eiomap;
|
||||
|
||||
if ((ret = crypto_register_alg(&geode_ecb_alg)))
|
||||
goto ealg;
|
||||
|
||||
if ((ret = crypto_register_alg(&geode_cbc_alg)))
|
||||
goto eecb;
|
||||
|
||||
printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
|
||||
return 0;
|
||||
|
||||
eecb:
|
||||
crypto_unregister_alg(&geode_ecb_alg);
|
||||
|
||||
ealg:
|
||||
crypto_unregister_alg(&geode_alg);
|
||||
|
||||
eiomap:
|
||||
pci_iounmap(dev, _iobase);
|
||||
|
||||
erequest:
|
||||
pci_release_regions(dev);
|
||||
|
||||
eenable:
|
||||
pci_disable_device(dev);
|
||||
|
||||
printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct pci_device_id geode_aes_tbl[] = {
|
||||
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID} ,
|
||||
{ 0, }
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, geode_aes_tbl);
|
||||
|
||||
static struct pci_driver geode_aes_driver = {
|
||||
.name = "Geode LX AES",
|
||||
.id_table = geode_aes_tbl,
|
||||
.probe = geode_aes_probe,
|
||||
.remove = __devexit_p(geode_aes_remove)
|
||||
};
|
||||
|
||||
static int __init
|
||||
geode_aes_init(void)
|
||||
{
|
||||
return pci_module_init(&geode_aes_driver);
|
||||
}
|
||||
|
||||
static void __exit
|
||||
geode_aes_exit(void)
|
||||
{
|
||||
pci_unregister_driver(&geode_aes_driver);
|
||||
}
|
||||
|
||||
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
|
||||
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
module_init(geode_aes_init);
|
||||
module_exit(geode_aes_exit);
|
||||
@@ -0,0 +1,40 @@
|
||||
/* Copyright (C) 2003-2006, Advanced Micro Devices, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#ifndef _GEODE_AES_H_
|
||||
#define _GEODE_AES_H_
|
||||
|
||||
#define AES_KEY_LENGTH 16
|
||||
#define AES_IV_LENGTH 16
|
||||
|
||||
#define AES_MIN_BLOCK_SIZE 16
|
||||
|
||||
#define AES_MODE_ECB 0
|
||||
#define AES_MODE_CBC 1
|
||||
|
||||
#define AES_DIR_DECRYPT 0
|
||||
#define AES_DIR_ENCRYPT 1
|
||||
|
||||
#define AES_FLAGS_USRKEY (1 << 0)
|
||||
#define AES_FLAGS_COHERENT (1 << 1)
|
||||
|
||||
struct geode_aes_op {
|
||||
|
||||
void *src;
|
||||
void *dst;
|
||||
|
||||
u32 mode;
|
||||
u32 dir;
|
||||
u32 flags;
|
||||
int len;
|
||||
|
||||
u8 key[AES_KEY_LENGTH];
|
||||
u8 iv[AES_IV_LENGTH];
|
||||
};
|
||||
|
||||
#endif
|
||||
+61
-7
@@ -20,6 +20,7 @@
|
||||
#include <asm/atomic.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include "dm.h"
|
||||
|
||||
@@ -85,7 +86,10 @@ struct crypt_config {
|
||||
*/
|
||||
struct crypt_iv_operations *iv_gen_ops;
|
||||
char *iv_mode;
|
||||
struct crypto_cipher *iv_gen_private;
|
||||
union {
|
||||
struct crypto_cipher *essiv_tfm;
|
||||
int benbi_shift;
|
||||
} iv_gen_private;
|
||||
sector_t iv_offset;
|
||||
unsigned int iv_size;
|
||||
|
||||
@@ -113,6 +117,9 @@ static struct kmem_cache *_crypt_io_pool;
|
||||
* encrypted with the bulk cipher using a salt as key. The salt
|
||||
* should be derived from the bulk cipher's key via hashing.
|
||||
*
|
||||
* benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
|
||||
* (needed for LRW-32-AES and possible other narrow block modes)
|
||||
*
|
||||
* plumb: unimplemented, see:
|
||||
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
|
||||
*/
|
||||
@@ -191,21 +198,61 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
|
||||
}
|
||||
kfree(salt);
|
||||
|
||||
cc->iv_gen_private = essiv_tfm;
|
||||
cc->iv_gen_private.essiv_tfm = essiv_tfm;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
|
||||
{
|
||||
crypto_free_cipher(cc->iv_gen_private);
|
||||
cc->iv_gen_private = NULL;
|
||||
crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
|
||||
cc->iv_gen_private.essiv_tfm = NULL;
|
||||
}
|
||||
|
||||
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
|
||||
{
|
||||
memset(iv, 0, cc->iv_size);
|
||||
*(u64 *)iv = cpu_to_le64(sector);
|
||||
crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
|
||||
crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
|
||||
const char *opts)
|
||||
{
|
||||
unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
|
||||
int log = long_log2(bs);
|
||||
|
||||
/* we need to calculate how far we must shift the sector count
|
||||
* to get the cipher block count, we use this shift in _gen */
|
||||
|
||||
if (1 << log != bs) {
|
||||
ti->error = "cypher blocksize is not a power of 2";
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (log > 9) {
|
||||
ti->error = "cypher blocksize is > 512";
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cc->iv_gen_private.benbi_shift = 9 - log;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
|
||||
{
|
||||
}
|
||||
|
||||
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
|
||||
{
|
||||
__be64 val;
|
||||
|
||||
memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
|
||||
|
||||
val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
|
||||
put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -219,13 +266,18 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
|
||||
.generator = crypt_iv_essiv_gen
|
||||
};
|
||||
|
||||
static struct crypt_iv_operations crypt_iv_benbi_ops = {
|
||||
.ctr = crypt_iv_benbi_ctr,
|
||||
.dtr = crypt_iv_benbi_dtr,
|
||||
.generator = crypt_iv_benbi_gen
|
||||
};
|
||||
|
||||
static int
|
||||
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
|
||||
struct scatterlist *in, unsigned int length,
|
||||
int write, sector_t sector)
|
||||
{
|
||||
u8 iv[cc->iv_size];
|
||||
u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
|
||||
struct blkcipher_desc desc = {
|
||||
.tfm = cc->tfm,
|
||||
.info = iv,
|
||||
@@ -768,7 +820,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
cc->tfm = tfm;
|
||||
|
||||
/*
|
||||
* Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
|
||||
* Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
|
||||
* See comments at iv code
|
||||
*/
|
||||
|
||||
@@ -778,6 +830,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
cc->iv_gen_ops = &crypt_iv_plain_ops;
|
||||
else if (strcmp(ivmode, "essiv") == 0)
|
||||
cc->iv_gen_ops = &crypt_iv_essiv_ops;
|
||||
else if (strcmp(ivmode, "benbi") == 0)
|
||||
cc->iv_gen_ops = &crypt_iv_benbi_ops;
|
||||
else {
|
||||
ti->error = "Invalid IV mode";
|
||||
goto bad2;
|
||||
|
||||
@@ -704,9 +704,9 @@ static int pxa_irda_stop(struct net_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pxa_irda_suspend(struct device *_dev, pm_message_t state)
|
||||
static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
|
||||
{
|
||||
struct net_device *dev = dev_get_drvdata(_dev);
|
||||
struct net_device *dev = platform_get_drvdata(_dev);
|
||||
struct pxa_irda *si;
|
||||
|
||||
if (dev && netif_running(dev)) {
|
||||
@@ -718,9 +718,9 @@ static int pxa_irda_suspend(struct device *_dev, pm_message_t state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pxa_irda_resume(struct device *_dev)
|
||||
static int pxa_irda_resume(struct platform_device *_dev)
|
||||
{
|
||||
struct net_device *dev = dev_get_drvdata(_dev);
|
||||
struct net_device *dev = platform_get_drvdata(_dev);
|
||||
struct pxa_irda *si;
|
||||
|
||||
if (dev && netif_running(dev)) {
|
||||
@@ -746,9 +746,8 @@ static int pxa_irda_init_iobuf(iobuff_t *io, int size)
|
||||
return io->head ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int pxa_irda_probe(struct device *_dev)
|
||||
static int pxa_irda_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(_dev);
|
||||
struct net_device *dev;
|
||||
struct pxa_irda *si;
|
||||
unsigned int baudrate_mask;
|
||||
@@ -822,9 +821,9 @@ err_mem_1:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pxa_irda_remove(struct device *_dev)
|
||||
static int pxa_irda_remove(struct platform_device *_dev)
|
||||
{
|
||||
struct net_device *dev = dev_get_drvdata(_dev);
|
||||
struct net_device *dev = platform_get_drvdata(_dev);
|
||||
|
||||
if (dev) {
|
||||
struct pxa_irda *si = netdev_priv(dev);
|
||||
@@ -840,9 +839,10 @@ static int pxa_irda_remove(struct device *_dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct device_driver pxa_ir_driver = {
|
||||
.name = "pxa2xx-ir",
|
||||
.bus = &platform_bus_type,
|
||||
static struct platform_driver pxa_ir_driver = {
|
||||
.driver = {
|
||||
.name = "pxa2xx-ir",
|
||||
},
|
||||
.probe = pxa_irda_probe,
|
||||
.remove = pxa_irda_remove,
|
||||
.suspend = pxa_irda_suspend,
|
||||
@@ -851,12 +851,12 @@ static struct device_driver pxa_ir_driver = {
|
||||
|
||||
static int __init pxa_irda_init(void)
|
||||
{
|
||||
return driver_register(&pxa_ir_driver);
|
||||
return platform_driver_register(&pxa_ir_driver);
|
||||
}
|
||||
|
||||
static void __exit pxa_irda_exit(void)
|
||||
{
|
||||
driver_unregister(&pxa_ir_driver);
|
||||
platform_driver_unregister(&pxa_ir_driver);
|
||||
}
|
||||
|
||||
module_init(pxa_irda_init);
|
||||
|
||||
+82
-60
@@ -68,8 +68,8 @@
|
||||
|
||||
#define DRV_MODULE_NAME "tg3"
|
||||
#define PFX DRV_MODULE_NAME ": "
|
||||
#define DRV_MODULE_VERSION "3.69"
|
||||
#define DRV_MODULE_RELDATE "November 15, 2006"
|
||||
#define DRV_MODULE_VERSION "3.70"
|
||||
#define DRV_MODULE_RELDATE "December 1, 2006"
|
||||
|
||||
#define TG3_DEF_MAC_MODE 0
|
||||
#define TG3_DEF_RX_MODE 0
|
||||
@@ -192,6 +192,7 @@ static struct pci_device_id tg3_pci_tbl[] = {
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
|
||||
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
|
||||
@@ -1061,7 +1062,7 @@ static void tg3_frob_aux_power(struct tg3 *tp)
|
||||
{
|
||||
struct tg3 *tp_peer = tp;
|
||||
|
||||
if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
|
||||
if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
|
||||
return;
|
||||
|
||||
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
|
||||
@@ -1212,8 +1213,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
|
||||
power_control);
|
||||
udelay(100); /* Delay after power state change */
|
||||
|
||||
/* Switch out of Vaux if it is not a LOM */
|
||||
if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
|
||||
/* Switch out of Vaux if it is a NIC */
|
||||
if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
|
||||
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
|
||||
|
||||
return 0;
|
||||
@@ -1401,8 +1402,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
|
||||
static void tg3_link_report(struct tg3 *tp)
|
||||
{
|
||||
if (!netif_carrier_ok(tp->dev)) {
|
||||
printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
|
||||
} else {
|
||||
if (netif_msg_link(tp))
|
||||
printk(KERN_INFO PFX "%s: Link is down.\n",
|
||||
tp->dev->name);
|
||||
} else if (netif_msg_link(tp)) {
|
||||
printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
|
||||
tp->dev->name,
|
||||
(tp->link_config.active_speed == SPEED_1000 ?
|
||||
@@ -1557,12 +1560,6 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
|
||||
|
||||
tg3_writephy(tp, MII_ADVERTISE, new_adv);
|
||||
} else if (tp->link_config.speed == SPEED_INVALID) {
|
||||
tp->link_config.advertising =
|
||||
(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
|
||||
ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
|
||||
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
|
||||
ADVERTISED_Autoneg | ADVERTISED_MII);
|
||||
|
||||
if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
|
||||
tp->link_config.advertising &=
|
||||
~(ADVERTISED_1000baseT_Half |
|
||||
@@ -1706,25 +1703,36 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int tg3_copper_is_advertising_all(struct tg3 *tp)
|
||||
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
|
||||
{
|
||||
u32 adv_reg, all_mask;
|
||||
u32 adv_reg, all_mask = 0;
|
||||
|
||||
if (mask & ADVERTISED_10baseT_Half)
|
||||
all_mask |= ADVERTISE_10HALF;
|
||||
if (mask & ADVERTISED_10baseT_Full)
|
||||
all_mask |= ADVERTISE_10FULL;
|
||||
if (mask & ADVERTISED_100baseT_Half)
|
||||
all_mask |= ADVERTISE_100HALF;
|
||||
if (mask & ADVERTISED_100baseT_Full)
|
||||
all_mask |= ADVERTISE_100FULL;
|
||||
|
||||
if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
|
||||
return 0;
|
||||
|
||||
all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
|
||||
ADVERTISE_100HALF | ADVERTISE_100FULL);
|
||||
if ((adv_reg & all_mask) != all_mask)
|
||||
return 0;
|
||||
if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
|
||||
u32 tg3_ctrl;
|
||||
|
||||
all_mask = 0;
|
||||
if (mask & ADVERTISED_1000baseT_Half)
|
||||
all_mask |= ADVERTISE_1000HALF;
|
||||
if (mask & ADVERTISED_1000baseT_Full)
|
||||
all_mask |= ADVERTISE_1000FULL;
|
||||
|
||||
if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
|
||||
return 0;
|
||||
|
||||
all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
|
||||
MII_TG3_CTRL_ADV_1000_FULL);
|
||||
if ((tg3_ctrl & all_mask) != all_mask)
|
||||
return 0;
|
||||
}
|
||||
@@ -1884,7 +1892,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
|
||||
/* Force autoneg restart if we are exiting
|
||||
* low power mode.
|
||||
*/
|
||||
if (!tg3_copper_is_advertising_all(tp))
|
||||
if (!tg3_copper_is_advertising_all(tp,
|
||||
tp->link_config.advertising))
|
||||
current_link_up = 0;
|
||||
} else {
|
||||
current_link_up = 0;
|
||||
@@ -3703,8 +3712,9 @@ static void tg3_tx_timeout(struct net_device *dev)
|
||||
{
|
||||
struct tg3 *tp = netdev_priv(dev);
|
||||
|
||||
printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
|
||||
dev->name);
|
||||
if (netif_msg_tx_err(tp))
|
||||
printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
|
||||
dev->name);
|
||||
|
||||
schedule_work(&tp->reset_task);
|
||||
}
|
||||
@@ -6396,16 +6406,17 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
|
||||
udelay(40);
|
||||
|
||||
/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
|
||||
* If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
|
||||
* If TG3_FLG2_IS_NIC is zero, we should read the
|
||||
* register to preserve the GPIO settings for LOMs. The GPIOs,
|
||||
* whether used as inputs or outputs, are set by boot code after
|
||||
* reset.
|
||||
*/
|
||||
if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
|
||||
if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
|
||||
u32 gpio_mask;
|
||||
|
||||
gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
|
||||
GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
|
||||
gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
|
||||
GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
|
||||
GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
|
||||
|
||||
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
|
||||
gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
|
||||
@@ -6417,8 +6428,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
|
||||
tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
|
||||
|
||||
/* GPIO1 must be driven high for eeprom write protect */
|
||||
tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
|
||||
GRC_LCLCTRL_GPIO_OUTPUT1);
|
||||
if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
|
||||
tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
|
||||
GRC_LCLCTRL_GPIO_OUTPUT1);
|
||||
}
|
||||
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
|
||||
udelay(100);
|
||||
@@ -8656,7 +8668,9 @@ static int tg3_test_registers(struct tg3 *tp)
|
||||
return 0;
|
||||
|
||||
out:
|
||||
printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
|
||||
if (netif_msg_hw(tp))
|
||||
printk(KERN_ERR PFX "Register test failed at offset %x\n",
|
||||
offset);
|
||||
tw32(offset, save_val);
|
||||
return -EIO;
|
||||
}
|
||||
@@ -8781,17 +8795,20 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
|
||||
tg3_writephy(tp, 0x10, phy & ~0x4000);
|
||||
tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
|
||||
}
|
||||
}
|
||||
val = BMCR_LOOPBACK | BMCR_FULLDPLX;
|
||||
if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
|
||||
val |= BMCR_SPEED100;
|
||||
else
|
||||
val |= BMCR_SPEED1000;
|
||||
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
|
||||
} else
|
||||
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
|
||||
|
||||
tg3_writephy(tp, MII_BMCR, val);
|
||||
udelay(40);
|
||||
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
|
||||
|
||||
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
|
||||
MAC_MODE_LINK_POLARITY;
|
||||
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
|
||||
tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
|
||||
mac_mode |= MAC_MODE_PORT_MODE_MII;
|
||||
} else
|
||||
mac_mode |= MAC_MODE_PORT_MODE_GMII;
|
||||
|
||||
/* reset to prevent losing 1st rx packet intermittently */
|
||||
if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
|
||||
@@ -8799,12 +8816,6 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
|
||||
udelay(10);
|
||||
tw32_f(MAC_RX_MODE, tp->rx_mode);
|
||||
}
|
||||
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
|
||||
MAC_MODE_LINK_POLARITY;
|
||||
if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
|
||||
mac_mode |= MAC_MODE_PORT_MODE_MII;
|
||||
else
|
||||
mac_mode |= MAC_MODE_PORT_MODE_GMII;
|
||||
if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
|
||||
mac_mode &= ~MAC_MODE_LINK_POLARITY;
|
||||
tg3_writephy(tp, MII_TG3_EXT_CTRL,
|
||||
@@ -9456,16 +9467,12 @@ static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
|
||||
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
|
||||
static void __devinit tg3_nvram_init(struct tg3 *tp)
|
||||
{
|
||||
int j;
|
||||
|
||||
tw32_f(GRC_EEPROM_ADDR,
|
||||
(EEPROM_ADDR_FSM_RESET |
|
||||
(EEPROM_DEFAULT_CLOCK_PERIOD <<
|
||||
EEPROM_ADDR_CLKPERD_SHIFT)));
|
||||
|
||||
/* XXX schedule_timeout() ... */
|
||||
for (j = 0; j < 100; j++)
|
||||
udelay(10);
|
||||
msleep(1);
|
||||
|
||||
/* Enable seeprom accesses. */
|
||||
tw32_f(GRC_LOCAL_CTRL,
|
||||
@@ -9526,12 +9533,12 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
|
||||
EEPROM_ADDR_ADDR_MASK) |
|
||||
EEPROM_ADDR_READ | EEPROM_ADDR_START);
|
||||
|
||||
for (i = 0; i < 10000; i++) {
|
||||
for (i = 0; i < 1000; i++) {
|
||||
tmp = tr32(GRC_EEPROM_ADDR);
|
||||
|
||||
if (tmp & EEPROM_ADDR_COMPLETE)
|
||||
break;
|
||||
udelay(100);
|
||||
msleep(1);
|
||||
}
|
||||
if (!(tmp & EEPROM_ADDR_COMPLETE))
|
||||
return -EBUSY;
|
||||
@@ -9656,12 +9663,12 @@ static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
|
||||
EEPROM_ADDR_START |
|
||||
EEPROM_ADDR_WRITE);
|
||||
|
||||
for (j = 0; j < 10000; j++) {
|
||||
for (j = 0; j < 1000; j++) {
|
||||
val = tr32(GRC_EEPROM_ADDR);
|
||||
|
||||
if (val & EEPROM_ADDR_COMPLETE)
|
||||
break;
|
||||
udelay(100);
|
||||
msleep(1);
|
||||
}
|
||||
if (!(val & EEPROM_ADDR_COMPLETE)) {
|
||||
rc = -EBUSY;
|
||||
@@ -9965,8 +9972,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
|
||||
tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
|
||||
|
||||
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
|
||||
if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM))
|
||||
if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
|
||||
tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
|
||||
tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -10066,10 +10075,17 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
|
||||
tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
|
||||
tp->led_ctrl = LED_CTRL_MODE_PHY_2;
|
||||
|
||||
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
|
||||
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
|
||||
tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
|
||||
else
|
||||
if ((tp->pdev->subsystem_vendor ==
|
||||
PCI_VENDOR_ID_ARIMA) &&
|
||||
(tp->pdev->subsystem_device == 0x205a ||
|
||||
tp->pdev->subsystem_device == 0x2063))
|
||||
tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
|
||||
} else {
|
||||
tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
|
||||
tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
|
||||
}
|
||||
|
||||
if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
|
||||
tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
|
||||
@@ -10147,7 +10163,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
|
||||
|
||||
if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
|
||||
!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
|
||||
u32 bmsr, adv_reg, tg3_ctrl;
|
||||
u32 bmsr, adv_reg, tg3_ctrl, mask;
|
||||
|
||||
tg3_readphy(tp, MII_BMSR, &bmsr);
|
||||
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
|
||||
@@ -10171,7 +10187,10 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
|
||||
MII_TG3_CTRL_ENABLE_AS_MASTER);
|
||||
}
|
||||
|
||||
if (!tg3_copper_is_advertising_all(tp)) {
|
||||
mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
|
||||
ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
|
||||
ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
|
||||
if (!tg3_copper_is_advertising_all(tp, mask)) {
|
||||
tg3_writephy(tp, MII_ADVERTISE, adv_reg);
|
||||
|
||||
if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
|
||||
@@ -10695,7 +10714,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
|
||||
tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
|
||||
|
||||
/* Get eeprom hw config before calling tg3_set_power_state().
|
||||
* In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
|
||||
* In particular, the TG3_FLG2_IS_NIC flag must be
|
||||
* determined before calling tg3_set_power_state() so that
|
||||
* we know whether or not to switch out of Vaux power.
|
||||
* When the flag is set, it means that GPIO1 is used for eeprom
|
||||
@@ -10862,7 +10881,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
|
||||
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
|
||||
(tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
|
||||
(tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
|
||||
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)) ||
|
||||
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
|
||||
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
|
||||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
|
||||
tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
|
||||
|
||||
@@ -11912,13 +11932,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
|
||||
|
||||
pci_set_drvdata(pdev, dev);
|
||||
|
||||
printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
|
||||
printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
|
||||
dev->name,
|
||||
tp->board_part_number,
|
||||
tp->pci_chip_rev_id,
|
||||
tg3_phy_string(tp),
|
||||
tg3_bus_string(tp, str),
|
||||
(tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
|
||||
((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
|
||||
((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
|
||||
"10/100/1000Base-T")));
|
||||
|
||||
for (i = 0; i < 6; i++)
|
||||
printk("%2.2x%c", dev->dev_addr[i],
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user