
Staging: rtl8192e: remove unneeded ieee80211 files

These files are not even built or used, so just remove them.

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman 2009-08-12 16:50:57 -07:00
parent 96ed5846c4
commit cbe892f677
18 changed files with 0 additions and 2248 deletions


@@ -1,199 +0,0 @@
#ifndef __INC_ENDIANFREE_H
#define __INC_ENDIANFREE_H
/*
* Call an endian-free function when
* 1. Read/write packet content.
* 2. Before write integer to IO.
* 3. After read integer from IO.
*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
#ifndef bool
typedef enum{false = 0, true} bool;
#endif
#endif
#define __MACHINE_LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
#define __MACHINE_BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net, ppc */
#define BYTE_ORDER __MACHINE_LITTLE_ENDIAN
#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
// Convert data
#define EF1Byte(_val) ((u8)(_val))
#define EF2Byte(_val) ((u16)(_val))
#define EF4Byte(_val) ((u32)(_val))
#else
// Convert data
#define EF1Byte(_val) ((u8)(_val))
#define EF2Byte(_val) (((((u16)(_val))&0x00ff)<<8)|((((u16)(_val))&0xff00)>>8))
#define EF4Byte(_val) (((((u32)(_val))&0x000000ff)<<24)|\
((((u32)(_val))&0x0000ff00)<<8)|\
((((u32)(_val))&0x00ff0000)>>8)|\
((((u32)(_val))&0xff000000)>>24))
#endif
// Read data from memory
#define ReadEF1Byte(_ptr) EF1Byte(*((u8 *)(_ptr)))
#define ReadEF2Byte(_ptr) EF2Byte(*((u16 *)(_ptr)))
#define ReadEF4Byte(_ptr) EF4Byte(*((u32 *)(_ptr)))
// Write data to memory
#define WriteEF1Byte(_ptr, _val) (*((u8 *)(_ptr)))=EF1Byte(_val)
#define WriteEF2Byte(_ptr, _val) (*((u16 *)(_ptr)))=EF2Byte(_val)
#define WriteEF4Byte(_ptr, _val) (*((u32 *)(_ptr)))=EF4Byte(_val)
// Convert host-system-specific byte ordering (little or big endian) to network byte ordering (big endian).
// 2006.05.07, by rcnjko.
#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
#define H2N1BYTE(_val) ((u8)(_val))
#define H2N2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
((((u16)(_val))&0xff00)>>8))
#define H2N4BYTE(_val) (((((u32)(_val))&0x000000ff)<<24)|\
((((u32)(_val))&0x0000ff00)<<8) |\
((((u32)(_val))&0x00ff0000)>>8) |\
((((u32)(_val))&0xff000000)>>24))
#else
#define H2N1BYTE(_val) ((u8)(_val))
#define H2N2BYTE(_val) ((u16)(_val))
#define H2N4BYTE(_val) ((u32)(_val))
#endif
// Convert from network byte ordering (big endian) to host-system-specific byte ordering (little or big endian).
// 2006.05.07, by rcnjko.
#if BYTE_ORDER == __MACHINE_LITTLE_ENDIAN
#define N2H1BYTE(_val) ((u8)(_val))
#define N2H2BYTE(_val) (((((u16)(_val))&0x00ff)<<8)|\
((((u16)(_val))&0xff00)>>8))
#define N2H4BYTE(_val) (((((u32)(_val))&0x000000ff)<<24)|\
((((u32)(_val))&0x0000ff00)<<8) |\
((((u32)(_val))&0x00ff0000)>>8) |\
((((u32)(_val))&0xff000000)>>24))
#else
#define N2H1BYTE(_val) ((u8)(_val))
#define N2H2BYTE(_val) ((u16)(_val))
#define N2H4BYTE(_val) ((u32)(_val))
#endif
//
// Example:
// BIT_LEN_MASK_32(0) => 0x00000000
// BIT_LEN_MASK_32(1) => 0x00000001
// BIT_LEN_MASK_32(2) => 0x00000003
// BIT_LEN_MASK_32(32) => 0xFFFFFFFF
//
#define BIT_LEN_MASK_32(__BitLen) (0xFFFFFFFF >> (32 - (__BitLen)))
//
// Example:
// BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
// BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
//
#define BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen) (BIT_LEN_MASK_32(__BitLen) << (__BitOffset))
//
// Description:
// Return 4-byte value in host byte ordering from
// 4-byte pointer in little-endian system.
//
#define LE_P4BYTE_TO_HOST_4BYTE(__pStart) (EF4Byte(*((u32 *)(__pStart))))
//
// Description:
// Translate subfield (continuous bits in little-endian) of 4-byte value in little-endian byte ordering to
// 4-byte value in host byte ordering.
//
#define LE_BITS_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
( \
( LE_P4BYTE_TO_HOST_4BYTE(__pStart) >> (__BitOffset) ) \
& \
BIT_LEN_MASK_32(__BitLen) \
)
//
// Description:
// Mask subfield (continuous bits in little-endian) of 4-byte value in little-endian byte ordering
// and return the result in 4-byte value in host byte ordering.
//
#define LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
( \
LE_P4BYTE_TO_HOST_4BYTE(__pStart) \
& \
( ~BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen) ) \
)
//
// Description:
// Set subfield of little-endian 4-byte value to specified value.
//
#define SET_BITS_TO_LE_4BYTE(__pStart, __BitOffset, __BitLen, __Value) \
*((u32 *)(__pStart)) = \
EF4Byte( \
LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
| \
( (((u32)__Value) & BIT_LEN_MASK_32(__BitLen)) << (__BitOffset) ) \
);
#define BIT_LEN_MASK_16(__BitLen) \
(0xFFFF >> (16 - (__BitLen)))
#define BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen) \
(BIT_LEN_MASK_16(__BitLen) << (__BitOffset))
#define LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
(EF2Byte(*((u16 *)(__pStart))))
#define LE_BITS_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
( \
( LE_P2BYTE_TO_HOST_2BYTE(__pStart) >> (__BitOffset) ) \
& \
BIT_LEN_MASK_16(__BitLen) \
)
#define LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
( \
LE_P2BYTE_TO_HOST_2BYTE(__pStart) \
& \
( ~BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen) ) \
)
#define SET_BITS_TO_LE_2BYTE(__pStart, __BitOffset, __BitLen, __Value) \
*((u16 *)(__pStart)) = \
EF2Byte( \
LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
| \
( (((u16)__Value) & BIT_LEN_MASK_16(__BitLen)) << (__BitOffset) ) \
);
#define BIT_LEN_MASK_8(__BitLen) \
(0xFF >> (8 - (__BitLen)))
#define BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen) \
(BIT_LEN_MASK_8(__BitLen) << (__BitOffset))
#define LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
(EF1Byte(*((u8 *)(__pStart))))
#define LE_BITS_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
( \
( LE_P1BYTE_TO_HOST_1BYTE(__pStart) >> (__BitOffset) ) \
& \
BIT_LEN_MASK_8(__BitLen) \
)
#define LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
( \
LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
& \
( ~BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen) ) \
)
#define SET_BITS_TO_LE_1BYTE(__pStart, __BitOffset, __BitLen, __Value) \
*((u8 *)(__pStart)) = \
EF1Byte( \
LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
| \
( (((u8)__Value) & BIT_LEN_MASK_8(__BitLen)) << (__BitOffset) ) \
);
#endif // #ifndef __INC_ENDIANFREE_H
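
The bit-field helpers above are easiest to see in isolation. Below is a minimal standalone C sketch (illustrative only; none of these helper names come from the driver) that mirrors the semantics of BIT_LEN_MASK_32, LE_BITS_TO_4BYTE and SET_BITS_TO_LE_4BYTE, using explicit byte loads so it behaves the same regardless of host endianness:

#include <stdint.h>
#include <stdio.h>

static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void le32_store(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* Mask of the low 'len' bits; len == 0 and len == 32 are handled explicitly
 * to avoid the undefined 32-bit shift. */
static uint32_t bit_len_mask32(unsigned int len)
{
	return len >= 32 ? 0xFFFFFFFFu : ((1u << len) - 1u);
}

/* Equivalent of LE_BITS_TO_4BYTE(): read a bit field from a little-endian word. */
static uint32_t le_bits_to_4byte(const uint8_t *p, unsigned int off, unsigned int len)
{
	return (le32_load(p) >> off) & bit_len_mask32(len);
}

/* Equivalent of SET_BITS_TO_LE_4BYTE(): clear the field, then OR in the new value. */
static void set_bits_to_le_4byte(uint8_t *p, unsigned int off, unsigned int len, uint32_t val)
{
	uint32_t w = le32_load(p) & ~(bit_len_mask32(len) << off);

	le32_store(p, w | ((val & bit_len_mask32(len)) << off));
}

int main(void)
{
	uint8_t desc[4] = { 0x78, 0x56, 0x34, 0x12 };	/* little-endian 0x12345678 */

	printf("bits 8..23 = 0x%04x\n", (unsigned int)le_bits_to_4byte(desc, 8, 16));
	set_bits_to_le_4byte(desc, 8, 16, 0xABCD);
	printf("word now   = 0x%08x\n", (unsigned int)le32_load(desc));
	return 0;
}

Reading bits 8..23 of the little-endian word 0x12345678 yields 0x3456, and writing 0xABCD back into that field leaves the word as 0x12abcd78.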


@@ -1,469 +0,0 @@
/*
* Cryptographic API.
*
* AES Cipher Algorithm.
*
* Based on Brian Gladman's code.
*
* Linux developers:
* Alexander Kjeldaas <astor@fast.no>
* Herbert Valerio Riedel <hvr@hvrlab.org>
* Kyle McMartin <kyle@debian.org>
* Adam J. Richter <adam@yggdrasil.com> (conversion to 2.5 API).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* ---------------------------------------------------------------------------
* Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
* All rights reserved.
*
* LICENSE TERMS
*
* The free distribution and use of this software in both source and binary
* form is allowed (with or without changes) provided that:
*
* 1. distributions of this source code include the above copyright
* notice, this list of conditions and the following disclaimer;
*
* 2. distributions in binary form include the above copyright
* notice, this list of conditions and the following disclaimer
* in the documentation and/or other associated materials;
*
* 3. the copyright holder's name is not used to endorse products
* built using this software without specific written permission.
*
* ALTERNATIVELY, provided that this notice is retained in full, this product
* may be distributed under the terms of the GNU General Public License (GPL),
* in which case the provisions of the GPL apply INSTEAD OF those given above.
*
* DISCLAIMER
*
* This software is provided 'as is' with no explicit or implied warranties
* in respect of its properties, including, but not limited to, correctness
* and/or fitness for purpose.
* ---------------------------------------------------------------------------
*/
/* Some changes from the Gladman version:
s/RIJNDAEL(e_key)/E_KEY/g
s/RIJNDAEL(d_key)/D_KEY/g
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <asm/byteorder.h>
#define AES_MIN_KEY_SIZE 16
#define AES_MAX_KEY_SIZE 32
#define AES_BLOCK_SIZE 16
static inline
u32 generic_rotr32 (const u32 x, const unsigned bits)
{
const unsigned n = bits % 32;
return (x >> n) | (x << (32 - n));
}
static inline
u32 generic_rotl32 (const u32 x, const unsigned bits)
{
const unsigned n = bits % 32;
return (x << n) | (x >> (32 - n));
}
#define rotl generic_rotl32
#define rotr generic_rotr32
/*
* #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
*/
inline static u8
byte(const u32 x, const unsigned n)
{
return x >> (n << 3);
}
#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
#define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from))
struct aes_ctx {
int key_length;
u32 E[60];
u32 D[60];
};
#define E_KEY ctx->E
#define D_KEY ctx->D
static u8 pow_tab[256] __initdata;
static u8 log_tab[256] __initdata;
static u8 sbx_tab[256] __initdata;
static u8 isb_tab[256] __initdata;
static u32 rco_tab[10];
static u32 ft_tab[4][256];
static u32 it_tab[4][256];
static u32 fl_tab[4][256];
static u32 il_tab[4][256];
static inline u8 __init
f_mult (u8 a, u8 b)
{
u8 aa = log_tab[a], cc = aa + log_tab[b];
return pow_tab[cc + (cc < aa ? 1 : 0)];
}
#define ff_mult(a,b) (a && b ? f_mult(a, b) : 0)
#define f_rn(bo, bi, n, k) \
bo[n] = ft_tab[0][byte(bi[n],0)] ^ \
ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
#define i_rn(bo, bi, n, k) \
bo[n] = it_tab[0][byte(bi[n],0)] ^ \
it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
#define ls_box(x) \
( fl_tab[0][byte(x, 0)] ^ \
fl_tab[1][byte(x, 1)] ^ \
fl_tab[2][byte(x, 2)] ^ \
fl_tab[3][byte(x, 3)] )
#define f_rl(bo, bi, n, k) \
bo[n] = fl_tab[0][byte(bi[n],0)] ^ \
fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
#define i_rl(bo, bi, n, k) \
bo[n] = il_tab[0][byte(bi[n],0)] ^ \
il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
static void __init
gen_tabs (void)
{
u32 i, t;
u8 p, q;
/* log and power tables for GF(2**8) finite field with
0x011b as modular polynomial - the simplest primitive
root is 0x03, used here to generate the tables */
for (i = 0, p = 1; i < 256; ++i) {
pow_tab[i] = (u8) p;
log_tab[p] = (u8) i;
p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
}
log_tab[1] = 0;
for (i = 0, p = 1; i < 10; ++i) {
rco_tab[i] = p;
p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
}
for (i = 0; i < 256; ++i) {
p = (i ? pow_tab[255 - log_tab[i]] : 0);
q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
sbx_tab[i] = p;
isb_tab[p] = (u8) i;
}
for (i = 0; i < 256; ++i) {
p = sbx_tab[i];
t = p;
fl_tab[0][i] = t;
fl_tab[1][i] = rotl (t, 8);
fl_tab[2][i] = rotl (t, 16);
fl_tab[3][i] = rotl (t, 24);
t = ((u32) ff_mult (2, p)) |
((u32) p << 8) |
((u32) p << 16) | ((u32) ff_mult (3, p) << 24);
ft_tab[0][i] = t;
ft_tab[1][i] = rotl (t, 8);
ft_tab[2][i] = rotl (t, 16);
ft_tab[3][i] = rotl (t, 24);
p = isb_tab[i];
t = p;
il_tab[0][i] = t;
il_tab[1][i] = rotl (t, 8);
il_tab[2][i] = rotl (t, 16);
il_tab[3][i] = rotl (t, 24);
t = ((u32) ff_mult (14, p)) |
((u32) ff_mult (9, p) << 8) |
((u32) ff_mult (13, p) << 16) |
((u32) ff_mult (11, p) << 24);
it_tab[0][i] = t;
it_tab[1][i] = rotl (t, 8);
it_tab[2][i] = rotl (t, 16);
it_tab[3][i] = rotl (t, 24);
}
}
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
#define imix_col(y,x) \
u = star_x(x); \
v = star_x(u); \
w = star_x(v); \
t = w ^ (x); \
(y) = u ^ v ^ w; \
(y) ^= rotr(u ^ t, 8) ^ \
rotr(v ^ t, 16) ^ \
rotr(t,24)
/* initialise the key schedule from the user supplied key */
#define loop4(i) \
{ t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
t ^= E_KEY[4 * i]; E_KEY[4 * i + 4] = t; \
t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \
t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \
t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \
}
#define loop6(i) \
{ t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
t ^= E_KEY[6 * i]; E_KEY[6 * i + 6] = t; \
t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \
t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \
t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \
t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \
t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \
}
#define loop8(i) \
{ t = rotr(t, 8); ; t = ls_box(t) ^ rco_tab[i]; \
t ^= E_KEY[8 * i]; E_KEY[8 * i + 8] = t; \
t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \
t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \
t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t; \
t = E_KEY[8 * i + 4] ^ ls_box(t); \
E_KEY[8 * i + 12] = t; \
t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \
t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \
t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
}
static int
aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
{
struct aes_ctx *ctx = ctx_arg;
u32 i, t, u, v, w;
if (key_len != 16 && key_len != 24 && key_len != 32) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->key_length = key_len;
E_KEY[0] = u32_in (in_key);
E_KEY[1] = u32_in (in_key + 4);
E_KEY[2] = u32_in (in_key + 8);
E_KEY[3] = u32_in (in_key + 12);
switch (key_len) {
case 16:
t = E_KEY[3];
for (i = 0; i < 10; ++i)
loop4 (i);
break;
case 24:
E_KEY[4] = u32_in (in_key + 16);
t = E_KEY[5] = u32_in (in_key + 20);
for (i = 0; i < 8; ++i)
loop6 (i);
break;
case 32:
E_KEY[4] = u32_in (in_key + 16);
E_KEY[5] = u32_in (in_key + 20);
E_KEY[6] = u32_in (in_key + 24);
t = E_KEY[7] = u32_in (in_key + 28);
for (i = 0; i < 7; ++i)
loop8 (i);
break;
}
D_KEY[0] = E_KEY[0];
D_KEY[1] = E_KEY[1];
D_KEY[2] = E_KEY[2];
D_KEY[3] = E_KEY[3];
for (i = 4; i < key_len + 24; ++i) {
imix_col (D_KEY[i], E_KEY[i]);
}
return 0;
}
/* encrypt a block of text */
#define f_nround(bo, bi, k) \
f_rn(bo, bi, 0, k); \
f_rn(bo, bi, 1, k); \
f_rn(bo, bi, 2, k); \
f_rn(bo, bi, 3, k); \
k += 4
#define f_lround(bo, bi, k) \
f_rl(bo, bi, 0, k); \
f_rl(bo, bi, 1, k); \
f_rl(bo, bi, 2, k); \
f_rl(bo, bi, 3, k)
static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
{
const struct aes_ctx *ctx = ctx_arg;
u32 b0[4], b1[4];
const u32 *kp = E_KEY + 4;
b0[0] = u32_in (in) ^ E_KEY[0];
b0[1] = u32_in (in + 4) ^ E_KEY[1];
b0[2] = u32_in (in + 8) ^ E_KEY[2];
b0[3] = u32_in (in + 12) ^ E_KEY[3];
if (ctx->key_length > 24) {
f_nround (b1, b0, kp);
f_nround (b0, b1, kp);
}
if (ctx->key_length > 16) {
f_nround (b1, b0, kp);
f_nround (b0, b1, kp);
}
f_nround (b1, b0, kp);
f_nround (b0, b1, kp);
f_nround (b1, b0, kp);
f_nround (b0, b1, kp);
f_nround (b1, b0, kp);
f_nround (b0, b1, kp);
f_nround (b1, b0, kp);
f_nround (b0, b1, kp);
f_nround (b1, b0, kp);
f_lround (b0, b1, kp);
u32_out (out, b0[0]);
u32_out (out + 4, b0[1]);
u32_out (out + 8, b0[2]);
u32_out (out + 12, b0[3]);
}
/* decrypt a block of text */
#define i_nround(bo, bi, k) \
i_rn(bo, bi, 0, k); \
i_rn(bo, bi, 1, k); \
i_rn(bo, bi, 2, k); \
i_rn(bo, bi, 3, k); \
k -= 4
#define i_lround(bo, bi, k) \
i_rl(bo, bi, 0, k); \
i_rl(bo, bi, 1, k); \
i_rl(bo, bi, 2, k); \
i_rl(bo, bi, 3, k)
static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in)
{
const struct aes_ctx *ctx = ctx_arg;
u32 b0[4], b1[4];
const int key_len = ctx->key_length;
const u32 *kp = D_KEY + key_len + 20;
b0[0] = u32_in (in) ^ E_KEY[key_len + 24];
b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25];
b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26];
b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27];
if (key_len > 24) {
i_nround (b1, b0, kp);
i_nround (b0, b1, kp);
}
if (key_len > 16) {
i_nround (b1, b0, kp);
i_nround (b0, b1, kp);
}
i_nround (b1, b0, kp);
i_nround (b0, b1, kp);
i_nround (b1, b0, kp);
i_nround (b0, b1, kp);
i_nround (b1, b0, kp);
i_nround (b0, b1, kp);
i_nround (b1, b0, kp);
i_nround (b0, b1, kp);
i_nround (b1, b0, kp);
i_lround (b0, b1, kp);
u32_out (out, b0[0]);
u32_out (out + 4, b0[1]);
u32_out (out + 8, b0[2]);
u32_out (out + 12, b0[3]);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
}
};
static int __init aes_init(void)
{
gen_tabs();
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("Dual BSD/GPL");
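
gen_tabs() above derives the S-box and round tables from log/antilog tables over GF(2^8), using 0x11b as the reduction polynomial and 0x03 as the generator, and f_mult() multiplies two field elements by adding their logs. The standalone sketch below (illustrative only; not driver code) rebuilds those two tables the same way and cross-checks the log-based multiply against a slow shift-and-xor multiply:

#include <stdint.h>
#include <stdio.h>

/* Slow reference multiply in GF(2^8), reducing by the AES polynomial 0x11b. */
static uint8_t gf_mul_slow(uint8_t a, uint8_t b)
{
	uint8_t r = 0;

	while (b) {
		if (b & 1)
			r ^= a;
		a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
		b >>= 1;
	}
	return r;
}

int main(void)
{
	uint8_t pow_tab[256], log_tab[256];
	unsigned int i, p;

	/* Same construction as gen_tabs(): pow_tab[i] = 0x03^i, log_tab inverts it. */
	for (i = 0, p = 1; i < 256; ++i) {
		pow_tab[i] = (uint8_t)p;
		log_tab[p] = (uint8_t)i;
		p = (p ^ (p << 1) ^ ((p & 0x80) ? 0x1b : 0)) & 0xff;
	}
	log_tab[1] = 0;		/* 0x03^255 == 1 would otherwise leave log_tab[1] = 255 */

	/* f_mult()-style multiply: add logs, reduce mod 255, look up the antilog. */
	for (i = 1; i < 256; ++i) {
		unsigned int s = log_tab[i] + log_tab[0xd4];
		uint8_t fast = pow_tab[s % 255];

		if (fast != gf_mul_slow((uint8_t)i, 0xd4)) {
			printf("mismatch at %u\n", i);
			return 1;
		}
	}
	printf("table-based multiply matches the slow GF(2^8) multiply\n");
	return 0;
}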


@@ -1,246 +0,0 @@
/*
* Scatterlist Cryptographic API.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com)
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include "kmap_types.h"
#include <linux/init.h>
#include <linux/module.h>
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include "internal.h"
LIST_HEAD(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
static inline int crypto_alg_get(struct crypto_alg *alg)
{
return try_inc_mod_count(alg->cra_module);
}
static inline void crypto_alg_put(struct crypto_alg *alg)
{
if (alg->cra_module)
__MOD_DEC_USE_COUNT(alg->cra_module);
}
struct crypto_alg *crypto_alg_lookup(const char *name)
{
struct crypto_alg *q, *alg = NULL;
if (!name)
return NULL;
down_read(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (!(strcmp(q->cra_name, name))) {
if (crypto_alg_get(q))
alg = q;
break;
}
}
up_read(&crypto_alg_sem);
return alg;
}
static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
{
tfm->crt_flags = 0;
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
return crypto_init_cipher_flags(tfm, flags);
case CRYPTO_ALG_TYPE_DIGEST:
return crypto_init_digest_flags(tfm, flags);
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_flags(tfm, flags);
default:
break;
}
BUG();
return -EINVAL;
}
static int crypto_init_ops(struct crypto_tfm *tfm)
{
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
return crypto_init_cipher_ops(tfm);
case CRYPTO_ALG_TYPE_DIGEST:
return crypto_init_digest_ops(tfm);
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_ops(tfm);
default:
break;
}
BUG();
return -EINVAL;
}
static void crypto_exit_ops(struct crypto_tfm *tfm)
{
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
crypto_exit_cipher_ops(tfm);
break;
case CRYPTO_ALG_TYPE_DIGEST:
crypto_exit_digest_ops(tfm);
break;
case CRYPTO_ALG_TYPE_COMPRESS:
crypto_exit_compress_ops(tfm);
break;
default:
BUG();
}
}
struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
{
struct crypto_tfm *tfm = NULL;
struct crypto_alg *alg;
alg = crypto_alg_mod_lookup(name);
if (alg == NULL)
goto out;
tfm = kmalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL);
if (tfm == NULL)
goto out_put;
memset(tfm, 0, sizeof(*tfm) + alg->cra_ctxsize);
tfm->__crt_alg = alg;
if (crypto_init_flags(tfm, flags))
goto out_free_tfm;
if (crypto_init_ops(tfm)) {
crypto_exit_ops(tfm);
goto out_free_tfm;
}
goto out;
out_free_tfm:
kfree(tfm);
tfm = NULL;
out_put:
crypto_alg_put(alg);
out:
return tfm;
}
void crypto_free_tfm(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
int size = sizeof(*tfm) + alg->cra_ctxsize;
crypto_exit_ops(tfm);
crypto_alg_put(alg);
memset(tfm, 0, size);
kfree(tfm);
}
int crypto_register_alg(struct crypto_alg *alg)
{
int ret = 0;
struct crypto_alg *q;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (!(strcmp(q->cra_name, alg->cra_name))) {
ret = -EEXIST;
goto out;
}
}
list_add_tail(&alg->cra_list, &crypto_alg_list);
out:
up_write(&crypto_alg_sem);
return ret;
}
int crypto_unregister_alg(struct crypto_alg *alg)
{
int ret = -ENOENT;
struct crypto_alg *q;
BUG_ON(!alg->cra_module);
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (alg == q) {
list_del(&alg->cra_list);
ret = 0;
goto out;
}
}
out:
up_write(&crypto_alg_sem);
return ret;
}
int crypto_alg_available(const char *name, u32 flags)
{
int ret = 0;
struct crypto_alg *alg = crypto_alg_mod_lookup(name);
if (alg) {
crypto_alg_put(alg);
ret = 1;
}
return ret;
}
static int __init init_crypto(void)
{
printk(KERN_INFO "Initializing Cryptographic API\n");
crypto_init_proc();
return 0;
}
__initcall(init_crypto);
/*
EXPORT_SYMBOL_GPL(crypto_register_alg);
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
EXPORT_SYMBOL_GPL(crypto_free_tfm);
EXPORT_SYMBOL_GPL(crypto_alg_available);
*/
EXPORT_SYMBOL_NOVERS(crypto_register_alg);
EXPORT_SYMBOL_NOVERS(crypto_unregister_alg);
EXPORT_SYMBOL_NOVERS(crypto_alloc_tfm);
EXPORT_SYMBOL_NOVERS(crypto_free_tfm);
EXPORT_SYMBOL_NOVERS(crypto_alg_available);


@@ -1,103 +0,0 @@
/*
* Cryptographic API
*
* ARC4 Cipher Algorithm
*
* Jon Oberheide <jon@oberheide.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include "rtl_crypto.h"
#define ARC4_MIN_KEY_SIZE 1
#define ARC4_MAX_KEY_SIZE 256
#define ARC4_BLOCK_SIZE 1
struct arc4_ctx {
u8 S[256];
u8 x, y;
};
static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
{
struct arc4_ctx *ctx = ctx_arg;
int i, j = 0, k = 0;
ctx->x = 1;
ctx->y = 0;
for(i = 0; i < 256; i++)
ctx->S[i] = i;
for(i = 0; i < 256; i++)
{
u8 a = ctx->S[i];
j = (j + in_key[k] + a) & 0xff;
ctx->S[i] = ctx->S[j];
ctx->S[j] = a;
if((unsigned int)++k >= key_len)
k = 0;
}
return 0;
}
static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in)
{
struct arc4_ctx *ctx = ctx_arg;
u8 *const S = ctx->S;
u8 x = ctx->x;
u8 y = ctx->y;
u8 a, b;
a = S[x];
y = (y + a) & 0xff;
b = S[y];
S[x] = b;
S[y] = a;
x = (x + 1) & 0xff;
*out++ = *in ^ S[(a + b) & 0xff];
ctx->x = x;
ctx->y = y;
}
static struct crypto_alg arc4_alg = {
.cra_name = "arc4",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARC4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct arc4_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(arc4_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = ARC4_MIN_KEY_SIZE,
.cia_max_keysize = ARC4_MAX_KEY_SIZE,
.cia_setkey = arc4_set_key,
.cia_encrypt = arc4_crypt,
.cia_decrypt = arc4_crypt } }
};
static int __init arc4_init(void)
{
return crypto_register_alg(&arc4_alg);
}
static void __exit arc4_exit(void)
{
crypto_unregister_alg(&arc4_alg);
}
module_init(arc4_init);
module_exit(arc4_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
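
The arc4_set_key()/arc4_crypt() pair above is plain RC4 with one keystream byte produced per call; starting x at 1 and incrementing it after use is equivalent to the textbook form that increments first. A standalone sketch of the same key schedule and per-byte update (illustrative only; the key and message here are hypothetical examples):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct arc4 {
	uint8_t S[256];
	uint8_t x, y;
};

/* Key schedule, as in arc4_set_key(): swaps driven by the key bytes repeated cyclically. */
static void arc4_setkey(struct arc4 *c, const uint8_t *key, size_t keylen)
{
	unsigned int i, j = 0;

	c->x = 1;	/* same starting indices as the removed driver code */
	c->y = 0;
	for (i = 0; i < 256; i++)
		c->S[i] = (uint8_t)i;
	for (i = 0; i < 256; i++) {
		uint8_t a = c->S[i];

		j = (j + key[i % keylen] + a) & 0xff;
		c->S[i] = c->S[j];
		c->S[j] = a;
	}
}

/* One keystream byte per call, exactly the arc4_crypt() update. */
static uint8_t arc4_byte(struct arc4 *c, uint8_t in)
{
	uint8_t a = c->S[c->x];
	uint8_t b;

	c->y = (uint8_t)(c->y + a);
	b = c->S[c->y];
	c->S[c->x] = b;
	c->S[c->y] = a;
	c->x = (uint8_t)(c->x + 1);
	return in ^ c->S[(uint8_t)(a + b)];
}

int main(void)
{
	const uint8_t key[] = "Key";	/* hypothetical example key */
	const char *msg = "Plaintext";	/* hypothetical example message */
	struct arc4 c;
	size_t i;

	arc4_setkey(&c, key, sizeof(key) - 1);
	for (i = 0; msg[i] != '\0'; i++)
		printf("%02x", arc4_byte(&c, (uint8_t)msg[i]));
	printf("\n");
	return 0;
}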


@@ -1,40 +0,0 @@
/*
* Cryptographic API.
*
* Algorithm autoloader.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include "kmap_types.h"
#include <linux/kernel.h>
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/string.h>
#include <linux/kmod.h>
#include "internal.h"
/*
* A far more intelligent version of this is planned. For now, just
* try an exact match on the name of the algorithm.
*/
void crypto_alg_autoload(const char *name)
{
request_module(name);
}
struct crypto_alg *crypto_alg_mod_lookup(const char *name)
{
struct crypto_alg *alg = crypto_alg_lookup(name);
if (alg == NULL) {
crypto_alg_autoload(name);
alg = crypto_alg_lookup(name);
}
return alg;
}


@@ -1,299 +0,0 @@
/*
* Cryptographic API.
*
* Cipher operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/kernel.h>
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"
typedef void (cryptfn_t)(void *, u8 *, const u8 *);
typedef void (procfn_t)(struct crypto_tfm *, u8 *,
u8*, cryptfn_t, int enc, void *, int);
static inline void xor_64(u8 *a, const u8 *b)
{
((u32 *)a)[0] ^= ((u32 *)b)[0];
((u32 *)a)[1] ^= ((u32 *)b)[1];
}
static inline void xor_128(u8 *a, const u8 *b)
{
((u32 *)a)[0] ^= ((u32 *)b)[0];
((u32 *)a)[1] ^= ((u32 *)b)[1];
((u32 *)a)[2] ^= ((u32 *)b)[2];
((u32 *)a)[3] ^= ((u32 *)b)[3];
}
/*
* Generic encrypt/decrypt wrapper for ciphers, handles operations across
* multiple page boundaries by using temporary blocks. In user context,
* the kernel is given a chance to schedule us once per block.
*/
static int crypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, cryptfn_t crfn,
procfn_t prfn, int enc, void *info)
{
struct scatter_walk walk_in, walk_out;
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
u8 tmp_src[bsize];
u8 tmp_dst[bsize];
if (!nbytes)
return 0;
if (nbytes % bsize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return -EINVAL;
}
scatterwalk_start(&walk_in, src);
scatterwalk_start(&walk_out, dst);
for(;;) {
u8 *src_p, *dst_p;
int in_place;
scatterwalk_map(&walk_in, 0);
scatterwalk_map(&walk_out, 1);
src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
in_place = scatterwalk_samebuf(&walk_in, &walk_out,
src_p, dst_p);
nbytes -= bsize;
scatterwalk_copychunks(src_p, &walk_in, bsize, 0);
prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
scatterwalk_done(&walk_in, 0, nbytes);
scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
scatterwalk_done(&walk_out, 1, nbytes);
if (!nbytes)
return 0;
crypto_yield(tfm);
}
}
static void cbc_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
cryptfn_t fn, int enc, void *info, int in_place)
{
u8 *iv = info;
/* Null encryption */
if (!iv)
return;
if (enc) {
tfm->crt_u.cipher.cit_xor_block(iv, src);
fn(crypto_tfm_ctx(tfm), dst, iv);
memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
} else {
u8 stack[in_place ? crypto_tfm_alg_blocksize(tfm) : 0];
u8 *buf = in_place ? stack : dst;
fn(crypto_tfm_ctx(tfm), buf, src);
tfm->crt_u.cipher.cit_xor_block(buf, iv);
memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
if (buf != dst)
memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
}
}
static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
cryptfn_t fn, int enc, void *info, int in_place)
{
fn(crypto_tfm_ctx(tfm), dst, src);
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
} else
return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
&tfm->crt_flags);
}
static int ecb_encrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_encrypt,
ecb_process, 1, NULL);
}
static int ecb_decrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_decrypt,
ecb_process, 1, NULL);
}
static int cbc_encrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_encrypt,
cbc_process, 1, tfm->crt_cipher.cit_iv);
}
static int cbc_encrypt_iv(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, u8 *iv)
{
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_encrypt,
cbc_process, 1, iv);
}
static int cbc_decrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_decrypt,
cbc_process, 0, tfm->crt_cipher.cit_iv);
}
static int cbc_decrypt_iv(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, u8 *iv)
{
return crypt(tfm, dst, src, nbytes,
tfm->__crt_alg->cra_cipher.cia_decrypt,
cbc_process, 0, iv);
}
static int nocrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return -ENOSYS;
}
static int nocrypt_iv(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, u8 *iv)
{
return -ENOSYS;
}
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
u32 mode = flags & CRYPTO_TFM_MODE_MASK;
tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;
return 0;
}
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
int ret = 0;
struct cipher_tfm *ops = &tfm->crt_cipher;
ops->cit_setkey = setkey;
switch (tfm->crt_cipher.cit_mode) {
case CRYPTO_TFM_MODE_ECB:
ops->cit_encrypt = ecb_encrypt;
ops->cit_decrypt = ecb_decrypt;
break;
case CRYPTO_TFM_MODE_CBC:
ops->cit_encrypt = cbc_encrypt;
ops->cit_decrypt = cbc_decrypt;
ops->cit_encrypt_iv = cbc_encrypt_iv;
ops->cit_decrypt_iv = cbc_decrypt_iv;
break;
case CRYPTO_TFM_MODE_CFB:
ops->cit_encrypt = nocrypt;
ops->cit_decrypt = nocrypt;
ops->cit_encrypt_iv = nocrypt_iv;
ops->cit_decrypt_iv = nocrypt_iv;
break;
case CRYPTO_TFM_MODE_CTR:
ops->cit_encrypt = nocrypt;
ops->cit_decrypt = nocrypt;
ops->cit_encrypt_iv = nocrypt_iv;
ops->cit_decrypt_iv = nocrypt_iv;
break;
default:
BUG();
}
if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
switch (crypto_tfm_alg_blocksize(tfm)) {
case 8:
ops->cit_xor_block = xor_64;
break;
case 16:
ops->cit_xor_block = xor_128;
break;
default:
printk(KERN_WARNING "%s: block size %u not supported\n",
crypto_tfm_alg_name(tfm),
crypto_tfm_alg_blocksize(tfm));
ret = -EINVAL;
goto out;
}
ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
if (ops->cit_iv == NULL)
ret = -ENOMEM;
}
out:
return ret;
}
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
if (tfm->crt_cipher.cit_iv)
kfree(tfm->crt_cipher.cit_iv);
}
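
cbc_process() above keeps the chaining state in the IV buffer: on encryption the IV is XORed into the plaintext before the block cipher and then replaced by the ciphertext; on decryption the IV is XORed in after the block cipher and then replaced by the incoming ciphertext, with the stack copy keeping in-place operation safe. The standalone sketch below shows that flow with a toy, keyless block function standing in for a real cipher (illustrative only; every name here is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 8	/* toy block size, like the 8-byte case handled by xor_64() */

/* Toy invertible "block cipher": rotate each byte and XOR a constant. */
static void toy_encrypt(uint8_t *dst, const uint8_t *src)
{
	int i;
	for (i = 0; i < BS; i++)
		dst[i] = (uint8_t)(((src[i] << 3) | (src[i] >> 5)) ^ 0x5a);
}

static void toy_decrypt(uint8_t *dst, const uint8_t *src)
{
	int i;
	for (i = 0; i < BS; i++) {
		uint8_t t = src[i] ^ 0x5a;
		dst[i] = (uint8_t)((t >> 3) | (t << 5));
	}
}

static void xor_block(uint8_t *a, const uint8_t *b)
{
	int i;
	for (i = 0; i < BS; i++)
		a[i] ^= b[i];
}

/* Same flow as cbc_process(): the IV always tracks the last ciphertext block. */
static void cbc_encrypt_block(uint8_t *iv, uint8_t *dst, const uint8_t *src)
{
	xor_block(iv, src);
	toy_encrypt(dst, iv);
	memcpy(iv, dst, BS);
}

static void cbc_decrypt_block(uint8_t *iv, uint8_t *dst, const uint8_t *src)
{
	uint8_t buf[BS];

	toy_decrypt(buf, src);
	xor_block(buf, iv);
	memcpy(iv, src, BS);	/* save the ciphertext as next IV before writing dst */
	memcpy(dst, buf, BS);
}

int main(void)
{
	uint8_t iv1[BS] = "IVIVIVI", iv2[BS] = "IVIVIVI";
	uint8_t pt[2 * BS + 1] = "block_1_block_2_";
	uint8_t ct[2 * BS], out[2 * BS];
	int i;

	for (i = 0; i < 2; i++)
		cbc_encrypt_block(iv1, ct + i * BS, pt + i * BS);
	for (i = 0; i < 2; i++)
		cbc_decrypt_block(iv2, out + i * BS, ct + i * BS);

	printf("round trip %s\n", memcmp(pt, out, 2 * BS) ? "failed" : "ok");
	return 0;
}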


@@ -1,64 +0,0 @@
/*
* Cryptographic API.
*
* Compression operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/types.h>
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/errno.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include "internal.h"
static int crypto_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
return tfm->__crt_alg->cra_compress.coa_compress(crypto_tfm_ctx(tfm),
src, slen, dst,
dlen);
}
static int crypto_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
return tfm->__crt_alg->cra_compress.coa_decompress(crypto_tfm_ctx(tfm),
src, slen, dst,
dlen);
}
int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
{
return flags ? -EINVAL : 0;
}
int crypto_init_compress_ops(struct crypto_tfm *tfm)
{
int ret = 0;
struct compress_tfm *ops = &tfm->crt_compress;
ret = tfm->__crt_alg->cra_compress.coa_init(crypto_tfm_ctx(tfm));
if (ret)
goto out;
ops->cot_compress = crypto_compress;
ops->cot_decompress = crypto_decompress;
out:
return ret;
}
void crypto_exit_compress_ops(struct crypto_tfm *tfm)
{
tfm->__crt_alg->cra_compress.coa_exit(crypto_tfm_ctx(tfm));
}


@@ -1,90 +0,0 @@
/*
* Header file to maintain compatibility among different kernel versions.
*
* Copyright (c) 2004-2006 <lawrence_wang@realsil.com.cn>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*/
#include <linux/crypto.h>
static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
}
static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
}
#if 0
/*
* crypto_free_tfm - Free crypto transform
* @tfm: Transform to free
*
* crypto_free_tfm() frees up the transform and any associated resources,
* then drops the refcount on the associated algorithm.
*/
void crypto_free_tfm(struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
int size;
if (unlikely(!tfm))
return;
alg = tfm->__crt_alg;
size = sizeof(*tfm) + alg->cra_ctxsize;
if (alg->cra_exit)
alg->cra_exit(tfm);
crypto_exit_ops(tfm);
crypto_mod_put(alg);
memset(tfm, 0, size);
kfree(tfm);
}
#endif
#if 1
struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
{
struct crypto_tfm *tfm = NULL;
int err;
printk("call crypto_alloc_tfm!!!\n");
do {
struct crypto_alg *alg;
alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
err = PTR_ERR(alg);
if (IS_ERR(alg))
continue;
tfm = __crypto_alloc_tfm(alg, flags);
err = 0;
if (IS_ERR(tfm)) {
crypto_mod_put(alg);
err = PTR_ERR(tfm);
tfm = NULL;
}
} while (err == -EAGAIN && !signal_pending(current));
return tfm;
}
#endif
//EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
//EXPORT_SYMBOL_GPL(crypto_free_tfm);


@@ -1,108 +0,0 @@
/*
* Cryptographic API.
*
* Digest operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
static void init(struct crypto_tfm *tfm)
{
tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm));
}
static void update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
{
unsigned int i;
for (i = 0; i < nsg; i++) {
struct page *pg = sg[i].page;
unsigned int offset = sg[i].offset;
unsigned int l = sg[i].length;
do {
unsigned int bytes_from_page = min(l, ((unsigned int)
(PAGE_SIZE)) -
offset);
char *p = crypto_kmap(pg, 0) + offset;
tfm->__crt_alg->cra_digest.dia_update
(crypto_tfm_ctx(tfm), p,
bytes_from_page);
crypto_kunmap(p, 0);
crypto_yield(tfm);
offset = 0;
pg++;
l -= bytes_from_page;
} while (l > 0);
}
}
static void final(struct crypto_tfm *tfm, u8 *out)
{
tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out);
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
u32 flags;
if (tfm->__crt_alg->cra_digest.dia_setkey == NULL)
return -ENOSYS;
return tfm->__crt_alg->cra_digest.dia_setkey(crypto_tfm_ctx(tfm),
key, keylen, &flags);
}
static void digest(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg, u8 *out)
{
unsigned int i;
tfm->crt_digest.dit_init(tfm);
for (i = 0; i < nsg; i++) {
char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
p, sg[i].length);
crypto_kunmap(p, 0);
crypto_yield(tfm);
}
crypto_digest_final(tfm, out);
}
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
{
return flags ? -EINVAL : 0;
}
int crypto_init_digest_ops(struct crypto_tfm *tfm)
{
struct digest_tfm *ops = &tfm->crt_digest;
ops->dit_init = init;
ops->dit_update = update;
ops->dit_final = final;
ops->dit_digest = digest;
ops->dit_setkey = setkey;
return crypto_alloc_hmac_block(tfm);
}
void crypto_exit_digest_ops(struct crypto_tfm *tfm)
{
crypto_free_hmac_block(tfm);
}


@@ -22,9 +22,6 @@
#include <asm/string.h>
#include "ieee80211.h"
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20))
//#include "crypto_compat.h"
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))


@@ -20,10 +20,6 @@
#include "ieee80211.h"
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20))
//#include "crypto_compat.h"
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include "rtl_crypto.h"


@@ -1,115 +0,0 @@
/*
* Cryptographic API.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_INTERNAL_H
#define _CRYPTO_INTERNAL_H
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/kmap_types.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20))
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
prefetch(pos->member.next); \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member), \
prefetch(pos->member.next))
static inline void cond_resched(void)
{
if (need_resched()) {
set_current_state(TASK_RUNNING);
schedule();
}
}
#endif
extern enum km_type crypto_km_types[];
static inline enum km_type crypto_kmap_type(int out)
{
return crypto_km_types[(in_softirq() ? 2 : 0) + out];
}
static inline void *crypto_kmap(struct page *page, int out)
{
return kmap_atomic(page, crypto_kmap_type(out));
}
static inline void crypto_kunmap(void *vaddr, int out)
{
kunmap_atomic(vaddr, crypto_kmap_type(out));
}
static inline void crypto_yield(struct crypto_tfm *tfm)
{
if (!in_softirq())
cond_resched();
}
static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
return (void *)&tfm[1];
}
struct crypto_alg *crypto_alg_lookup(const char *name);
#ifdef CONFIG_KMOD
void crypto_alg_autoload(const char *name);
struct crypto_alg *crypto_alg_mod_lookup(const char *name);
#else
static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
{
return crypto_alg_lookup(name);
}
#endif
#ifdef CONFIG_CRYPTO_HMAC
int crypto_alloc_hmac_block(struct crypto_tfm *tfm);
void crypto_free_hmac_block(struct crypto_tfm *tfm);
#else
static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
{
return 0;
}
static inline void crypto_free_hmac_block(struct crypto_tfm *tfm)
{ }
#endif
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
#else
static inline void crypto_init_proc(void)
{ }
#endif
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_digest_ops(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);
#endif /* _CRYPTO_INTERNAL_H */


@@ -1,20 +0,0 @@
#ifndef __KMAP_TYPES_H
#define __KMAP_TYPES_H
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BH_IRQ,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
#define _ASM_KMAP_TYPES_H
#endif


@@ -1,194 +0,0 @@
/*
* Cryptographic API
*
* Michael MIC (IEEE 802.11i/TKIP) keyed digest
*
* Copyright (c) 2004 Jouni Malinen <jkmaline@cc.hut.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
//#include <linux/crypto.h>
#include "rtl_crypto.h"
struct michael_mic_ctx {
u8 pending[4];
size_t pending_len;
u32 l, r;
};
static inline u32 rotl(u32 val, int bits)
{
return (val << bits) | (val >> (32 - bits));
}
static inline u32 rotr(u32 val, int bits)
{
return (val >> bits) | (val << (32 - bits));
}
static inline u32 xswap(u32 val)
{
return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}
#define michael_block(l, r) \
do { \
r ^= rotl(l, 17); \
l += r; \
r ^= xswap(l); \
l += r; \
r ^= rotl(l, 3); \
l += r; \
r ^= rotr(l, 2); \
l += r; \
} while (0)
static inline u32 get_le32(const u8 *p)
{
return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
}
static inline void put_le32(u8 *p, u32 v)
{
p[0] = v;
p[1] = v >> 8;
p[2] = v >> 16;
p[3] = v >> 24;
}
static void michael_init(void *ctx)
{
struct michael_mic_ctx *mctx = ctx;
mctx->pending_len = 0;
}
static void michael_update(void *ctx, const u8 *data, unsigned int len)
{
struct michael_mic_ctx *mctx = ctx;
if (mctx->pending_len) {
int flen = 4 - mctx->pending_len;
if (flen > len)
flen = len;
memcpy(&mctx->pending[mctx->pending_len], data, flen);
mctx->pending_len += flen;
data += flen;
len -= flen;
if (mctx->pending_len < 4)
return;
mctx->l ^= get_le32(mctx->pending);
michael_block(mctx->l, mctx->r);
mctx->pending_len = 0;
}
while (len >= 4) {
mctx->l ^= get_le32(data);
michael_block(mctx->l, mctx->r);
data += 4;
len -= 4;
}
if (len > 0) {
mctx->pending_len = len;
memcpy(mctx->pending, data, len);
}
}
static void michael_final(void *ctx, u8 *out)
{
struct michael_mic_ctx *mctx = ctx;
u8 *data = mctx->pending;
/* Last block and padding (0x5a, 4..7 x 0) */
switch (mctx->pending_len) {
case 0:
mctx->l ^= 0x5a;
break;
case 1:
mctx->l ^= data[0] | 0x5a00;
break;
case 2:
mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000;
break;
case 3:
mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) |
0x5a000000;
break;
}
michael_block(mctx->l, mctx->r);
/* l ^= 0; */
michael_block(mctx->l, mctx->r);
put_le32(out, mctx->l);
put_le32(out + 4, mctx->r);
}
static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen,
u32 *flags)
{
struct michael_mic_ctx *mctx = ctx;
if (keylen != 8) {
if (flags)
*flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
mctx->l = get_le32(key);
mctx->r = get_le32(key + 4);
return 0;
}
static struct crypto_alg michael_mic_alg = {
.cra_name = "michael_mic",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = 8,
.cra_ctxsize = sizeof(struct michael_mic_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = 8,
.dia_init = michael_init,
.dia_update = michael_update,
.dia_final = michael_final,
.dia_setkey = michael_setkey } }
};
static int __init michael_mic_init(void)
{
return crypto_register_alg(&michael_mic_alg);
}
static void __exit michael_mic_exit(void)
{
crypto_unregister_alg(&michael_mic_alg);
}
module_init(michael_mic_init);
module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Michael MIC");
MODULE_AUTHOR("Jouni Malinen <jkmaline@cc.hut.fi>");
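
The michael_block() macro above is the whole primitive: the 8-byte key seeds (l, r), each 4-byte little-endian word of the message is XORed into l and mixed, and the message is padded with 0x5a and zeros before two final mixing rounds. A standalone sketch for messages that are already a multiple of 4 bytes (illustrative only; the kernel version additionally buffers partial words in ctx->pending):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same mixing step as the michael_block() macro in the removed file. */
static void michael_block(uint32_t *l, uint32_t *r)
{
	*r ^= (*l << 17) | (*l >> 15);				/* rotl(l, 17) */
	*l += *r;
	*r ^= ((*l & 0x00ff00ffu) << 8) | ((*l & 0xff00ff00u) >> 8);	/* xswap(l) */
	*l += *r;
	*r ^= (*l << 3) | (*l >> 29);				/* rotl(l, 3) */
	*l += *r;
	*r ^= (*l >> 2) | (*l << 30);				/* rotr(l, 2) */
	*l += *r;
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Whole-message MIC for a message whose length is a multiple of 4 bytes. */
static void michael_mic(const uint8_t key[8], const uint8_t *msg, size_t len,
			uint8_t out[8])
{
	uint32_t l = get_le32(key), r = get_le32(key + 4);
	size_t i;

	for (i = 0; i + 4 <= len; i += 4) {
		l ^= get_le32(msg + i);
		michael_block(&l, &r);
	}
	l ^= 0x5a;			/* padding: 0x5a, then an all-zero word */
	michael_block(&l, &r);
	michael_block(&l, &r);

	for (i = 0; i < 4; i++)
		out[i] = (uint8_t)(l >> (8 * i));
	for (i = 0; i < 4; i++)
		out[4 + i] = (uint8_t)(r >> (8 * i));
}

int main(void)
{
	const uint8_t key[8] = { 0 };		/* hypothetical all-zero key */
	const uint8_t msg[] = "example.";	/* hypothetical 8-byte message */
	uint8_t mic[8];
	size_t i;

	michael_mic(key, msg, sizeof(msg) - 1, mic);
	for (i = 0; i < 8; i++)
		printf("%02x", mic[i]);
	printf("\n");
	return 0;
}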


@@ -1,116 +0,0 @@
/*
* Scatterlist Cryptographic API.
*
* Procfs information.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/rwsem.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
static void *c_start(struct seq_file *m, loff_t *pos)
{
struct list_head *v;
loff_t n = *pos;
down_read(&crypto_alg_sem);
list_for_each(v, &crypto_alg_list)
if (!n--)
return list_entry(v, struct crypto_alg, cra_list);
return NULL;
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
struct list_head *v = p;
(*pos)++;
v = v->next;
return (v == &crypto_alg_list) ?
NULL : list_entry(v, struct crypto_alg, cra_list);
}
static void c_stop(struct seq_file *m, void *p)
{
up_read(&crypto_alg_sem);
}
static int c_show(struct seq_file *m, void *p)
{
struct crypto_alg *alg = (struct crypto_alg *)p;
seq_printf(m, "name : %s\n", alg->cra_name);
seq_printf(m, "module : %s\n",
(alg->cra_module ?
alg->cra_module->name :
"kernel"));
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_CIPHER:
seq_printf(m, "type : cipher\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n",
alg->cra_cipher.cia_min_keysize);
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
break;
case CRYPTO_ALG_TYPE_DIGEST:
seq_printf(m, "type : digest\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n",
alg->cra_digest.dia_digestsize);
break;
case CRYPTO_ALG_TYPE_COMPRESS:
seq_printf(m, "type : compression\n");
break;
default:
seq_printf(m, "type : unknown\n");
break;
}
seq_putc(m, '\n');
return 0;
}
static struct seq_operations crypto_seq_ops = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = c_show
};
static int crypto_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &crypto_seq_ops);
}
static struct file_operations proc_crypto_ops = {
.open = crypto_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release
};
void __init crypto_init_proc(void)
{
struct proc_dir_entry *proc;
proc = create_proc_entry("crypto", 0, NULL);
if (proc)
proc->proc_fops = &proc_crypto_ops;
}


@@ -1,7 +1,6 @@
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
//#include "EndianFree.h"
#define BIT0 0x00000001
#define BIT1 0x00000002
#define BIT2 0x00000004


@@ -1,126 +0,0 @@
/*
* Cryptographic API.
*
* Cipher operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* 2002 Adam J. Richter <adam@yggdrasil.com>
* 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include "kmap_types.h"
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"
enum km_type crypto_km_types[] = {
KM_USER0,
KM_USER1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
};
void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
{
if (nbytes <= walk->len_this_page &&
(((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
PAGE_CACHE_SIZE)
return walk->data;
else
return scratch;
}
static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
{
if (out)
memcpy(sgdata, buf, nbytes);
else
memcpy(buf, sgdata, nbytes);
}
void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
{
unsigned int rest_of_page;
walk->sg = sg;
walk->page = sg->page;
walk->len_this_segment = sg->length;
rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
walk->len_this_page = min(sg->length, rest_of_page);
walk->offset = sg->offset;
}
void scatterwalk_map(struct scatter_walk *walk, int out)
{
walk->data = crypto_kmap(walk->page, out) + walk->offset;
}
static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
unsigned int more)
{
/* walk->data may be pointing at the first byte of the next page;
however, we know we transferred at least one byte. So,
walk->data - 1 will be a virtual address in the mapped page. */
if (out)
flush_dcache_page(walk->page);
if (more) {
walk->len_this_segment -= walk->len_this_page;
if (walk->len_this_segment) {
walk->page++;
walk->len_this_page = min(walk->len_this_segment,
(unsigned)PAGE_CACHE_SIZE);
walk->offset = 0;
}
else
scatterwalk_start(walk, sg_next(walk->sg));
}
}
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
crypto_kunmap(walk->data, out);
if (walk->len_this_page == 0 || !more)
scatterwalk_pagedone(walk, out, more);
}
/*
* Do not call this unless the total length of all of the fragments
* has been verified as multiple of the block size.
*/
int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out)
{
if (buf != walk->data) {
while (nbytes > walk->len_this_page) {
memcpy_dir(buf, walk->data, walk->len_this_page, out);
buf += walk->len_this_page;
nbytes -= walk->len_this_page;
crypto_kunmap(walk->data, out);
scatterwalk_pagedone(walk, out, 1);
scatterwalk_map(walk, out);
}
memcpy_dir(buf, walk->data, nbytes, out);
}
walk->offset += nbytes;
walk->len_this_page -= nbytes;
walk->len_this_segment -= nbytes;
return 0;
}


@@ -1,51 +0,0 @@
/*
* Cryptographic API.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
* Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
#include <linux/mm.h>
#include <asm/scatterlist.h>
struct scatter_walk {
struct scatterlist *sg;
struct page *page;
void *data;
unsigned int len_this_page;
unsigned int len_this_segment;
unsigned int offset;
};
/* Define sg_next is an inline routine now in case we want to change
scatterlist to a linked list later. */
static inline struct scatterlist *sg_next(struct scatterlist *sg)
{
return sg + 1;
}
static inline int scatterwalk_samebuf(struct scatter_walk *walk_in,
struct scatter_walk *walk_out,
void *src_p, void *dst_p)
{
return walk_in->page == walk_out->page &&
walk_in->offset == walk_out->offset &&
walk_in->data == src_p && walk_out->data == dst_p;
}
void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch);
void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out);
void scatterwalk_map(struct scatter_walk *walk, int out);
void scatterwalk_done(struct scatter_walk *walk, int out, int more);
#endif /* _CRYPTO_SCATTERWALK_H */