*
* Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
*
- * Key expansion routine taken from crypto/aes.c
+ * Key expansion routine taken from crypto/aes_generic.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* ---------------------------------------------------------------------------
*/
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "padlock.h"
-#define AES_MIN_KEY_SIZE 16 /* in uint8_t units */
-#define AES_MAX_KEY_SIZE 32 /* ditto */
-#define AES_BLOCK_SIZE 16 /* ditto */
#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */
#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
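+/* 64 u32 words comfortably hold the largest schedule: AES-256 expands to 60. */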
return 0;
}
-static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
- unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
+ unsigned long addr = (unsigned long)ctx;
unsigned long align = PADLOCK_ALIGNMENT;
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+{
+ return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+ return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len, u32 *flags)
+ unsigned int key_len)
{
struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
+ u32 *flags = &tfm->crt_flags;
uint32_t i, t, u, v, w;
uint32_t P[AES_EXTENDED_KEY_SIZE];
uint32_t rounds;
- if (key_len != 16 && key_len != 24 && key_len != 32) {
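+	/*
+	 * The cipher layer already enforces cia_min/max_keysize (16..32
+	 * bytes), so rejecting lengths not divisible by 8 leaves exactly
+	 * the legal AES key sizes: 16, 24 and 32 bytes.
+	 */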
+ if (key_len % 8) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* ====== Encryption/decryption routines ====== */
/* These are the real calls to PadLock. */
+static inline void padlock_reset_key(void)
+{
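+	/*
+	 * Any write to EFLAGS makes the PadLock unit reload the key on the
+	 * next xcrypt instruction; pushfl/popfl rewrites EFLAGS without
+	 * changing it.
+	 */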
+ asm volatile ("pushfl; popfl");
+}
+
+static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
+ void *control_word)
+{
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
+ : "+S"(input), "+D"(output)
+ : "d"(control_word), "b"(key), "c"(1));
+}
+
+static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+{
+ u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+ u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
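+	/*
+	 * Bounce the block through an aligned stack buffer with one spare
+	 * block of slack, so the engine's read beyond the first block
+	 * stays inside the buffer.
+	 */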
+ memcpy(tmp, in, AES_BLOCK_SIZE);
+ padlock_xcrypt(tmp, out, key, cword);
+}
+
+static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
+ struct cword *cword)
+{
+ /* padlock_xcrypt requires at least two blocks of data. */
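+	/* The check below is true only when the input block occupies the
+	 * last 16 bytes of a page, i.e. when reading ahead could cross
+	 * into a possibly unmapped page. */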
+ if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
+ (PAGE_SIZE - 1)))) {
+ aes_crypt_copy(in, out, key, cword);
+ return;
+ }
+
+ padlock_xcrypt(in, out, key, cword);
+}
+
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
void *control_word, u32 count)
{
- asm volatile ("pushfl; popfl"); /* enforce key reload. */
- asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
+ if (count == 1) {
+ aes_crypt(input, output, key, control_word);
+ return;
+ }
+
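+	/*
+	 * If the count is odd, crypt one block by itself first (rep xcrypt
+	 * advances %esi/%edi past it), then hand the remaining even count
+	 * to a second rep xcryptecb.  In C, roughly:
+	 *
+	 *	if (count & 1) {
+	 *		padlock_xcrypt(input, output, key, control_word);
+	 *		input += AES_BLOCK_SIZE;
+	 *		output += AES_BLOCK_SIZE;
+	 *		count--;
+	 *	}
+	 *	... one rep xcryptecb for the remaining count ...
+	 */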
+ asm volatile ("test $1, %%cl;"
+ "je 1f;"
+ "lea -1(%%ecx), %%eax;"
+ "mov $1, %%ecx;"
+ ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
+ "mov %%eax, %%ecx;"
+ "1:"
+ ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(count));
+ : "d"(control_word), "b"(key), "c"(count)
+ : "ax");
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
u8 *iv, void *control_word, u32 count)
{
- /* Enforce key reload. */
- asm volatile ("pushfl; popfl");
/* rep xcryptcbc */
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
: "+S" (input), "+D" (output), "+a" (iv)
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
+ padlock_reset_key();
+ aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
+ padlock_reset_key();
+ aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
}
-static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
- const u8 *in, unsigned int nbytes)
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aes_ctx),
+ .cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_set_key,
+ .cia_encrypt = aes_encrypt,
+ .cia_decrypt = aes_decrypt,
+ }
+ }
+};
+
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(desc->tfm);
- padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
- nbytes / AES_BLOCK_SIZE);
- return nbytes & ~(AES_BLOCK_SIZE - 1);
+ struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ padlock_reset_key();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
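+	/* The walker maps the scatterlists piecewise; crypt the full
+	 * blocks of each chunk and hand any sub-block remainder back to
+	 * the walker for the next iteration. */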
+ while ((nbytes = walk.nbytes)) {
+ padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+ ctx->E, &ctx->cword.encrypt,
+ nbytes / AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ return err;
}
-static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
- const u8 *in, unsigned int nbytes)
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(desc->tfm);
- padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
- nbytes / AES_BLOCK_SIZE);
- return nbytes & ~(AES_BLOCK_SIZE - 1);
+ struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ padlock_reset_key();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+ ctx->D, &ctx->cword.decrypt,
+ nbytes / AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ return err;
}
-static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
- const u8 *in, unsigned int nbytes)
-{
- struct aes_ctx *ctx = aes_ctx(desc->tfm);
- u8 *iv;
+static struct crypto_alg ecb_aes_alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-padlock",
+ .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aes_ctx),
+ .cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
+ }
+ }
+};
- iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
- &ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
- memcpy(desc->info, iv, AES_BLOCK_SIZE);
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ padlock_reset_key();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+ walk.dst.virt.addr, ctx->E,
+ walk.iv, &ctx->cword.encrypt,
+ nbytes / AES_BLOCK_SIZE);
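+		/* xcryptcbc returns a pointer to the last ciphertext
+		 * block; copy it back so the next chunk continues the
+		 * chain with the right IV. */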
+ memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
- return nbytes & ~(AES_BLOCK_SIZE - 1);
+ return err;
}
-static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
- const u8 *in, unsigned int nbytes)
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(desc->tfm);
- padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
- nbytes / AES_BLOCK_SIZE);
- return nbytes & ~(AES_BLOCK_SIZE - 1);
+ struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ padlock_reset_key();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+ ctx->D, walk.iv, &ctx->cword.decrypt,
+ nbytes / AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ return err;
}
-static struct crypto_alg aes_alg = {
- .cra_name = "aes",
- .cra_driver_name = "aes-padlock",
- .cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_aes_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-padlock",
+ .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
.cra_u = {
- .cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
- .cia_encrypt = aes_encrypt,
- .cia_decrypt = aes_decrypt,
- .cia_encrypt_ecb = aes_encrypt_ecb,
- .cia_decrypt_ecb = aes_decrypt_ecb,
- .cia_encrypt_cbc = aes_encrypt_cbc,
- .cia_decrypt_cbc = aes_decrypt_cbc,
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
}
}
};
}
gen_tabs();
- if ((ret = crypto_register_alg(&aes_alg))) {
- printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
- return ret;
- }
+ if ((ret = crypto_register_alg(&aes_alg)))
+ goto aes_err;
+
+ if ((ret = crypto_register_alg(&ecb_aes_alg)))
+ goto ecb_aes_err;
+
+ if ((ret = crypto_register_alg(&cbc_aes_alg)))
+ goto cbc_aes_err;
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+out:
return ret;
+
+cbc_aes_err:
+ crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+ crypto_unregister_alg(&aes_alg);
+aes_err:
+ printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+ goto out;
}
static void __exit padlock_fini(void)
{
+ crypto_unregister_alg(&cbc_aes_alg);
+ crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
-MODULE_ALIAS("aes-padlock");
+MODULE_ALIAS("aes");
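
For reference, a minimal sketch of driving the new blkcipher algorithms from
kernel code with the crypto API of this era (the function name, key, IV and
buffer contents are illustrative placeholders, with error paths trimmed):

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int cbc_aes_example(void)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		u8 key[16] = { 0 };	/* placeholder key */
		u8 iv[16] = { 0 };	/* placeholder IV */
		u8 buf[32] = { 0 };	/* two AES blocks, crypted in place */
		int err;

		/* Ask for a synchronous cbc(aes); the padlock driver is
		 * picked over the generic one by its higher priority. */
		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;

		err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
		if (!err) {
			crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
			sg_init_one(&sg, buf, sizeof(buf));
			err = crypto_blkcipher_encrypt(&desc, &sg, &sg,
						       sizeof(buf));
		}

		crypto_free_blkcipher(tfm);
		return err;
	}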