ENGR00141399-3 DCP: add dcp driver support
author Frank Li <Frank.Li@freescale.com>
Thu, 24 Mar 2011 06:12:39 +0000 (14:12 +0800)
committer Oliver Wendt <ow@karo-electronics.de>
Mon, 30 Sep 2013 12:08:50 +0000 (14:08 +0200)
upgrade to 2.6.38

Signed-off-by: Frank Li <Frank.Li@freescale.com>
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/dcp.c [new file with mode: 0644]
drivers/crypto/dcp.h [new file with mode: 0644]
drivers/crypto/dcp_bootstream_ioctl.h [new file with mode: 0644]

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 98caccfdf217925e71fd2a9362e643ac79223045..626b132278f6f0d062a6ee29d64071571f51bf5d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -292,5 +292,15 @@ config CRYPTO_DEV_S5P
          This option allows you to have support for S5P crypto acceleration.
          Select this to offload Samsung S5PV210 or S5PC110 from AES
          algorithms execution.
+config CRYPTO_DEV_DCP
+       tristate "Support for the DCP engine"
+       depends on SOC_IMX28 || SOC_IMX23 || SOC_IMX50
+       select CRYPTO_ALGAPI
+       select CRYPTO_BLKCIPHER
+       help
+        Say 'Y' here to use the DCP AES and SHA
+        engine for the CryptoAPI algorithms.
+        To compile this driver as a module, choose M here: the module
+        will be called dcp.
 
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 53ea5015531997b3a48b0e536f562d9a144bdb0c..b1ef08980616474505ed6b011f85ce6641f61283 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_DCP) += dcp.o
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
new file mode 100644
index 0000000..7ab5955
--- /dev/null
+++ b/drivers/crypto/dcp.c
@@ -0,0 +1,1775 @@
+/*
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+/*
+ * Based on geode-aes.c
+ * Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sysdev.h>
+#include <linux/bitops.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/fs.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <mach/hardware.h>
+#include "dcp.h"
+#include "dcp_bootstream_ioctl.h"
+
+/* Following data only used by DCP bootstream interface */
+struct dcpboot_dma_area {
+       struct dcp_hw_packet hw_packet;
+       uint16_t block[16];
+};
+
+struct dcp {
+       struct device *dev;
+       spinlock_t lock;
+       struct mutex op_mutex[DCP_NUM_CHANNELS];
+       struct completion op_wait[DCP_NUM_CHANNELS];
+       int wait[DCP_NUM_CHANNELS];
+       int dcp_vmi_irq;
+       int dcp_irq;
+       u32 dcp_regs_base;
+       ulong clock_state;
+       bool chan_in_use[DCP_NUM_CHANNELS];
+
+       /* Following buffers used in hashing to meet 64-byte len alignment */
+       char *buf1;
+       char *buf2;
+       dma_addr_t buf1_phys;
+       dma_addr_t buf2_phys;
+       struct dcp_hash_coherent_block *buf1_desc;
+       struct dcp_hash_coherent_block *buf2_desc;
+       struct dcp_hash_coherent_block *user_buf_desc;
+
+       /* Following data only used by DCP bootstream interface */
+       struct dcpboot_dma_area *dcpboot_dma_area;
+       dma_addr_t dcpboot_dma_area_phys;
+};
+
+/* cipher flags */
+#define DCP_ENC        0x0001
+#define DCP_DEC        0x0002
+#define DCP_ECB        0x0004
+#define DCP_CBC        0x0008
+#define DCP_CBC_INIT   0x0010
+#define DCP_OTPKEY     0x0020
+
+/* hash flags */
+#define DCP_INIT       0x0001
+#define DCP_UPDATE     0x0002
+#define DCP_FINAL      0x0004
+
+#define DCP_AES        0x1000
+#define DCP_SHA1       0x2000
+#define DCP_CRC32      0x3000
+#define DCP_COPY       0x4000
+#define DCP_FILL       0x5000
+#define DCP_MODE_MASK  0xf000
+
+/* clock defines */
+#define CLOCK_ON       1
+#define CLOCK_OFF      0
+
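+/*
+ * The mode, direction and chaining variant of an operation are OR-ed
+ * into a single flags word.  For example, the single-block ECB encrypt
+ * path below builds its flags as
+ *
+ *     op->flags = DCP_AES | DCP_ENC | DCP_ECB;
+ *
+ * and dcp_perform_op() decodes the DCP_MODE_MASK bits to pick the
+ * channel and the packet control bits.
+ */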
+struct dcp_op {
+
+       unsigned int flags;
+
+       void *src;
+       dma_addr_t src_phys;
+
+       void *dst;
+       dma_addr_t dst_phys;
+
+       int len;
+
+       /* the key contains the IV for block modes */
+       union {
+               struct {
+                       u8 key[2 * AES_KEYSIZE_128]
+                               __attribute__ ((__aligned__(32)));
+                       dma_addr_t key_phys;
+                       int keylen;
+               } cipher;
+               struct {
+                       u8 digest[SHA256_DIGEST_SIZE]
+                               __attribute__ ((__aligned__(32)));
+                       dma_addr_t digest_phys;
+                       int digestlen;
+                       int init;
+               } hash;
+       };
+
+       union {
+               struct crypto_blkcipher *blk;
+               struct crypto_cipher *cip;
+               struct crypto_hash *hash;
+       } fallback;
+
+       struct dcp_hw_packet pkt
+               __attribute__ ((__aligned__(32)));
+};
+
+struct dcp_hash_coherent_block {
+       struct dcp_hw_packet pkt[1]
+               __attribute__ ((__aligned__(32)));
+       u8 digest[SHA256_DIGEST_SIZE]
+               __attribute__ ((__aligned__(32)));
+       unsigned int len;
+       dma_addr_t src_phys;
+       void *src;
+       void *dst;
+       dma_addr_t my_phys;
+       u32 hash_sel;
+       struct dcp_hash_coherent_block *next;
+};
+
+struct dcp_hash_op {
+
+       unsigned int flags;
+
+       /* the key contains the IV for block modes */
+       union {
+               struct {
+                       u8 key[2 * AES_KEYSIZE_128]
+                               __attribute__ ((__aligned__(32)));
+                       dma_addr_t key_phys;
+                       int keylen;
+               } cipher;
+               struct {
+                       u8 digest[SHA256_DIGEST_SIZE]
+                               __attribute__ ((__aligned__(32)));
+                       dma_addr_t digest_phys;
+                       int digestlen;
+                       int init;
+               } hash;
+       };
+
+       u32 length;
+       struct dcp_hash_coherent_block *head_desc;
+       struct dcp_hash_coherent_block *tail_desc;
+};
+
+/* only one */
+static struct dcp *global_sdcp;
+
+static void dcp_clock(struct dcp *sdcp, ulong state, bool force)
+{
+       u32 chan;
+       struct clk *clk = clk_get(sdcp->dev, "dcp_clk");
+
+       /* unless force is true (used during suspend/resume), if any
+        * channel is running, then the clock is already on and must
+        * stay on */
+       if (!force)
+               for (chan = 0; chan < DCP_NUM_CHANNELS; chan++)
+                       if (sdcp->chan_in_use[chan])
+                               goto exit;
+
+       if (state == CLOCK_OFF) {
+               /* gate at clock source */
+               if (!IS_ERR(clk))
+                       clk_disable(clk);
+               /* gate at DCP */
+               else
+                       __raw_writel(BM_DCP_CTRL_CLKGATE,
+                               sdcp->dcp_regs_base + HW_DCP_CTRL_SET);
+
+               sdcp->clock_state = CLOCK_OFF;
+
+       } else {
+               /* ungate at clock source */
+               if (!IS_ERR(clk))
+                       clk_enable(clk);
+               /* ungate at DCP */
+               else
+                       __raw_writel(BM_DCP_CTRL_CLKGATE,
+                               sdcp->dcp_regs_base + HW_DCP_CTRL_CLR);
+
+               sdcp->clock_state = CLOCK_ON;
+       }
+
+exit:
+       if (!IS_ERR(clk))
+               clk_put(clk);
+       return;
+}
+
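+/*
+ * Build and submit a single DCP work packet, then busy-wait until the
+ * interrupt handler bumps the per-channel wait counter.  The flow is:
+ * pick a channel from the mode bits, fill in the hw packet, DMA-map
+ * it, write its physical address to HW_DCP_CHnCMDPTR, and increment
+ * the channel semaphore to start the engine.
+ */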
+static void dcp_perform_op(struct dcp_op *op)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct mutex *mutex;
+       struct dcp_hw_packet *pkt;
+       int chan;
+       u32 pkt1, pkt2;
+       unsigned long timeout;
+       dma_addr_t pkt_phys;
+       u32 stat;
+
+       pkt1 = BM_DCP_PACKET1_DECR_SEMAPHORE | BM_DCP_PACKET1_INTERRUPT;
+
+       switch (op->flags & DCP_MODE_MASK) {
+
+       case DCP_AES:
+
+               chan = CIPHER_CHAN;
+
+               /* key is at the payload */
+               pkt1 |= BM_DCP_PACKET1_ENABLE_CIPHER;
+               if ((op->flags & DCP_OTPKEY) == 0)
+                       pkt1 |= BM_DCP_PACKET1_PAYLOAD_KEY;
+               if (op->flags & DCP_ENC)
+                       pkt1 |= BM_DCP_PACKET1_CIPHER_ENCRYPT;
+               if (op->flags & DCP_CBC_INIT)
+                       pkt1 |= BM_DCP_PACKET1_CIPHER_INIT;
+
+               pkt2 = BF(0, DCP_PACKET2_CIPHER_CFG) |
+                      BF(0, DCP_PACKET2_KEY_SELECT) |
+                      BF(BV_DCP_PACKET2_CIPHER_SELECT__AES128,
+                      DCP_PACKET2_CIPHER_SELECT);
+
+               if (op->flags & DCP_ECB)
+                       pkt2 |= BF(BV_DCP_PACKET2_CIPHER_MODE__ECB,
+                               DCP_PACKET2_CIPHER_MODE);
+               else if (op->flags & DCP_CBC)
+                       pkt2 |= BF(BV_DCP_PACKET2_CIPHER_MODE__CBC,
+                               DCP_PACKET2_CIPHER_MODE);
+
+               break;
+
+       case DCP_SHA1:
+
+               chan = HASH_CHAN;
+
+               pkt1 |= BM_DCP_PACKET1_ENABLE_HASH;
+               if (op->flags & DCP_INIT)
+                       pkt1 |= BM_DCP_PACKET1_HASH_INIT;
+               if (op->flags & DCP_FINAL) {
+                       pkt1 |= BM_DCP_PACKET1_HASH_TERM;
+                       BUG_ON(op->hash.digest == NULL);
+               }
+
+               pkt2 = BF(BV_DCP_PACKET2_HASH_SELECT__SHA1,
+                       DCP_PACKET2_HASH_SELECT);
+               break;
+
+       default:
+               dev_err(sdcp->dev, "Unsupported mode\n");
+               return;
+       }
+
+       mutex = &sdcp->op_mutex[chan];
+       pkt = &op->pkt;
+
+       pkt->pNext = 0;
+       pkt->pkt1 = pkt1;
+       pkt->pkt2 = pkt2;
+       pkt->pSrc = (u32)op->src_phys;
+       pkt->pDst = (u32)op->dst_phys;
+       pkt->size = op->len;
+       pkt->pPayload = chan == CIPHER_CHAN ?
+               (u32)op->cipher.key_phys : (u32)op->hash.digest_phys;
+       pkt->stat = 0;
+
+       pkt_phys = dma_map_single(sdcp->dev, pkt, sizeof(*pkt),
+                       DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(sdcp->dev, pkt_phys)) {
+               dev_err(sdcp->dev, "Unable to map packet descriptor\n");
+               return;
+       }
+
+       /* submit the work */
+       mutex_lock(mutex);
+       dcp_clock(sdcp, CLOCK_ON, false);
+       sdcp->chan_in_use[chan] = true;
+
+       __raw_writel(-1, sdcp->dcp_regs_base + HW_DCP_CHnSTAT_CLR(chan));
+
+       /* Load the work packet pointer and bump the channel semaphore */
+       __raw_writel((u32)pkt_phys, sdcp->dcp_regs_base +
+               HW_DCP_CHnCMDPTR(chan));
+
+       /* XXX wake from interrupt instead of looping */
+       timeout = jiffies + msecs_to_jiffies(1000);
+
+       sdcp->wait[chan] = 0;
+       __raw_writel(BF(1, DCP_CHnSEMA_INCREMENT), sdcp->dcp_regs_base
+               + HW_DCP_CHnSEMA(chan));
+       while (time_before(jiffies, timeout) && sdcp->wait[chan] == 0)
+               cpu_relax();
+
+       if (!time_before(jiffies, timeout)) {
+               dev_err(sdcp->dev, "Timeout while waiting STAT 0x%08x\n",
+                               __raw_readl(sdcp->dcp_regs_base + HW_DCP_STAT));
+               goto out;
+       }
+
+       stat = __raw_readl(sdcp->dcp_regs_base + HW_DCP_CHnSTAT(chan));
+       if ((stat & 0xff) != 0)
+               dev_err(sdcp->dev, "Channel stat error 0x%02x\n",
+                               stat & 0xff);
+out:
+       sdcp->chan_in_use[chan] = false;
+       dcp_clock(sdcp, CLOCK_OFF, false);
+       mutex_unlock(mutex);
+       dma_unmap_single(sdcp->dev, pkt_phys, sizeof(*pkt), DMA_BIDIRECTIONAL);
+}
+
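+/*
+ * Only AES-128 keys are handled by the DCP itself; 192/256-bit keys
+ * are routed to the software fallback tfm allocated in
+ * fallback_init_cip()/fallback_init_blk().
+ */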
+static int dcp_aes_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
+               unsigned int len)
+{
+       struct dcp_op *op = crypto_tfm_ctx(tfm);
+       int ret;
+
+       op->cipher.keylen = len;
+
+       if (len == AES_KEYSIZE_128) {
+               memcpy(op->cipher.key, key, len);
+               return 0;
+       }
+
+       if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+               /* not supported at all */
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+
+       /*
+        * The requested key size is not supported by HW, do a fallback
+        */
+       op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+       op->fallback.cip->base.crt_flags |= (tfm->crt_flags &
+                                               CRYPTO_TFM_REQ_MASK);
+
+       ret = crypto_cipher_setkey(op->fallback.cip, key, len);
+       if (ret) {
+               tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+               tfm->crt_flags |= (op->fallback.cip->base.crt_flags &
+                                       CRYPTO_TFM_RES_MASK);
+       }
+       return ret;
+}
+
+static void dcp_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+       if (unlikely(op->cipher.keylen != AES_KEYSIZE_128)) {
+               crypto_cipher_encrypt_one(op->fallback.cip, out, in);
+               return;
+       }
+
+       op->src = (void *) in;
+       op->dst = (void *) out;
+       op->flags = DCP_AES | DCP_ENC | DCP_ECB;
+       op->len = AES_BLOCK_SIZE;
+
+       /* map the data */
+       op->src_phys = dma_map_single(sdcp->dev, (void *)in, AES_BLOCK_SIZE,
+                                       DMA_TO_DEVICE);
+       if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+               dev_err(sdcp->dev, "Unable to map source\n");
+               return;
+       }
+
+       op->dst_phys = dma_map_single(sdcp->dev, out, AES_BLOCK_SIZE,
+                                       DMA_FROM_DEVICE);
+       if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+               dev_err(sdcp->dev, "Unable to map dest\n");
+               goto err_unmap_src;
+       }
+
+       op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+                                       AES_KEYSIZE_128, DMA_TO_DEVICE);
+       if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+               dev_err(sdcp->dev, "Unable to map key\n");
+               goto err_unmap_dst;
+       }
+
+       /* perform the operation */
+       dcp_perform_op(op);
+
+       dma_unmap_single(sdcp->dev, op->cipher.key_phys, AES_KEYSIZE_128,
+                       DMA_TO_DEVICE);
+err_unmap_dst:
+       dma_unmap_single(sdcp->dev, op->dst_phys, op->len, DMA_FROM_DEVICE);
+err_unmap_src:
+       dma_unmap_single(sdcp->dev, op->src_phys, op->len, DMA_TO_DEVICE);
+}
+
+static void dcp_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+       if (unlikely(op->cipher.keylen != AES_KEYSIZE_128)) {
+               crypto_cipher_decrypt_one(op->fallback.cip, out, in);
+               return;
+       }
+
+       op->src = (void *) in;
+       op->dst = (void *) out;
+       op->flags = DCP_AES | DCP_DEC | DCP_ECB;
+       op->len = AES_BLOCK_SIZE;
+
+       /* map the data */
+       op->src_phys = dma_map_single(sdcp->dev, (void *)in, AES_BLOCK_SIZE,
+                                       DMA_TO_DEVICE);
+       if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+               dev_err(sdcp->dev, "Unable to map source\n");
+               return;
+       }
+
+       op->dst_phys = dma_map_single(sdcp->dev, out, AES_BLOCK_SIZE,
+                                       DMA_FROM_DEVICE);
+       if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+               dev_err(sdcp->dev, "Unable to map dest\n");
+               goto err_unmap_src;
+       }
+
+       op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+                                       AES_KEYSIZE_128, DMA_TO_DEVICE);
+       if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+               dev_err(sdcp->dev, "Unable to map key\n");
+               goto err_unmap_dst;
+       }
+
+       /* perform the operation */
+       dcp_perform_op(op);
+
+       dma_unmap_single(sdcp->dev, op->cipher.key_phys, AES_KEYSIZE_128,
+                       DMA_TO_DEVICE);
+err_unmap_dst:
+       dma_unmap_single(sdcp->dev, op->dst_phys, op->len, DMA_FROM_DEVICE);
+err_unmap_src:
+       dma_unmap_single(sdcp->dev, op->src_phys, op->len, DMA_TO_DEVICE);
+}
+
+static int fallback_init_cip(struct crypto_tfm *tfm)
+{
+       const char *name = tfm->__crt_alg->cra_name;
+       struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+       op->fallback.cip = crypto_alloc_cipher(name, 0,
+                               CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+       if (IS_ERR(op->fallback.cip)) {
+               printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+               return PTR_ERR(op->fallback.cip);
+       }
+
+       return 0;
+}
+
+static void fallback_exit_cip(struct crypto_tfm *tfm)
+{
+       struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+       crypto_free_cipher(op->fallback.cip);
+       op->fallback.cip = NULL;
+}
+
+static struct crypto_alg dcp_aes_alg = {
+       .cra_name               = "aes",
+       .cra_driver_name        = "dcp-aes",
+       .cra_priority           = 300,
+       .cra_alignmask          = 15,
+       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER |
+                                 CRYPTO_ALG_NEED_FALLBACK,
+       .cra_init               = fallback_init_cip,
+       .cra_exit               = fallback_exit_cip,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct dcp_op),
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(dcp_aes_alg.cra_list),
+       .cra_u                  = {
+               .cipher = {
+                       .cia_min_keysize        = AES_MIN_KEY_SIZE,
+                       .cia_max_keysize        = AES_MAX_KEY_SIZE,
+                       .cia_setkey             = dcp_aes_setkey_cip,
+                       .cia_encrypt            = dcp_aes_encrypt,
+                       .cia_decrypt            = dcp_aes_decrypt
+               }
+       }
+};
+
+static int dcp_aes_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+               unsigned int len)
+{
+       struct dcp_op *op = crypto_tfm_ctx(tfm);
+       int ret;
+
+       op->cipher.keylen = len;
+
+       if (len == AES_KEYSIZE_128) {
+               memcpy(op->cipher.key, key, len);
+               return 0;
+       }
+
+       if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+               /* not supported at all */
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+
+       /*
+        * The requested key size is not supported by HW, do a fallback
+        */
+       op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+       op->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+                                               CRYPTO_TFM_REQ_MASK);
+
+       ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
+       if (ret) {
+               tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+               tfm->crt_flags |= (op->fallback.blk->base.crt_flags &
+                                       CRYPTO_TFM_RES_MASK);
+       }
+       return ret;
+}
+
+static int fallback_blk_dec(struct blkcipher_desc *desc,
+               struct scatterlist *dst, struct scatterlist *src,
+               unsigned int nbytes)
+{
+       int ret;
+       struct crypto_blkcipher *tfm;
+       struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+       tfm = desc->tfm;
+       desc->tfm = op->fallback.blk;
+
+       ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+       desc->tfm = tfm;
+       return ret;
+}
+
+static int fallback_blk_enc(struct blkcipher_desc *desc,
+               struct scatterlist *dst, struct scatterlist *src,
+               unsigned int nbytes)
+{
+       int ret;
+       struct crypto_blkcipher *tfm;
+       struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+       tfm = desc->tfm;
+       desc->tfm = op->fallback.blk;
+
+       ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+       desc->tfm = tfm;
+       return ret;
+}
+
+static int fallback_init_blk(struct crypto_tfm *tfm)
+{
+       const char *name = tfm->__crt_alg->cra_name;
+       struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+       op->fallback.blk = crypto_alloc_blkcipher(name, 0,
+                       CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+       if (IS_ERR(op->fallback.blk)) {
+               printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+               return PTR_ERR(op->fallback.blk);
+       }
+
+       return 0;
+}
+
+static void fallback_exit_blk(struct crypto_tfm *tfm)
+{
+       struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+       crypto_free_blkcipher(op->fallback.blk);
+       op->fallback.blk = NULL;
+}
+
+static int
+dcp_aes_ecb_decrypt(struct blkcipher_desc *desc,
+                 struct scatterlist *dst, struct scatterlist *src,
+                 unsigned int nbytes)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       if (unlikely(op->cipher.keylen != AES_KEYSIZE_128))
+               return fallback_blk_dec(desc, dst, src, nbytes);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+
+       /* key needs to be mapped only once */
+       op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+                               AES_KEYSIZE_128, DMA_TO_DEVICE);
+       if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+               dev_err(sdcp->dev, "Unable to map key\n");
+               return -ENOMEM;
+       }
+
+       err = blkcipher_walk_virt(desc, &walk);
+       while (err == 0 && (nbytes = walk.nbytes) > 0) {
+               op->src = walk.src.virt.addr;
+               op->dst = walk.dst.virt.addr;
+               op->flags = DCP_AES | DCP_DEC | DCP_ECB;
+               op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+
+               /* map the data */
+               op->src_phys = dma_map_single(sdcp->dev, op->src, op->len,
+                                               DMA_TO_DEVICE);
+               if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+                       dev_err(sdcp->dev, "Unable to map source\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               op->dst_phys = dma_map_single(sdcp->dev, op->dst, op->len,
+                                               DMA_FROM_DEVICE);
+               if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+                       dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+                                               DMA_TO_DEVICE);
+                       dev_err(sdcp->dev, "Unable to map dest\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               /* perform! */
+               dcp_perform_op(op);
+
+               dma_unmap_single(sdcp->dev, op->dst_phys, op->len,
+                                       DMA_FROM_DEVICE);
+               dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+                                       DMA_TO_DEVICE);
+
+               nbytes -= op->len;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       dma_unmap_single(sdcp->dev, op->cipher.key_phys, AES_KEYSIZE_128,
+                               DMA_TO_DEVICE);
+
+       return err;
+}
+
+static int
+dcp_aes_ecb_encrypt(struct blkcipher_desc *desc,
+                 struct scatterlist *dst, struct scatterlist *src,
+                 unsigned int nbytes)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       if (unlikely(op->cipher.keylen != AES_KEYSIZE_128))
+               return fallback_blk_enc(desc, dst, src, nbytes);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+
+       /* key needs to be mapped only once */
+       op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+                               AES_KEYSIZE_128, DMA_TO_DEVICE);
+       if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+               dev_err(sdcp->dev, "Unable to map key\n");
+               return -ENOMEM;
+       }
+
+       err = blkcipher_walk_virt(desc, &walk);
+       while (err == 0 && (nbytes = walk.nbytes) > 0) {
+               op->src = walk.src.virt.addr;
+               op->dst = walk.dst.virt.addr;
+               op->flags = DCP_AES | DCP_ENC | DCP_ECB;
+               op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+
+               /* map the data */
+               op->src_phys = dma_map_single(sdcp->dev, op->src, op->len,
+                               DMA_TO_DEVICE);
+               if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+                       dev_err(sdcp->dev, "Unable to map source\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               op->dst_phys = dma_map_single(sdcp->dev, op->dst, op->len,
+                               DMA_FROM_DEVICE);
+               if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+                       dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+                                       DMA_TO_DEVICE);
+                       dev_err(sdcp->dev, "Unable to map dest\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               /* perform! */
+               dcp_perform_op(op);
+
+               dma_unmap_single(sdcp->dev, op->dst_phys, op->len,
+                               DMA_FROM_DEVICE);
+               dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+                               DMA_TO_DEVICE);
+
+               nbytes -= op->len;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       dma_unmap_single(sdcp->dev, op->cipher.key_phys, AES_KEYSIZE_128,
+                       DMA_TO_DEVICE);
+
+       return err;
+}
+
+
+static struct crypto_alg dcp_aes_ecb_alg = {
+       .cra_name               = "ecb(aes)",
+       .cra_driver_name        = "dcp-ecb-aes",
+       .cra_priority           = 400,
+       .cra_alignmask          = 15,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
+                                 CRYPTO_ALG_NEED_FALLBACK,
+       .cra_init               = fallback_init_blk,
+       .cra_exit               = fallback_exit_blk,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct dcp_op),
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(dcp_aes_ecb_alg.cra_list),
+       .cra_u                  = {
+               .blkcipher      = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = dcp_aes_setkey_blk,
+                       .encrypt        = dcp_aes_ecb_encrypt,
+                       .decrypt        = dcp_aes_ecb_decrypt
+               }
+       }
+};
+
+static int
+dcp_aes_cbc_decrypt(struct blkcipher_desc *desc,
+                 struct scatterlist *dst, struct scatterlist *src,
+                 unsigned int nbytes)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err, blockno;
+
+       if (unlikely(op->cipher.keylen != AES_KEYSIZE_128))
+               return fallback_blk_dec(desc, dst, src, nbytes);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+
+       blockno = 0;
+       err = blkcipher_walk_virt(desc, &walk);
+       while (err == 0 && (nbytes = walk.nbytes) > 0) {
+               op->src = walk.src.virt.addr;
+               op->dst = walk.dst.virt.addr;
+               op->flags = DCP_AES | DCP_DEC | DCP_CBC;
+               if (blockno == 0) {
+                       op->flags |= DCP_CBC_INIT;
+                       memcpy(op->cipher.key + AES_KEYSIZE_128, walk.iv,
+                               AES_BLOCK_SIZE);
+               }
+               op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+
+               /* key (+iv) needs to be mapped only once */
+               op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+                                       AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+                       dev_err(sdcp->dev, "Unable to map key\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               /* map the data */
+               op->src_phys = dma_map_single(sdcp->dev, op->src, op->len,
+                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+                       dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+                                       AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+                       dev_err(sdcp->dev, "Unable to map source\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               op->dst_phys = dma_map_single(sdcp->dev, op->dst, op->len,
+                                               DMA_FROM_DEVICE);
+               if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+                       dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+                                       AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+                       dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+                                       DMA_TO_DEVICE);
+                       dev_err(sdcp->dev, "Unable to map dest\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               /* perform! */
+               dcp_perform_op(op);
+
+               dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+                                       AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+               dma_unmap_single(sdcp->dev, op->dst_phys, op->len,
+                                       DMA_FROM_DEVICE);
+               dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+                                       DMA_TO_DEVICE);
+
+               nbytes -= op->len;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+
+               blockno++;
+       }
+
+       return err;
+}
+
+static int
+dcp_aes_cbc_encrypt(struct blkcipher_desc *desc,
+                 struct scatterlist *dst, struct scatterlist *src,
+                 unsigned int nbytes)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err, blockno;
+
+       if (unlikely(op->cipher.keylen != AES_KEYSIZE_128))
+               return fallback_blk_enc(desc, dst, src, nbytes);
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+
+       blockno = 0;
+
+       err = blkcipher_walk_virt(desc, &walk);
+       while (err == 0 && (nbytes = walk.nbytes) > 0) {
+               op->src = walk.src.virt.addr;
+               op->dst = walk.dst.virt.addr;
+               op->flags = DCP_AES | DCP_ENC | DCP_CBC;
+               if (blockno == 0) {
+                       op->flags |= DCP_CBC_INIT;
+                       memcpy(op->cipher.key + AES_KEYSIZE_128, walk.iv,
+                               AES_BLOCK_SIZE);
+               }
+               op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+
+               /* key needs to be mapped only once */
+               op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+                                       AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+                       dev_err(sdcp->dev, "Unable to map key\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               /* map the data */
+               op->src_phys = dma_map_single(sdcp->dev, op->src, op->len,
+                               DMA_TO_DEVICE);
+               if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+                       dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+                               AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+                       dev_err(sdcp->dev, "Unable to map source\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               op->dst_phys = dma_map_single(sdcp->dev, op->dst, op->len,
+                               DMA_FROM_DEVICE);
+               if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+                       dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+                                       AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+                       dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+                                       DMA_TO_DEVICE);
+                       dev_err(sdcp->dev, "Unable to map dest\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               /* perform! */
+               dcp_perform_op(op);
+
+               dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+                               AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+               dma_unmap_single(sdcp->dev, op->dst_phys, op->len,
+                               DMA_FROM_DEVICE);
+               dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+                               DMA_TO_DEVICE);
+
+               nbytes -= op->len;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+
+               blockno++;
+       }
+
+       return err;
+}
+
+static struct crypto_alg dcp_aes_cbc_alg = {
+       .cra_name               = "cbc(aes)",
+       .cra_driver_name        = "dcp-cbc-aes",
+       .cra_priority           = 400,
+       .cra_alignmask          = 15,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
+                                 CRYPTO_ALG_NEED_FALLBACK,
+       .cra_init               = fallback_init_blk,
+       .cra_exit               = fallback_exit_blk,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct dcp_op),
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_list               = LIST_HEAD_INIT(dcp_aes_cbc_alg.cra_list),
+       .cra_u                  = {
+               .blkcipher      = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = dcp_aes_setkey_blk,
+                       .encrypt        = dcp_aes_cbc_encrypt,
+                       .decrypt        = dcp_aes_cbc_decrypt,
+                       .ivsize         = AES_BLOCK_SIZE,
+               }
+       }
+};
+
+static int dcp_perform_hash_op(
+       struct dcp_hash_coherent_block *input,
+       u32 num_desc, bool init, bool terminate)
+{
+       struct dcp *sdcp = global_sdcp;
+       int chan;
+       struct dcp_hw_packet *pkt;
+       struct dcp_hash_coherent_block *hw;
+       unsigned long timeout;
+       u32 stat;
+       int descno, mapped;
+
+       chan = HASH_CHAN;
+
+       hw = input;
+       pkt = hw->pkt;
+
+       for (descno = 0; descno < num_desc; descno++) {
+
+               if (descno != 0) {
+
+                       /* set next ptr and CHAIN bit in last packet */
+                       pkt->pNext = hw->next->my_phys + offsetof(
+                               struct dcp_hash_coherent_block,
+                               pkt[0]);
+                       pkt->pkt1 |= BM_DCP_PACKET1_CHAIN;
+
+                       /* iterate to next descriptor */
+                       hw = hw->next;
+                       pkt = hw->pkt;
+               }
+
+               pkt->pkt1 = BM_DCP_PACKET1_DECR_SEMAPHORE |
+                                       BM_DCP_PACKET1_ENABLE_HASH;
+
+               if (init && descno == 0)
+                       pkt->pkt1 |= BM_DCP_PACKET1_HASH_INIT;
+
+               pkt->pkt2 = BF(hw->hash_sel,
+                               DCP_PACKET2_HASH_SELECT);
+
+               /* no need to flush buf1 or buf2, which are uncached */
+               if (hw->src != sdcp->buf1 && hw->src != sdcp->buf2) {
+
+                       /* we have to flush the cache for the buffer */
+                       hw->src_phys = dma_map_single(sdcp->dev,
+                               hw->src, hw->len, DMA_TO_DEVICE);
+
+                       if (dma_mapping_error(sdcp->dev, hw->src_phys)) {
+                               dev_err(sdcp->dev, "Unable to map source\n");
+
+                               /* unmap any previous mapped buffers */
+                               for (mapped = 0, hw = input; mapped < descno;
+                                       mapped++) {
+
+                                       if (mapped != 0)
+                                               hw = hw->next;
+                                       if (hw->src != sdcp->buf1 &&
+                                               hw->src != sdcp->buf2)
+                                               dma_unmap_single(sdcp->dev,
+                                                       hw->src_phys, hw->len,
+                                                       DMA_TO_DEVICE);
+                               }
+
+                               return -EFAULT;
+                       }
+               }
+
+               pkt->pSrc = (u32)hw->src_phys;
+               pkt->pDst = 0;
+               pkt->size = hw->len;
+               pkt->pPayload = 0;
+               pkt->stat = 0;
+
+               /* set HASH_TERM bit on last buf if terminate was set */
+               if (terminate && (descno == (num_desc - 1))) {
+                       pkt->pkt1 |= BM_DCP_PACKET1_HASH_TERM;
+
+                       memset(input->digest, 0, sizeof(input->digest));
+
+                       /* set payload ptr to the 1st buffer's digest */
+                       pkt->pPayload = (u32)input->my_phys +
+                               offsetof(
+                               struct dcp_hash_coherent_block,
+                               digest);
+               }
+       }
+
+       /* submit the work */
+
+       __raw_writel(-1, sdcp->dcp_regs_base + HW_DCP_CHnSTAT_CLR(chan));
+
+       mb();
+       /* Load the 1st descriptor's physical address */
+       __raw_writel((u32)input->my_phys +
+               offsetof(struct dcp_hash_coherent_block,
+               pkt[0]), sdcp->dcp_regs_base + HW_DCP_CHnCMDPTR(chan));
+
+       /* XXX wake from interrupt instead of looping */
+       timeout = jiffies + msecs_to_jiffies(1000);
+
+       /* write num_desc into sema register */
+       __raw_writel(BF(num_desc, DCP_CHnSEMA_INCREMENT),
+               sdcp->dcp_regs_base + HW_DCP_CHnSEMA(chan));
+
+       while (time_before(jiffies, timeout) &&
+               ((__raw_readl(sdcp->dcp_regs_base +
+               HW_DCP_CHnSEMA(chan)) >> 16) & 0xff) != 0) {
+
+               cpu_relax();
+       }
+
+       if (!time_before(jiffies, timeout)) {
+               dev_err(sdcp->dev,
+                       "Timeout while waiting STAT 0x%08x\n",
+                       __raw_readl(sdcp->dcp_regs_base + HW_DCP_STAT));
+       }
+
+       stat = __raw_readl(sdcp->dcp_regs_base + HW_DCP_CHnSTAT(chan));
+       if ((stat & 0xff) != 0)
+               dev_err(sdcp->dev, "Channel stat error 0x%02x\n",
+                               stat & 0xff);
+
+       /* unmap all src buffers */
+       for (descno = 0, hw = input; descno < num_desc; descno++) {
+               if (descno != 0)
+                       hw = hw->next;
+               if (hw->src != sdcp->buf1 && hw->src != sdcp->buf2)
+                       dma_unmap_single(sdcp->dev, hw->src_phys, hw->len,
+                               DMA_TO_DEVICE);
+       }
+
+       return 0;
+}
+
+static int dcp_sha_init(struct shash_desc *desc)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcp_hash_op *op = shash_desc_ctx(desc);
+       struct mutex *mutex = &sdcp->op_mutex[HASH_CHAN];
+
+       mutex_lock(mutex);
+       dcp_clock(sdcp, CLOCK_ON, false);
+       sdcp->chan_in_use[HASH_CHAN] = true;
+
+       op->length = 0;
+
+       /* reset the lengths and the pointers of buffer descriptors */
+       sdcp->buf1_desc->len = 0;
+       sdcp->buf1_desc->src = sdcp->buf1;
+       sdcp->buf2_desc->len = 0;
+       sdcp->buf2_desc->src = sdcp->buf2;
+       op->head_desc = sdcp->buf1_desc;
+       op->tail_desc = sdcp->buf2_desc;
+
+       return 0;
+}
+
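+/*
+ * The DCP requires every hash buffer except the last to be a multiple
+ * of 64 bytes, so updates are staged through the head/tail bounce
+ * buffers.  A worked example (hypothetical sizes): with 10 bytes
+ * pending in head, an update() of 100 bytes borrows 54 bytes to fill
+ * head to 64, leaving 46 user bytes; 46 % 64 == 46, so all 46 are
+ * copied to tail and only the 64-byte head is sent to the engine.
+ * Tail then becomes head for the next update or for final().
+ */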
+static int dcp_sha_update(struct shash_desc *desc, const u8 *data,
+                     unsigned int length)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcp_hash_op *op = shash_desc_ctx(desc);
+       struct dcp_hash_coherent_block *temp;
+       u32 rem_bytes, bytes_borrowed, hash_sel;
+       int ret = 0;
+
+       if (strcmp(desc->tfm->base.__crt_alg->cra_name, "sha1") == 0)
+               hash_sel = BV_DCP_PACKET2_HASH_SELECT__SHA1;
+       else
+               hash_sel = BV_DCP_PACKET2_HASH_SELECT__SHA256;
+
+       sdcp->user_buf_desc->src = (void *)data;
+       sdcp->user_buf_desc->len = length;
+
+       op->tail_desc->len = 0;
+
+       /* check if any pending data from previous updates */
+       if (op->head_desc->len) {
+               /* borrow from this buffer to make it 64 bytes */
+               bytes_borrowed = min(64 - op->head_desc->len,
+                               sdcp->user_buf_desc->len);
+
+               /* copy n bytes to head */
+               memcpy(op->head_desc->src + op->head_desc->len,
+                       sdcp->user_buf_desc->src, bytes_borrowed);
+               op->head_desc->len += bytes_borrowed;
+
+               /* update current buffer's src and len */
+               sdcp->user_buf_desc->src += bytes_borrowed;
+               sdcp->user_buf_desc->len -= bytes_borrowed;
+       }
+
+       /* Is the current buffer unaligned to a 64-byte length?
+        * Each buffer's length must be a multiple of 64 bytes for DCP.
+        */
+       rem_bytes = sdcp->user_buf_desc->len % 64;
+
+       /* if length is unaligned, copy remainder to tail */
+       if (rem_bytes) {
+
+               memcpy(op->tail_desc->src, (sdcp->user_buf_desc->src +
+                       sdcp->user_buf_desc->len - rem_bytes),
+                       rem_bytes);
+
+               /* update length of current buffer */
+               sdcp->user_buf_desc->len -= rem_bytes;
+
+               op->tail_desc->len = rem_bytes;
+       }
+
+       /* do not send to DCP if length is < 64 */
+       if ((op->head_desc->len + sdcp->user_buf_desc->len) >= 64) {
+
+               /* set hash alg to be used (SHA1 or SHA256) */
+               op->head_desc->hash_sel = hash_sel;
+               sdcp->user_buf_desc->hash_sel = hash_sel;
+
+               if (op->head_desc->len) {
+                       op->head_desc->next = sdcp->user_buf_desc;
+
+                       ret = dcp_perform_hash_op(op->head_desc,
+                               sdcp->user_buf_desc->len ? 2 : 1,
+                               op->length == 0, false);
+               } else {
+                       ret = dcp_perform_hash_op(sdcp->user_buf_desc, 1,
+                               op->length == 0, false);
+               }
+
+               op->length += op->head_desc->len + sdcp->user_buf_desc->len;
+               op->head_desc->len = 0;
+       }
+
+       /* if tail has bytes, make it the head for next time */
+       if (op->tail_desc->len) {
+               temp = op->head_desc;
+               op->head_desc = op->tail_desc;
+               op->tail_desc = temp;
+       }
+
+       /* hash_sel to be used by final function */
+       op->head_desc->hash_sel = hash_sel;
+
+       return ret;
+}
+
+static int dcp_sha_final(struct shash_desc *desc, u8 *out)
+{
+       struct dcp_hash_op *op = shash_desc_ctx(desc);
+       const uint8_t *digest;
+       struct dcp *sdcp = global_sdcp;
+       u32 i, digest_len;
+       struct mutex *mutex = &sdcp->op_mutex[HASH_CHAN];
+       int ret = 0;
+
+       /* Send the leftover bytes in head, which can be length 0,
+        * but the DCP still produces a hash result at the payload ptr.
+        * The last data bytes need not be a 64-byte multiple.
+        */
+       ret = dcp_perform_hash_op(op->head_desc, 1, op->length == 0, true);
+
+       op->length += op->head_desc->len;
+
+       digest_len = (op->head_desc->hash_sel ==
+               BV_DCP_PACKET2_HASH_SELECT__SHA1) ? SHA1_DIGEST_SIZE :
+               SHA256_DIGEST_SIZE;
+
+       /* hardware reverses the digest (for some inexplicable reason) */
+       digest = op->head_desc->digest + digest_len;
+       for (i = 0; i < digest_len; i++)
+               *out++ = *--digest;
+
+       sdcp->chan_in_use[HASH_CHAN] = false;
+       dcp_clock(sdcp, CLOCK_OFF, false);
+       mutex_unlock(mutex);
+
+       return ret;
+}
+
+static struct shash_alg dcp_sha1_alg = {
+       .init                   =       dcp_sha_init,
+       .update                 =       dcp_sha_update,
+       .final                  =       dcp_sha_final,
+       .descsize               =       sizeof(struct dcp_hash_op),
+       .digestsize             =       SHA1_DIGEST_SIZE,
+       .base                   =       {
+               .cra_name               =       "sha1",
+               .cra_driver_name        =       "sha1-dcp",
+               .cra_priority           =       300,
+               .cra_blocksize          =       SHA1_BLOCK_SIZE,
+               .cra_ctxsize            =
+                       sizeof(struct dcp_hash_op),
+               .cra_module             =       THIS_MODULE,
+       }
+};
+
+static struct shash_alg dcp_sha256_alg = {
+       .init                   =       dcp_sha_init,
+       .update                 =       dcp_sha_update,
+       .final                  =       dcp_sha_final,
+       .descsize               =       sizeof(struct dcp_hash_op),
+       .digestsize             =       SHA256_DIGEST_SIZE,
+       .base                   =       {
+               .cra_name               =       "sha256",
+               .cra_driver_name        =       "sha256-dcp",
+               .cra_priority           =       300,
+               .cra_blocksize          =       SHA256_BLOCK_SIZE,
+               .cra_ctxsize            =
+                       sizeof(struct dcp_hash_op),
+               .cra_module             =       THIS_MODULE,
+       }
+};
+
+static irqreturn_t dcp_common_irq(int irq, void *context)
+{
+       struct dcp *sdcp = context;
+       u32 msk;
+
+       /* check */
+       msk = __raw_readl(sdcp->dcp_regs_base + HW_DCP_STAT) &
+               BF(0x0f, DCP_STAT_IRQ);
+       if (msk == 0)
+               return IRQ_NONE;
+
+       /* clear this channel */
+       __raw_writel(msk, sdcp->dcp_regs_base + HW_DCP_STAT_CLR);
+       if (msk & BF(0x01, DCP_STAT_IRQ))
+               sdcp->wait[0]++;
+       if (msk & BF(0x02, DCP_STAT_IRQ))
+               sdcp->wait[1]++;
+       if (msk & BF(0x04, DCP_STAT_IRQ))
+               sdcp->wait[2]++;
+       if (msk & BF(0x08, DCP_STAT_IRQ))
+               sdcp->wait[3]++;
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t dcp_vmi_irq(int irq, void *context)
+{
+       return dcp_common_irq(irq, context);
+}
+
+static irqreturn_t dcp_irq(int irq, void *context)
+{
+       return dcp_common_irq(irq, context);
+}
+
+/* DCP bootstream verification interface: uses OTP key for crypto */
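+/*
+ * A minimal userspace sketch (assuming standard headers and the
+ * /dev/dcpboot node created by misc_register() below):
+ *
+ *     int fd = open("/dev/dcpboot", O_RDWR);
+ *     uint8_t block[16];              // one AES block, crypted in place
+ *     ioctl(fd, DBS_ENC, block);      // or DBS_DEC
+ *
+ * DBS_ENC/DBS_DEC come from dcp_bootstream_ioctl.h.
+ */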
+static long dcp_bootstream_ioctl(struct file *file,
+                                unsigned int cmd, unsigned long arg)
+{
+       struct dcp *sdcp = global_sdcp;
+       struct dcpboot_dma_area *da = sdcp->dcpboot_dma_area;
+       void __user *argp = (void __user *)arg;
+       int chan = ROM_DCP_CHAN;
+       unsigned long timeout;
+       struct mutex *mutex;
+       int retVal;
+
+       /* be paranoid */
+       if (sdcp == NULL)
+               return -EBADF;
+
+       if (cmd != DBS_ENC && cmd != DBS_DEC)
+               return -EINVAL;
+
+       /* copy to (aligned) block */
+       if (copy_from_user(da->block, argp, 16))
+               return -EFAULT;
+
+       mutex = &sdcp->op_mutex[chan];
+       mutex_lock(mutex);
+       dcp_clock(sdcp, CLOCK_ON, false);
+       sdcp->chan_in_use[chan] = true;
+
+       __raw_writel(-1, sdcp->dcp_regs_base +
+               HW_DCP_CHnSTAT_CLR(ROM_DCP_CHAN));
+       __raw_writel(BF(ROM_DCP_CHAN_MASK, DCP_STAT_IRQ),
+               sdcp->dcp_regs_base + HW_DCP_STAT_CLR);
+
+       da->hw_packet.pNext = 0;
+       da->hw_packet.pkt1 = BM_DCP_PACKET1_DECR_SEMAPHORE |
+           BM_DCP_PACKET1_ENABLE_CIPHER | BM_DCP_PACKET1_OTP_KEY |
+           BM_DCP_PACKET1_INTERRUPT |
+           (cmd == DBS_ENC ? BM_DCP_PACKET1_CIPHER_ENCRYPT : 0);
+       da->hw_packet.pkt2 = BF(0, DCP_PACKET2_CIPHER_CFG) |
+           BF(0, DCP_PACKET2_KEY_SELECT) |
+           BF(BV_DCP_PACKET2_CIPHER_MODE__ECB, DCP_PACKET2_CIPHER_MODE) |
+           BF(BV_DCP_PACKET2_CIPHER_SELECT__AES128, DCP_PACKET2_CIPHER_SELECT);
+       da->hw_packet.pSrc = sdcp->dcpboot_dma_area_phys +
+           offsetof(struct dcpboot_dma_area, block);
+       da->hw_packet.pDst = da->hw_packet.pSrc;        /* in-place */
+       da->hw_packet.size = 16;
+       da->hw_packet.pPayload = 0;
+       da->hw_packet.stat = 0;
+
+       /* Load the work packet pointer and bump the channel semaphore */
+       __raw_writel(sdcp->dcpboot_dma_area_phys +
+                    offsetof(struct dcpboot_dma_area, hw_packet),
+                    sdcp->dcp_regs_base + HW_DCP_CHnCMDPTR(ROM_DCP_CHAN));
+
+       sdcp->wait[chan] = 0;
+       __raw_writel(BF(1, DCP_CHnSEMA_INCREMENT),
+                    sdcp->dcp_regs_base + HW_DCP_CHnSEMA(ROM_DCP_CHAN));
+
+       timeout = jiffies + msecs_to_jiffies(100);
+
+       while (time_before(jiffies, timeout) && sdcp->wait[chan] == 0)
+               cpu_relax();
+
+       if (!time_before(jiffies, timeout)) {
+               dev_err(sdcp->dev,
+                       "Timeout while waiting for operation to complete\n");
+               retVal = -ETIMEDOUT;
+               goto exit;
+       }
+
+       if ((__raw_readl(sdcp->dcp_regs_base + HW_DCP_CHnSTAT(ROM_DCP_CHAN))
+                       & 0xff) != 0) {
+               dev_err(sdcp->dev, "Channel stat error 0x%02x\n",
+                       __raw_readl(sdcp->dcp_regs_base +
+                                   HW_DCP_CHnSTAT(ROM_DCP_CHAN)) & 0xff);
+               retVal = -EFAULT;
+               goto exit;
+       }
+
+       if (copy_to_user(argp, da->block, 16)) {
+               retVal =  -EFAULT;
+               goto exit;
+       }
+
+       retVal = 0;
+
+exit:
+       sdcp->chan_in_use[chan] = false;
+       dcp_clock(sdcp, CLOCK_OFF, false);
+       mutex_unlock(mutex);
+       return retVal;
+}
+
+static const struct file_operations dcp_bootstream_fops = {
+       .owner =        THIS_MODULE,
+       .unlocked_ioctl =       dcp_bootstream_ioctl,
+};
+
+static struct miscdevice dcp_bootstream_misc = {
+       .minor  = MISC_DYNAMIC_MINOR,
+       .name   = "dcpboot",
+       .fops = &dcp_bootstream_fops,
+};
+
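+/*
+ * Probe order, as implemented below: map the registers and ungate the
+ * clock, soft-reset the block, set up both IRQ lines, register the
+ * AES/ECB/CBC algs, allocate the coherent hash descriptors and bounce
+ * buffers, register the SHA shash algs (SHA256 only if the CAPABILITY1
+ * register advertises it), and finally expose the dcpboot misc device.
+ */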
+static int dcp_probe(struct platform_device *pdev)
+{
+       struct dcp *sdcp = NULL;
+       struct resource *r;
+       int i, ret;
+       dma_addr_t hw_phys;
+
+       if (global_sdcp != NULL) {
+               dev_err(&pdev->dev, "Only one instance allowed\n");
+               ret = -ENODEV;
+               goto err;
+       }
+
+       /* allocate memory */
+       sdcp = kzalloc(sizeof(*sdcp), GFP_KERNEL);
+       if (sdcp == NULL) {
+               dev_err(&pdev->dev, "Failed to allocate structure\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       sdcp->dev = &pdev->dev;
+       spin_lock_init(&sdcp->lock);
+
+       for (i = 0; i < DCP_NUM_CHANNELS; i++) {
+               mutex_init(&sdcp->op_mutex[i]);
+               init_completion(&sdcp->op_wait[i]);
+               sdcp->chan_in_use[i] = false;
+       }
+
+       platform_set_drvdata(pdev, sdcp);
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!r) {
+               dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
+               ret = -ENXIO;
+               goto err_kfree;
+       }
+       sdcp->dcp_regs_base = (u32) ioremap(r->start, resource_size(r));
+       if (!sdcp->dcp_regs_base) {
+               dev_err(&pdev->dev, "failed to ioremap registers\n");
+               ret = -ENXIO;
+               goto err_kfree;
+       }
+       dcp_clock(sdcp, CLOCK_ON, true);
+
+       /* Soft reset and remove the clock gate */
+       __raw_writel(BM_DCP_CTRL_SFTRST, sdcp->dcp_regs_base + HW_DCP_CTRL_SET);
+
+       /* At 24 MHz it takes no more than 4 clocks (160 ns) for the
+        * part to reset; reading the register twice should be
+        * sufficient to provide the 4-clock delay.
+        */
+       __raw_readl(sdcp->dcp_regs_base + HW_DCP_CTRL);
+       __raw_readl(sdcp->dcp_regs_base + HW_DCP_CTRL);
+
+       __raw_writel(BM_DCP_CTRL_SFTRST | BM_DCP_CTRL_CLKGATE,
+               sdcp->dcp_regs_base + HW_DCP_CTRL_CLR);
+
+       /* Initialize control registers */
+       __raw_writel(DCP_CTRL_INIT, sdcp->dcp_regs_base + HW_DCP_CTRL);
+       __raw_writel(DCP_CHANNELCTRL_INIT, sdcp->dcp_regs_base +
+               HW_DCP_CHANNELCTRL);
+
+       /* We do not enable context switching. Give the context
+        * buffer pointer an illegal address so that if context switching
+        * is inadvertently enabled, the DCP will return an error instead
+        * of trashing good memory. The DCP DMA cannot access ROM, so any
+        * ROM address will do.
+        */
+       __raw_writel(0xFFFF0000, sdcp->dcp_regs_base + HW_DCP_CONTEXT);
+
+       for (i = 0; i < DCP_NUM_CHANNELS; i++)
+               __raw_writel(-1, sdcp->dcp_regs_base + HW_DCP_CHnSTAT_CLR(i));
+       __raw_writel(-1, sdcp->dcp_regs_base + HW_DCP_STAT_CLR);
+
+       r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!r) {
+               dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
+               ret = -EIO;
+               goto err_gate_clk;
+       }
+       sdcp->dcp_vmi_irq = r->start;
+       ret = request_irq(sdcp->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp",
+                               sdcp);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "can't request_irq (0)\n");
+               goto err_gate_clk;
+       }
+
+       r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+       if (!r) {
+               dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
+               ret = -EIO;
+               goto err_free_irq0;
+       }
+       sdcp->dcp_irq = r->start;
+       ret = request_irq(sdcp->dcp_irq, dcp_irq, 0, "dcp", sdcp);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "can't request_irq (1)\n");
+               goto err_free_irq0;
+       }
+
+       global_sdcp = sdcp;
+
+       ret = crypto_register_alg(&dcp_aes_alg);
+       if (ret != 0)  {
+               dev_err(&pdev->dev, "Failed to register aes crypto\n");
+               goto err_free_irq1;
+       }
+
+       ret = crypto_register_alg(&dcp_aes_ecb_alg);
+       if (ret != 0)  {
+               dev_err(&pdev->dev, "Failed to register aes ecb crypto\n");
+               goto err_unregister_aes;
+       }
+
+       ret = crypto_register_alg(&dcp_aes_cbc_alg);
+       if (ret != 0)  {
+               dev_err(&pdev->dev, "Failed to register aes cbc crypto\n");
+               goto err_unregister_aes_ecb;
+       }
+
+       /* Allocate the descriptor used for user buffers passed in
+        * by the Crypto API "update" function.
+        */
+       sdcp->user_buf_desc = dma_alloc_coherent(sdcp->dev,
+               sizeof(struct dcp_hash_coherent_block),  &hw_phys,
+               GFP_KERNEL);
+       if (sdcp->user_buf_desc == NULL) {
+               dev_err(&pdev->dev, "Error allocating coherent block\n");
+               ret = -ENOMEM;
+               goto err_unregister_aes_cbc;
+       }
+
+       sdcp->user_buf_desc->my_phys = hw_phys;
+
+       /* Allocate two buffers (head & tail) and their descriptors to
+        * handle buffer lengths that are not 64-byte aligned (except
+        * for the last one).
+        */
+       sdcp->buf1 = dma_alloc_coherent(sdcp->dev,
+               64, &sdcp->buf1_phys, GFP_KERNEL);
+       if (sdcp->buf1 == NULL) {
+               dev_err(&pdev->dev, "Error allocating coherent block\n");
+               ret = -ENOMEM;
+               goto err_unregister_aes_cbc;
+       }
+
+       sdcp->buf2 = dma_alloc_coherent(sdcp->dev,
+               64,  &sdcp->buf2_phys, GFP_KERNEL);
+       if (sdcp->buf2 == NULL) {
+               dev_err(&pdev->dev, "Error allocating coherent block\n");
+               ret = -ENOMEM;
+               goto err_unregister_aes_cbc;
+       }
+
+       sdcp->buf1_desc = dma_alloc_coherent(sdcp->dev,
+               sizeof(struct dcp_hash_coherent_block), &hw_phys,
+               GFP_KERNEL);
+       if (sdcp->buf1_desc == NULL) {
+               dev_err(&pdev->dev, "Error allocating coherent block\n");
+               ret = -ENOMEM;
+               goto err_unregister_aes_cbc;
+       }
+
+       sdcp->buf1_desc->my_phys = hw_phys;
+       sdcp->buf1_desc->src = (void *)sdcp->buf1;
+       sdcp->buf1_desc->src_phys = sdcp->buf1_phys;
+
+       sdcp->buf2_desc = dma_alloc_coherent(sdcp->dev,
+               sizeof(struct dcp_hash_coherent_block), &hw_phys,
+               GFP_KERNEL);
+       if (sdcp->buf2_desc == NULL) {
+               dev_err(&pdev->dev, "Error allocating coherent block\n");
+               ret = -ENOMEM;
+               goto err_unregister_aes_cbc;
+       }
+
+       sdcp->buf2_desc->my_phys = hw_phys;
+       sdcp->buf2_desc->src = (void *)sdcp->buf2;
+       sdcp->buf2_desc->src_phys = sdcp->buf2_phys;
+
+       ret = crypto_register_shash(&dcp_sha1_alg);
+       if (ret != 0)  {
+               dev_err(&pdev->dev, "Failed to register sha1 hash\n");
+               goto err_unregister_aes_cbc;
+       }
+
+       if (__raw_readl(sdcp->dcp_regs_base + HW_DCP_CAPABILITY1) &
+               BF_DCP_CAPABILITY1_HASH_ALGORITHMS(
+               BV_DCP_CAPABILITY1_HASH_ALGORITHMS__SHA256)) {
+
+               ret = crypto_register_shash(&dcp_sha256_alg);
+               if (ret != 0)  {
+                       dev_err(&pdev->dev, "Failed to register sha256 hash\n");
+                       goto err_unregister_sha1;
+               }
+       }
+
+       /* Register the dcpboot interface so that applications (such as
+        * kobs-ng) can verify files (such as the bootstream) using the
+        * OTP key for crypto. */
+       ret = misc_register(&dcp_bootstream_misc);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "Unable to register misc device\n");
+               goto err_unregister_sha1;
+       }
+
+       sdcp->dcpboot_dma_area = dma_alloc_coherent(&pdev->dev,
+               sizeof(*sdcp->dcpboot_dma_area), &sdcp->dcpboot_dma_area_phys,
+               GFP_KERNEL);
+       if (sdcp->dcpboot_dma_area == NULL) {
+               dev_err(&pdev->dev,
+                       "Unable to allocate DMAable memory "
+                       "for the dcpboot interface\n");
+               ret = -ENOMEM;
+               goto err_dereg;
+       }
+
+       dcp_clock(sdcp, CLOCK_OFF, false);
+       dev_notice(&pdev->dev, "DCP crypto enabled\n");
+       return 0;
+
+err_dereg:
+       misc_deregister(&dcp_bootstream_misc);
+err_unregister_sha1:
+       crypto_unregister_shash(&dcp_sha1_alg);
+err_unregister_aes_cbc:
+       crypto_unregister_alg(&dcp_aes_cbc_alg);
+err_unregister_aes_ecb:
+       crypto_unregister_alg(&dcp_aes_ecb_alg);
+err_unregister_aes:
+       crypto_unregister_alg(&dcp_aes_alg);
+err_free_irq1:
+       free_irq(sdcp->dcp_irq, sdcp);
+err_free_irq0:
+       free_irq(sdcp->dcp_vmi_irq, sdcp);
+err_gate_clk:
+       dcp_clock(sdcp, CLOCK_OFF, false);
+err_kfree:
+       kfree(sdcp);
+err:
+       return ret;
+}
+
+static int dcp_remove(struct platform_device *pdev)
+{
+       struct dcp *sdcp;
+
+       sdcp = platform_get_drvdata(pdev);
+       platform_set_drvdata(pdev, NULL);
+
+       dcp_clock(sdcp, CLOCK_ON, false);
+
+       free_irq(sdcp->dcp_irq, sdcp);
+       free_irq(sdcp->dcp_vmi_irq, sdcp);
+
+       /* if head and tail buffers were allocated, free them */
+       if (sdcp->buf1) {
+               dma_free_coherent(sdcp->dev, 64, sdcp->buf1, sdcp->buf1_phys);
+               dma_free_coherent(sdcp->dev, 64, sdcp->buf2, sdcp->buf2_phys);
+
+               dma_free_coherent(sdcp->dev,
+                               sizeof(struct dcp_hash_coherent_block),
+                               sdcp->buf1_desc, sdcp->buf1_desc->my_phys);
+
+               dma_free_coherent(sdcp->dev,
+                               sizeof(struct dcp_hash_coherent_block),
+                               sdcp->buf2_desc, sdcp->buf2_desc->my_phys);
+
+               dma_free_coherent(sdcp->dev,
+                       sizeof(struct dcp_hash_coherent_block),
+                       sdcp->user_buf_desc, sdcp->user_buf_desc->my_phys);
+       }
+
+       if (sdcp->dcpboot_dma_area) {
+               dma_free_coherent(&pdev->dev, sizeof(*sdcp->dcpboot_dma_area),
+                         sdcp->dcpboot_dma_area, sdcp->dcpboot_dma_area_phys);
+               misc_deregister(&dcp_bootstream_misc);
+       }
+
+
+       crypto_unregister_shash(&dcp_sha1_alg);
+
+       if (__raw_readl(sdcp->dcp_regs_base + HW_DCP_CAPABILITY1) &
+               BF_DCP_CAPABILITY1_HASH_ALGORITHMS(
+               BV_DCP_CAPABILITY1_HASH_ALGORITHMS__SHA256))
+               crypto_unregister_shash(&dcp_sha256_alg);
+
+       crypto_unregister_alg(&dcp_aes_cbc_alg);
+       crypto_unregister_alg(&dcp_aes_ecb_alg);
+       crypto_unregister_alg(&dcp_aes_alg);
+
+       dcp_clock(sdcp, CLOCK_OFF, true);
+       iounmap((void *) sdcp->dcp_regs_base);
+       kfree(sdcp);
+       global_sdcp = NULL;
+
+       return 0;
+}
+
+static int dcp_suspend(struct platform_device *pdev,
+               pm_message_t state)
+{
+#ifdef CONFIG_PM
+       struct dcp *sdcp = platform_get_drvdata(pdev);
+
+       if (sdcp->clock_state == CLOCK_ON) {
+               dcp_clock(sdcp, CLOCK_OFF, true);
+               /* indicate that clock needs to be turned on upon resume */
+               sdcp->clock_state = CLOCK_ON;
+       }
+#endif
+       return 0;
+}
+
+static int dcp_resume(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM
+       struct dcp *sdcp = platform_get_drvdata(pdev);
+
+       /* if clock was on prior to suspend, turn it back on */
+       if (sdcp->clock_state == CLOCK_ON)
+               dcp_clock(sdcp, CLOCK_ON, true);
+#endif
+       return 0;
+}
+
+static struct platform_driver dcp_driver = {
+       .probe          = dcp_probe,
+       .remove         = dcp_remove,
+       .suspend        = dcp_suspend,
+       .resume         = dcp_resume,
+       .driver         = {
+               .name   = "dcp",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init
+dcp_init(void)
+{
+       return platform_driver_register(&dcp_driver);
+}
+
+static void __exit
+dcp_exit(void)
+{
+       platform_driver_unregister(&dcp_driver);
+}
+
+MODULE_AUTHOR("Pantelis Antoniou <pantelis@embeddedalley.com>");
+MODULE_DESCRIPTION("DCP Crypto Driver");
+MODULE_LICENSE("GPL");
+
+module_init(dcp_init);
+module_exit(dcp_exit);
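
(Editorial note, not part of the patch.) Once this driver is loaded, the algorithms it registers are reached through the kernel Crypto API like any other provider. Below is a minimal kernel-side sketch using the 2.6.38-era synchronous blkcipher interface; the "cbc(aes)" cra_name assumed for dcp_aes_cbc_alg, the key, and the buffer contents are illustrative assumptions.

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int dcp_cbc_demo(void)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            u8 key[16] = { 0 };             /* AES-128 key (placeholder) */
            u8 iv[16] = { 0 };              /* IV (placeholder) */
            u8 buf[16] = "hello, dcp!";     /* one padded AES block */
            int ret;

            tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            desc.tfm = tfm;
            desc.flags = 0;

            ret = crypto_blkcipher_setkey(tfm, key, sizeof(key));
            if (ret == 0) {
                    crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
                    sg_init_one(&sg, buf, sizeof(buf));
                    /* encrypt in place; the DCP implementation is used if
                     * its priority wins the algorithm lookup */
                    ret = crypto_blkcipher_encrypt(&desc, &sg, &sg,
                                                   sizeof(buf));
            }

            crypto_free_blkcipher(tfm);
            return ret;
    }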
diff --git a/drivers/crypto/dcp.h b/drivers/crypto/dcp.h
new file mode 100644 (file)
index 0000000..f96b9ea
--- /dev/null
@@ -0,0 +1,717 @@
+/*
+ * Copyright 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef DCP_H_
+#define DCP_H_
+
+#define CIPHER_CHAN    1
+#define CIPHER_MASK    (1 << CIPHER_CHAN)
+
+#define HASH_CHAN      0
+#define HASH_MASK      (1 << HASH_CHAN)
+
+/* The DCP bootstream interface uses this channel (same as the ROM) */
+#define ROM_DCP_CHAN 3
+#define ROM_DCP_CHAN_MASK (1 << ROM_DCP_CHAN)
+
+#define ALL_MASK       (CIPHER_MASK | HASH_MASK | ROM_DCP_CHAN_MASK)
+
+/* Defines the initialization value for the dcp control register */
+#define DCP_CTRL_INIT \
+   (BM_DCP_CTRL_GATHER_RESIDUAL_WRITES | \
+    BM_DCP_CTRL_ENABLE_CONTEXT_CACHING | \
+    BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH0 | \
+    BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH1 | \
+    BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH2 | \
+    BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH3)
+
+/* Defines the initialization value for the dcp channel control register */
+#define DCP_CHANNELCTRL_INIT \
+    BF(ALL_MASK, DCP_CHANNELCTRL_ENABLE_CHANNEL)
+
+/* DCP work packet 1 value for encryption */
+#define DCP_PKT1_ENCRYPT \
+   (BM_DCP_PACKET1_DECR_SEMAPHORE | \
+    BM_DCP_PACKET1_ENABLE_CIPHER | \
+    BM_DCP_PACKET1_CIPHER_ENCRYPT | \
+    BM_DCP_PACKET1_CIPHER_INIT)
+
+/* DCP work packet 1 value for decryption */
+#define DCP_PKT1_DECRYPT \
+   (BM_DCP_PACKET1_DECR_SEMAPHORE | \
+    BM_DCP_PACKET1_ENABLE_CIPHER | \
+    BM_DCP_PACKET1_CIPHER_INIT)
+
+/* DCP (decryption) work packet definition */
+struct dcp_hw_packet {
+       uint32_t pNext;     /* next dcp work packet address */
+       uint32_t pkt1;      /* dcp work packet 1 (control 0) */
+       uint32_t pkt2;      /* dcp work packet 2 (control 1) */
+       uint32_t pSrc;      /* source buffer address */
+       uint32_t pDst;      /* destination buffer address */
+       uint32_t size;      /* buffer size in bytes */
+       uint32_t pPayload;  /* payload buffer address */
+       uint32_t stat;      /* dcp status (written by dcp) */
+};
+
+#define DCP_NUM_CHANNELS 4
+
+/* DCP Register definitions */
+
+#ifndef BF
+#define BF(value, field) (((value) << BP_##field) & BM_##field)
+#endif
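
(Editorial note, not part of the patch.) As a worked expansion of the BF() helper: with the channel masks above and the ENABLE_CHANNEL field defined further down (BP = 0, BM = 0x000000FF), DCP_CHANNELCTRL_INIT evaluates to:

    BF(ALL_MASK, DCP_CHANNELCTRL_ENABLE_CHANNEL)
      = ((ALL_MASK << BP_DCP_CHANNELCTRL_ENABLE_CHANNEL)
         & BM_DCP_CHANNELCTRL_ENABLE_CHANNEL)
      = (((0x01 | 0x02 | 0x08) << 0) & 0x000000FF)
      = 0x0000000B          /* channels 0, 1 and 3 enabled */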
+
+#define REGS_DCP_SIZE 0x00002000
+
+#define HW_DCP_CTRL    (0x00000000)
+#define HW_DCP_CTRL_SET        (0x00000004)
+#define HW_DCP_CTRL_CLR        (0x00000008)
+#define HW_DCP_CTRL_TOG        (0x0000000c)
+
+#define BM_DCP_CTRL_SFTRST     0x80000000
+#define BM_DCP_CTRL_CLKGATE    0x40000000
+#define BM_DCP_CTRL_PRESENT_CRYPTO     0x20000000
+#define BV_DCP_CTRL_PRESENT_CRYPTO__Present 0x1
+#define BV_DCP_CTRL_PRESENT_CRYPTO__Absent  0x0
+#define BM_DCP_CTRL_PRESENT_CSC        0x10000000
+#define BV_DCP_CTRL_PRESENT_CSC__Present 0x1
+#define BV_DCP_CTRL_PRESENT_CSC__Absent  0x0
+#define BP_DCP_CTRL_RSVD1      24
+#define BM_DCP_CTRL_RSVD1      0x0F000000
+#define BF_DCP_CTRL_RSVD1(v)  \
+               (((v) << 24) & BM_DCP_CTRL_RSVD1)
+#define BM_DCP_CTRL_GATHER_RESIDUAL_WRITES     0x00800000
+#define BM_DCP_CTRL_ENABLE_CONTEXT_CACHING     0x00400000
+#define BM_DCP_CTRL_ENABLE_CONTEXT_SWITCHING   0x00200000
+#define BP_DCP_CTRL_RSVD0      9
+#define BM_DCP_CTRL_RSVD0      0x001FFE00
+#define BF_DCP_CTRL_RSVD0(v)  \
+               (((v) << 9) & BM_DCP_CTRL_RSVD0)
+#define BM_DCP_CTRL_CSC_INTERRUPT_ENABLE       0x00000100
+#define BP_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE   0
+#define BM_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE   0x000000FF
+#define BF_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE(v)  \
+               (((v) << 0) & BM_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE)
+#define BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH0 0x01
+#define BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH1 0x02
+#define BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH2 0x04
+#define BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH3 0x08
+
+#define HW_DCP_STAT    (0x00000010)
+#define HW_DCP_STAT_SET        (0x00000014)
+#define HW_DCP_STAT_CLR        (0x00000018)
+#define HW_DCP_STAT_TOG        (0x0000001c)
+
+#define BP_DCP_STAT_RSVD2      29
+#define BM_DCP_STAT_RSVD2      0xE0000000
+#define BF_DCP_STAT_RSVD2(v) \
+               (((v) << 29) & BM_DCP_STAT_RSVD2)
+#define BM_DCP_STAT_OTP_KEY_READY      0x10000000
+#define BP_DCP_STAT_CUR_CHANNEL        24
+#define BM_DCP_STAT_CUR_CHANNEL        0x0F000000
+#define BF_DCP_STAT_CUR_CHANNEL(v)  \
+               (((v) << 24) & BM_DCP_STAT_CUR_CHANNEL)
+#define BV_DCP_STAT_CUR_CHANNEL__None 0x0
+#define BV_DCP_STAT_CUR_CHANNEL__CH0  0x1
+#define BV_DCP_STAT_CUR_CHANNEL__CH1  0x2
+#define BV_DCP_STAT_CUR_CHANNEL__CH2  0x3
+#define BV_DCP_STAT_CUR_CHANNEL__CH3  0x4
+#define BV_DCP_STAT_CUR_CHANNEL__CSC  0x8
+#define BP_DCP_STAT_READY_CHANNELS     16
+#define BM_DCP_STAT_READY_CHANNELS     0x00FF0000
+#define BF_DCP_STAT_READY_CHANNELS(v)  \
+               (((v) << 16) & BM_DCP_STAT_READY_CHANNELS)
+#define BV_DCP_STAT_READY_CHANNELS__CH0 0x01
+#define BV_DCP_STAT_READY_CHANNELS__CH1 0x02
+#define BV_DCP_STAT_READY_CHANNELS__CH2 0x04
+#define BV_DCP_STAT_READY_CHANNELS__CH3 0x08
+#define BP_DCP_STAT_RSVD1      9
+#define BM_DCP_STAT_RSVD1      0x0000FE00
+#define BF_DCP_STAT_RSVD1(v)  \
+               (((v) << 9) & BM_DCP_STAT_RSVD1)
+#define BM_DCP_STAT_CSCIRQ     0x00000100
+#define BP_DCP_STAT_RSVD0      4
+#define BM_DCP_STAT_RSVD0      0x000000F0
+#define BF_DCP_STAT_RSVD0(v)  \
+               (((v) << 4) & BM_DCP_STAT_RSVD0)
+#define BP_DCP_STAT_IRQ        0
+#define BM_DCP_STAT_IRQ        0x0000000F
+#define BF_DCP_STAT_IRQ(v)  \
+               (((v) << 0) & BM_DCP_STAT_IRQ)
+
+#define HW_DCP_CHANNELCTRL     (0x00000020)
+#define HW_DCP_CHANNELCTRL_SET (0x00000024)
+#define HW_DCP_CHANNELCTRL_CLR (0x00000028)
+#define HW_DCP_CHANNELCTRL_TOG (0x0000002c)
+
+#define BP_DCP_CHANNELCTRL_RSVD        19
+#define BM_DCP_CHANNELCTRL_RSVD        0xFFF80000
+#define BF_DCP_CHANNELCTRL_RSVD(v) \
+               (((v) << 19) & BM_DCP_CHANNELCTRL_RSVD)
+#define BP_DCP_CHANNELCTRL_CSC_PRIORITY        17
+#define BM_DCP_CHANNELCTRL_CSC_PRIORITY        0x00060000
+#define BF_DCP_CHANNELCTRL_CSC_PRIORITY(v)  \
+               (((v) << 17) & BM_DCP_CHANNELCTRL_CSC_PRIORITY)
+#define BV_DCP_CHANNELCTRL_CSC_PRIORITY__HIGH       0x3
+#define BV_DCP_CHANNELCTRL_CSC_PRIORITY__MED        0x2
+#define BV_DCP_CHANNELCTRL_CSC_PRIORITY__LOW        0x1
+#define BV_DCP_CHANNELCTRL_CSC_PRIORITY__BACKGROUND 0x0
+#define BM_DCP_CHANNELCTRL_CH0_IRQ_MERGED      0x00010000
+#define BP_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL       8
+#define BM_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL       0x0000FF00
+#define BF_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL(v)  \
+               (((v) << 8) & BM_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL)
+#define BV_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL__CH0 0x01
+#define BV_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL__CH1 0x02
+#define BV_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL__CH2 0x04
+#define BV_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL__CH3 0x08
+#define BP_DCP_CHANNELCTRL_ENABLE_CHANNEL      0
+#define BM_DCP_CHANNELCTRL_ENABLE_CHANNEL      0x000000FF
+#define BF_DCP_CHANNELCTRL_ENABLE_CHANNEL(v)  \
+               (((v) << 0) & BM_DCP_CHANNELCTRL_ENABLE_CHANNEL)
+#define BV_DCP_CHANNELCTRL_ENABLE_CHANNEL__CH0 0x01
+#define BV_DCP_CHANNELCTRL_ENABLE_CHANNEL__CH1 0x02
+#define BV_DCP_CHANNELCTRL_ENABLE_CHANNEL__CH2 0x04
+#define BV_DCP_CHANNELCTRL_ENABLE_CHANNEL__CH3 0x08
+
+#define HW_DCP_CAPABILITY0     (0x00000030)
+
+#define BM_DCP_CAPABILITY0_DISABLE_DECRYPT     0x80000000
+#define BM_DCP_CAPABILITY0_ENABLE_TZONE        0x40000000
+#define BP_DCP_CAPABILITY0_RSVD        12
+#define BM_DCP_CAPABILITY0_RSVD        0x3FFFF000
+#define BF_DCP_CAPABILITY0_RSVD(v)  \
+               (((v) << 12) & BM_DCP_CAPABILITY0_RSVD)
+#define BP_DCP_CAPABILITY0_NUM_CHANNELS        8
+#define BM_DCP_CAPABILITY0_NUM_CHANNELS        0x00000F00
+#define BF_DCP_CAPABILITY0_NUM_CHANNELS(v)  \
+               (((v) << 8) & BM_DCP_CAPABILITY0_NUM_CHANNELS)
+#define BP_DCP_CAPABILITY0_NUM_KEYS    0
+#define BM_DCP_CAPABILITY0_NUM_KEYS    0x000000FF
+#define BF_DCP_CAPABILITY0_NUM_KEYS(v)  \
+               (((v) << 0) & BM_DCP_CAPABILITY0_NUM_KEYS)
+
+#define HW_DCP_CAPABILITY1     (0x00000040)
+
+#define BP_DCP_CAPABILITY1_HASH_ALGORITHMS     16
+#define BM_DCP_CAPABILITY1_HASH_ALGORITHMS     0xFFFF0000
+#define BF_DCP_CAPABILITY1_HASH_ALGORITHMS(v) \
+               (((v) << 16) & BM_DCP_CAPABILITY1_HASH_ALGORITHMS)
+#define BV_DCP_CAPABILITY1_HASH_ALGORITHMS__SHA1  0x0001
+#define BV_DCP_CAPABILITY1_HASH_ALGORITHMS__CRC32 0x0002
+#define BV_DCP_CAPABILITY1_HASH_ALGORITHMS__SHA256  0x0004
+#define BP_DCP_CAPABILITY1_CIPHER_ALGORITHMS   0
+#define BM_DCP_CAPABILITY1_CIPHER_ALGORITHMS   0x0000FFFF
+#define BF_DCP_CAPABILITY1_CIPHER_ALGORITHMS(v)  \
+               (((v) << 0) & BM_DCP_CAPABILITY1_CIPHER_ALGORITHMS)
+#define BV_DCP_CAPABILITY1_CIPHER_ALGORITHMS__AES128 0x0001
+
+#define HW_DCP_CONTEXT (0x00000050)
+
+#define BP_DCP_CONTEXT_ADDR    0
+#define BM_DCP_CONTEXT_ADDR    0xFFFFFFFF
+#define BF_DCP_CONTEXT_ADDR(v) (v)
+
+#define HW_DCP_KEY     (0x00000060)
+
+#define BP_DCP_KEY_RSVD        8
+#define BM_DCP_KEY_RSVD        0xFFFFFF00
+#define BF_DCP_KEY_RSVD(v) \
+               (((v) << 8) & BM_DCP_KEY_RSVD)
+#define BP_DCP_KEY_RSVD_INDEX  6
+#define BM_DCP_KEY_RSVD_INDEX  0x000000C0
+#define BF_DCP_KEY_RSVD_INDEX(v)  \
+               (((v) << 6) & BM_DCP_KEY_RSVD_INDEX)
+#define BP_DCP_KEY_INDEX       4
+#define BM_DCP_KEY_INDEX       0x00000030
+#define BF_DCP_KEY_INDEX(v)  \
+               (((v) << 4) & BM_DCP_KEY_INDEX)
+#define BP_DCP_KEY_RSVD_SUBWORD        2
+#define BM_DCP_KEY_RSVD_SUBWORD        0x0000000C
+#define BF_DCP_KEY_RSVD_SUBWORD(v)  \
+               (((v) << 2) & BM_DCP_KEY_RSVD_SUBWORD)
+#define BP_DCP_KEY_SUBWORD     0
+#define BM_DCP_KEY_SUBWORD     0x00000003
+#define BF_DCP_KEY_SUBWORD(v)  \
+               (((v) << 0) & BM_DCP_KEY_SUBWORD)
+
+#define HW_DCP_KEYDATA (0x00000070)
+
+#define BP_DCP_KEYDATA_DATA    0
+#define BM_DCP_KEYDATA_DATA    0xFFFFFFFF
+#define BF_DCP_KEYDATA_DATA(v) (v)
+
+#define HW_DCP_PACKET0 (0x00000080)
+
+#define BP_DCP_PACKET0_ADDR    0
+#define BM_DCP_PACKET0_ADDR    0xFFFFFFFF
+#define BF_DCP_PACKET0_ADDR(v) (v)
+
+#define HW_DCP_PACKET1 (0x00000090)
+
+#define BP_DCP_PACKET1_TAG     24
+#define BM_DCP_PACKET1_TAG     0xFF000000
+#define BF_DCP_PACKET1_TAG(v) \
+               (((v) << 24) & BM_DCP_PACKET1_TAG)
+#define BM_DCP_PACKET1_OUTPUT_WORDSWAP 0x00800000
+#define BM_DCP_PACKET1_OUTPUT_BYTESWAP 0x00400000
+#define BM_DCP_PACKET1_INPUT_WORDSWAP  0x00200000
+#define BM_DCP_PACKET1_INPUT_BYTESWAP  0x00100000
+#define BM_DCP_PACKET1_KEY_WORDSWAP    0x00080000
+#define BM_DCP_PACKET1_KEY_BYTESWAP    0x00040000
+#define BM_DCP_PACKET1_TEST_SEMA_IRQ   0x00020000
+#define BM_DCP_PACKET1_CONSTANT_FILL   0x00010000
+#define BM_DCP_PACKET1_HASH_OUTPUT     0x00008000
+#define BV_DCP_PACKET1_HASH_OUTPUT__INPUT  0x00
+#define BV_DCP_PACKET1_HASH_OUTPUT__OUTPUT 0x01
+#define BM_DCP_PACKET1_CHECK_HASH      0x00004000
+#define BM_DCP_PACKET1_HASH_TERM       0x00002000
+#define BM_DCP_PACKET1_HASH_INIT       0x00001000
+#define BM_DCP_PACKET1_PAYLOAD_KEY     0x00000800
+#define BM_DCP_PACKET1_OTP_KEY 0x00000400
+#define BM_DCP_PACKET1_CIPHER_INIT     0x00000200
+#define BM_DCP_PACKET1_CIPHER_ENCRYPT  0x00000100
+#define BV_DCP_PACKET1_CIPHER_ENCRYPT__ENCRYPT 0x01
+#define BV_DCP_PACKET1_CIPHER_ENCRYPT__DECRYPT 0x00
+#define BM_DCP_PACKET1_ENABLE_BLIT     0x00000080
+#define BM_DCP_PACKET1_ENABLE_HASH     0x00000040
+#define BM_DCP_PACKET1_ENABLE_CIPHER   0x00000020
+#define BM_DCP_PACKET1_ENABLE_MEMCOPY  0x00000010
+#define BM_DCP_PACKET1_CHAIN_CONTIGUOUS        0x00000008
+#define BM_DCP_PACKET1_CHAIN   0x00000004
+#define BM_DCP_PACKET1_DECR_SEMAPHORE  0x00000002
+#define BM_DCP_PACKET1_INTERRUPT       0x00000001
+
+#define HW_DCP_PACKET2 (0x000000a0)
+
+#define BP_DCP_PACKET2_CIPHER_CFG      24
+#define BM_DCP_PACKET2_CIPHER_CFG      0xFF000000
+#define BF_DCP_PACKET2_CIPHER_CFG(v) \
+               (((v) << 24) & BM_DCP_PACKET2_CIPHER_CFG)
+#define BP_DCP_PACKET2_RSVD    20
+#define BM_DCP_PACKET2_RSVD    0x00F00000
+#define BF_DCP_PACKET2_RSVD(v)  \
+               (((v) << 20) & BM_DCP_PACKET2_RSVD)
+#define BP_DCP_PACKET2_HASH_SELECT     16
+#define BM_DCP_PACKET2_HASH_SELECT     0x000F0000
+#define BF_DCP_PACKET2_HASH_SELECT(v)  \
+               (((v) << 16) & BM_DCP_PACKET2_HASH_SELECT)
+#define BV_DCP_PACKET2_HASH_SELECT__SHA1  0x00
+#define BV_DCP_PACKET2_HASH_SELECT__CRC32 0x01
+#define BV_DCP_PACKET2_HASH_SELECT__SHA256  0x02
+#define BP_DCP_PACKET2_KEY_SELECT      8
+#define BM_DCP_PACKET2_KEY_SELECT      0x0000FF00
+#define BF_DCP_PACKET2_KEY_SELECT(v)  \
+               (((v) << 8) & BM_DCP_PACKET2_KEY_SELECT)
+#define BP_DCP_PACKET2_CIPHER_MODE     4
+#define BM_DCP_PACKET2_CIPHER_MODE     0x000000F0
+#define BF_DCP_PACKET2_CIPHER_MODE(v)  \
+               (((v) << 4) & BM_DCP_PACKET2_CIPHER_MODE)
+#define BV_DCP_PACKET2_CIPHER_MODE__ECB 0x00
+#define BV_DCP_PACKET2_CIPHER_MODE__CBC 0x01
+#define BP_DCP_PACKET2_CIPHER_SELECT   0
+#define BM_DCP_PACKET2_CIPHER_SELECT   0x0000000F
+#define BF_DCP_PACKET2_CIPHER_SELECT(v)  \
+               (((v) << 0) & BM_DCP_PACKET2_CIPHER_SELECT)
+#define BV_DCP_PACKET2_CIPHER_SELECT__AES128 0x00
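
(Editorial note, not part of the patch.) Combining the PACKET1 and PACKET2 fields above, a one-shot AES-128 CBC encryption packet could be filled as sketched below. The function name, the key-slot choice, and the DMA addresses are assumptions for illustration; the payload buffer carries the IV because DCP_PKT1_ENCRYPT sets CIPHER_INIT.

    /* Hypothetical sketch: fill a work packet for AES-128 CBC encryption.
     * src_phys/dst_phys/payload_phys are DMA-able physical addresses. */
    static void dcp_fill_cbc_encrypt(struct dcp_hw_packet *pkt,
                                     u32 src_phys, u32 dst_phys,
                                     u32 payload_phys, u32 len)
    {
            pkt->pNext = 0;                         /* single packet, no chain */
            pkt->pkt1 = DCP_PKT1_ENCRYPT |          /* cipher, encrypt, init */
                        BM_DCP_PACKET1_INTERRUPT;   /* interrupt on completion */
            pkt->pkt2 = BF_DCP_PACKET2_CIPHER_SELECT(
                            BV_DCP_PACKET2_CIPHER_SELECT__AES128) |
                        BF_DCP_PACKET2_CIPHER_MODE(
                            BV_DCP_PACKET2_CIPHER_MODE__CBC) |
                        BF_DCP_PACKET2_KEY_SELECT(0);   /* key slot 0 */
            pkt->pSrc = src_phys;
            pkt->pDst = dst_phys;
            pkt->size = len;
            pkt->pPayload = payload_phys;           /* IV for CIPHER_INIT */
            pkt->stat = 0;
    }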
+
+#define HW_DCP_PACKET3 (0x000000b0)
+
+#define BP_DCP_PACKET3_ADDR    0
+#define BM_DCP_PACKET3_ADDR    0xFFFFFFFF
+#define BF_DCP_PACKET3_ADDR(v) (v)
+
+#define HW_DCP_PACKET4 (0x000000c0)
+
+#define BP_DCP_PACKET4_ADDR    0
+#define BM_DCP_PACKET4_ADDR    0xFFFFFFFF
+#define BF_DCP_PACKET4_ADDR(v) (v)
+
+#define HW_DCP_PACKET5 (0x000000d0)
+
+#define BP_DCP_PACKET5_COUNT   0
+#define BM_DCP_PACKET5_COUNT   0xFFFFFFFF
+#define BF_DCP_PACKET5_COUNT(v)        (v)
+
+#define HW_DCP_PACKET6 (0x000000e0)
+
+#define BP_DCP_PACKET6_ADDR    0
+#define BM_DCP_PACKET6_ADDR    0xFFFFFFFF
+#define BF_DCP_PACKET6_ADDR(v) (v)
+
+/*
+ *  multi-register-define name HW_DCP_CHnCMDPTR
+ *              base 0x00000100
+ *              count 4
+ *              offset 0x40
+ */
+#define HW_DCP_CHnCMDPTR(n)    (0x00000100 + (n) * 0x40)
+
+#define BP_DCP_CHnCMDPTR_ADDR  0
+#define BM_DCP_CHnCMDPTR_ADDR  0xFFFFFFFF
+#define BF_DCP_CHnCMDPTR_ADDR(v)       (v)
+
+/*
+ *  multi-register-define name HW_DCP_CHnSEMA
+ *              base 0x00000110
+ *              count 4
+ *              offset 0x40
+ */
+#define HW_DCP_CHnSEMA(n)      (0x00000110 + (n) * 0x40)
+
+#define BP_DCP_CHnSEMA_RSVD2   24
+#define BM_DCP_CHnSEMA_RSVD2   0xFF000000
+#define BF_DCP_CHnSEMA_RSVD2(v) \
+               (((v) << 24) & BM_DCP_CHnSEMA_RSVD2)
+#define BP_DCP_CHnSEMA_VALUE   16
+#define BM_DCP_CHnSEMA_VALUE   0x00FF0000
+#define BF_DCP_CHnSEMA_VALUE(v)  \
+               (((v) << 16) & BM_DCP_CHnSEMA_VALUE)
+#define BP_DCP_CHnSEMA_RSVD1   8
+#define BM_DCP_CHnSEMA_RSVD1   0x0000FF00
+#define BF_DCP_CHnSEMA_RSVD1(v)  \
+               (((v) << 8) & BM_DCP_CHnSEMA_RSVD1)
+#define BP_DCP_CHnSEMA_INCREMENT       0
+#define BM_DCP_CHnSEMA_INCREMENT       0x000000FF
+#define BF_DCP_CHnSEMA_INCREMENT(v)  \
+               (((v) << 0) & BM_DCP_CHnSEMA_INCREMENT)
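
(Editorial note, not part of the patch.) Work is submitted to a channel by writing the packet's physical address to the channel's CMDPTR register and incrementing the channel semaphore once per packet. A minimal sketch, assuming the cipher channel and a register base held as a u32 as in dcp.c:

    /* Hypothetical sketch: submit one work packet on the cipher channel.
     * pkt_phys is the packet's DMA (physical) address. */
    static void dcp_kick_channel(u32 base, u32 pkt_phys)
    {
            __raw_writel(pkt_phys, base + HW_DCP_CHnCMDPTR(CIPHER_CHAN));
            __raw_writel(BF_DCP_CHnSEMA_INCREMENT(1),
                         base + HW_DCP_CHnSEMA(CIPHER_CHAN));
    }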
+
+/*
+ *  multi-register-define name HW_DCP_CHnSTAT
+ *              base 0x00000120
+ *              count 4
+ *              offset 0x40
+ */
+#define HW_DCP_CHnSTAT(n)      (0x00000120 + (n) * 0x40)
+#define HW_DCP_CHnSTAT_SET(n)  (0x00000124 + (n) * 0x40)
+#define HW_DCP_CHnSTAT_CLR(n)  (0x00000128 + (n) * 0x40)
+#define HW_DCP_CHnSTAT_TOG(n)  (0x0000012c + (n) * 0x40)
+
+#define BP_DCP_CHnSTAT_TAG     24
+#define BM_DCP_CHnSTAT_TAG     0xFF000000
+#define BF_DCP_CHnSTAT_TAG(v) \
+               (((v) << 24) & BM_DCP_CHnSTAT_TAG)
+#define BP_DCP_CHnSTAT_ERROR_CODE      16
+#define BM_DCP_CHnSTAT_ERROR_CODE      0x00FF0000
+#define BF_DCP_CHnSTAT_ERROR_CODE(v)  \
+               (((v) << 16) & BM_DCP_CHnSTAT_ERROR_CODE)
+#define BV_DCP_CHnSTAT_ERROR_CODE__NEXT_CHAIN_IS_0 0x01
+#define BV_DCP_CHnSTAT_ERROR_CODE__NO_CHAIN        0x02
+#define BV_DCP_CHnSTAT_ERROR_CODE__CONTEXT_ERROR   0x03
+#define BV_DCP_CHnSTAT_ERROR_CODE__PAYLOAD_ERROR   0x04
+#define BV_DCP_CHnSTAT_ERROR_CODE__INVALID_MODE    0x05
+#define BP_DCP_CHnSTAT_RSVD0   7
+#define BM_DCP_CHnSTAT_RSVD0   0x0000FF80
+#define BF_DCP_CHnSTAT_RSVD0(v)  \
+               (((v) << 7) & BM_DCP_CHnSTAT_RSVD0)
+#define BM_DCP_CHnSTAT_ERROR_PAGEFAULT 0x00000040
+#define BM_DCP_CHnSTAT_ERROR_DST       0x00000020
+#define BM_DCP_CHnSTAT_ERROR_SRC       0x00000010
+#define BM_DCP_CHnSTAT_ERROR_PACKET    0x00000008
+#define BM_DCP_CHnSTAT_ERROR_SETUP     0x00000004
+#define BM_DCP_CHnSTAT_HASH_MISMATCH   0x00000002
+#define BM_DCP_CHnSTAT_RSVD_COMPLETE   0x00000001
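
(Editorial note, not part of the patch.) After a packet completes, the channel status register reports error flags in its low byte and an error code in bits 23:16; the bits are cleared by writing ones to the CLR address. A minimal sketch under the same base/channel assumptions as above:

    /* Hypothetical sketch: decode and clear a cipher-channel error. */
    static void dcp_check_channel_error(u32 base)
    {
            u32 stat = __raw_readl(base + HW_DCP_CHnSTAT(CIPHER_CHAN));

            if (stat & (BM_DCP_CHnSTAT_ERROR_PAGEFAULT |
                        BM_DCP_CHnSTAT_ERROR_DST |
                        BM_DCP_CHnSTAT_ERROR_SRC |
                        BM_DCP_CHnSTAT_ERROR_PACKET |
                        BM_DCP_CHnSTAT_ERROR_SETUP |
                        BM_DCP_CHnSTAT_HASH_MISMATCH)) {
                    u32 code = (stat & BM_DCP_CHnSTAT_ERROR_CODE)
                                    >> BP_DCP_CHnSTAT_ERROR_CODE;
                    pr_err("dcp: channel error, code 0x%02x, stat 0x%08x\n",
                           code, stat);
                    /* status bits are write-1-to-clear via the CLR address */
                    __raw_writel(-1, base + HW_DCP_CHnSTAT_CLR(CIPHER_CHAN));
            }
    }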
+
+/*
+ *  multi-register-define name HW_DCP_CHnOPTS
+ *              base 0x00000130
+ *              count 4
+ *              offset 0x40
+ */
+#define HW_DCP_CHnOPTS(n)      (0x00000130 + (n) * 0x40)
+#define HW_DCP_CHnOPTS_SET(n)  (0x00000134 + (n) * 0x40)
+#define HW_DCP_CHnOPTS_CLR(n)  (0x00000138 + (n) * 0x40)
+#define HW_DCP_CHnOPTS_TOG(n)  (0x0000013c + (n) * 0x40)
+
+#define BP_DCP_CHnOPTS_RSVD    16
+#define BM_DCP_CHnOPTS_RSVD    0xFFFF0000
+#define BF_DCP_CHnOPTS_RSVD(v) \
+               (((v) << 16) & BM_DCP_CHnOPTS_RSVD)
+#define BP_DCP_CHnOPTS_RECOVERY_TIMER  0
+#define BM_DCP_CHnOPTS_RECOVERY_TIMER  0x0000FFFF
+#define BF_DCP_CHnOPTS_RECOVERY_TIMER(v)  \
+               (((v) << 0) & BM_DCP_CHnOPTS_RECOVERY_TIMER)
+
+#define HW_DCP_CSCCTRL0        (0x00000300)
+#define HW_DCP_CSCCTRL0_SET    (0x00000304)
+#define HW_DCP_CSCCTRL0_CLR    (0x00000308)
+#define HW_DCP_CSCCTRL0_TOG    (0x0000030c)
+
+#define BP_DCP_CSCCTRL0_RSVD1  16
+#define BM_DCP_CSCCTRL0_RSVD1  0xFFFF0000
+#define BF_DCP_CSCCTRL0_RSVD1(v) \
+               (((v) << 16) & BM_DCP_CSCCTRL0_RSVD1)
+#define BM_DCP_CSCCTRL0_CLIP   0x00008000
+#define BM_DCP_CSCCTRL0_UPSAMPLE       0x00004000
+#define BM_DCP_CSCCTRL0_SCALE  0x00002000
+#define BM_DCP_CSCCTRL0_ROTATE 0x00001000
+#define BM_DCP_CSCCTRL0_SUBSAMPLE      0x00000800
+#define BM_DCP_CSCCTRL0_DELTA  0x00000400
+#define BP_DCP_CSCCTRL0_RGB_FORMAT     8
+#define BM_DCP_CSCCTRL0_RGB_FORMAT     0x00000300
+#define BF_DCP_CSCCTRL0_RGB_FORMAT(v)  \
+               (((v) << 8) & BM_DCP_CSCCTRL0_RGB_FORMAT)
+#define BV_DCP_CSCCTRL0_RGB_FORMAT__RGB16_565 0x0
+#define BV_DCP_CSCCTRL0_RGB_FORMAT__YCbCrI    0x1
+#define BV_DCP_CSCCTRL0_RGB_FORMAT__RGB24     0x2
+#define BV_DCP_CSCCTRL0_RGB_FORMAT__YUV422I   0x3
+#define BP_DCP_CSCCTRL0_YUV_FORMAT     4
+#define BM_DCP_CSCCTRL0_YUV_FORMAT     0x000000F0
+#define BF_DCP_CSCCTRL0_YUV_FORMAT(v)  \
+               (((v) << 4) & BM_DCP_CSCCTRL0_YUV_FORMAT)
+#define BV_DCP_CSCCTRL0_YUV_FORMAT__YUV420 0x0
+#define BV_DCP_CSCCTRL0_YUV_FORMAT__YUV422 0x2
+#define BP_DCP_CSCCTRL0_RSVD0  1
+#define BM_DCP_CSCCTRL0_RSVD0  0x0000000E
+#define BF_DCP_CSCCTRL0_RSVD0(v)  \
+               (((v) << 1) & BM_DCP_CSCCTRL0_RSVD0)
+#define BM_DCP_CSCCTRL0_ENABLE 0x00000001
+
+#define HW_DCP_CSCSTAT (0x00000310)
+#define HW_DCP_CSCSTAT_SET     (0x00000314)
+#define HW_DCP_CSCSTAT_CLR     (0x00000318)
+#define HW_DCP_CSCSTAT_TOG     (0x0000031c)
+
+#define BP_DCP_CSCSTAT_RSVD3   24
+#define BM_DCP_CSCSTAT_RSVD3   0xFF000000
+#define BF_DCP_CSCSTAT_RSVD3(v) \
+               (((v) << 24) & BM_DCP_CSCSTAT_RSVD3)
+#define BP_DCP_CSCSTAT_ERROR_CODE      16
+#define BM_DCP_CSCSTAT_ERROR_CODE      0x00FF0000
+#define BF_DCP_CSCSTAT_ERROR_CODE(v)  \
+               (((v) << 16) & BM_DCP_CSCSTAT_ERROR_CODE)
+#define BV_DCP_CSCSTAT_ERROR_CODE__LUMA0_FETCH_ERROR_Y0 0x01
+#define BV_DCP_CSCSTAT_ERROR_CODE__LUMA1_FETCH_ERROR_Y1 0x02
+#define BV_DCP_CSCSTAT_ERROR_CODE__CHROMA_FETCH_ERROR_U 0x03
+#define BV_DCP_CSCSTAT_ERROR_CODE__CHROMA_FETCH_ERROR_V 0x04
+#define BP_DCP_CSCSTAT_RSVD2   7
+#define BM_DCP_CSCSTAT_RSVD2   0x0000FF80
+#define BF_DCP_CSCSTAT_RSVD2(v)  \
+               (((v) << 7) & BM_DCP_CSCSTAT_RSVD2)
+#define BM_DCP_CSCSTAT_ERROR_PAGEFAULT 0x00000040
+#define BM_DCP_CSCSTAT_ERROR_DST       0x00000020
+#define BM_DCP_CSCSTAT_ERROR_SRC       0x00000010
+#define BM_DCP_CSCSTAT_RSVD1   0x00000008
+#define BM_DCP_CSCSTAT_ERROR_SETUP     0x00000004
+#define BM_DCP_CSCSTAT_RSVD0   0x00000002
+#define BM_DCP_CSCSTAT_COMPLETE        0x00000001
+
+#define HW_DCP_CSCOUTBUFPARAM  (0x00000320)
+
+#define BP_DCP_CSCOUTBUFPARAM_RSVD1    24
+#define BM_DCP_CSCOUTBUFPARAM_RSVD1    0xFF000000
+#define BF_DCP_CSCOUTBUFPARAM_RSVD1(v) \
+               (((v) << 24) & BM_DCP_CSCOUTBUFPARAM_RSVD1)
+#define BP_DCP_CSCOUTBUFPARAM_FIELD_SIZE       12
+#define BM_DCP_CSCOUTBUFPARAM_FIELD_SIZE       0x00FFF000
+#define BF_DCP_CSCOUTBUFPARAM_FIELD_SIZE(v)  \
+               (((v) << 12) & BM_DCP_CSCOUTBUFPARAM_FIELD_SIZE)
+#define BP_DCP_CSCOUTBUFPARAM_LINE_SIZE        0
+#define BM_DCP_CSCOUTBUFPARAM_LINE_SIZE        0x00000FFF
+#define BF_DCP_CSCOUTBUFPARAM_LINE_SIZE(v)  \
+               (((v) << 0) & BM_DCP_CSCOUTBUFPARAM_LINE_SIZE)
+
+#define HW_DCP_CSCINBUFPARAM   (0x00000330)
+
+#define BP_DCP_CSCINBUFPARAM_RSVD1     12
+#define BM_DCP_CSCINBUFPARAM_RSVD1     0xFFFFF000
+#define BF_DCP_CSCINBUFPARAM_RSVD1(v) \
+               (((v) << 12) & BM_DCP_CSCINBUFPARAM_RSVD1)
+#define BP_DCP_CSCINBUFPARAM_LINE_SIZE 0
+#define BM_DCP_CSCINBUFPARAM_LINE_SIZE 0x00000FFF
+#define BF_DCP_CSCINBUFPARAM_LINE_SIZE(v)  \
+               (((v) << 0) & BM_DCP_CSCINBUFPARAM_LINE_SIZE)
+
+#define HW_DCP_CSCRGB  (0x00000340)
+
+#define BP_DCP_CSCRGB_ADDR     0
+#define BM_DCP_CSCRGB_ADDR     0xFFFFFFFF
+#define BF_DCP_CSCRGB_ADDR(v)  (v)
+
+#define HW_DCP_CSCLUMA (0x00000350)
+
+#define BP_DCP_CSCLUMA_ADDR    0
+#define BM_DCP_CSCLUMA_ADDR    0xFFFFFFFF
+#define BF_DCP_CSCLUMA_ADDR(v) (v)
+
+#define HW_DCP_CSCCHROMAU      (0x00000360)
+
+#define BP_DCP_CSCCHROMAU_ADDR 0
+#define BM_DCP_CSCCHROMAU_ADDR 0xFFFFFFFF
+#define BF_DCP_CSCCHROMAU_ADDR(v)      (v)
+
+#define HW_DCP_CSCCHROMAV      (0x00000370)
+
+#define BP_DCP_CSCCHROMAV_ADDR 0
+#define BM_DCP_CSCCHROMAV_ADDR 0xFFFFFFFF
+#define BF_DCP_CSCCHROMAV_ADDR(v)      (v)
+
+#define HW_DCP_CSCCOEFF0       (0x00000380)
+
+#define BP_DCP_CSCCOEFF0_RSVD1 26
+#define BM_DCP_CSCCOEFF0_RSVD1 0xFC000000
+#define BF_DCP_CSCCOEFF0_RSVD1(v) \
+               (((v) << 26) & BM_DCP_CSCCOEFF0_RSVD1)
+#define BP_DCP_CSCCOEFF0_C0    16
+#define BM_DCP_CSCCOEFF0_C0    0x03FF0000
+#define BF_DCP_CSCCOEFF0_C0(v)  \
+               (((v) << 16) & BM_DCP_CSCCOEFF0_C0)
+#define BP_DCP_CSCCOEFF0_UV_OFFSET     8
+#define BM_DCP_CSCCOEFF0_UV_OFFSET     0x0000FF00
+#define BF_DCP_CSCCOEFF0_UV_OFFSET(v)  \
+               (((v) << 8) & BM_DCP_CSCCOEFF0_UV_OFFSET)
+#define BP_DCP_CSCCOEFF0_Y_OFFSET      0
+#define BM_DCP_CSCCOEFF0_Y_OFFSET      0x000000FF
+#define BF_DCP_CSCCOEFF0_Y_OFFSET(v)  \
+               (((v) << 0) & BM_DCP_CSCCOEFF0_Y_OFFSET)
+
+#define HW_DCP_CSCCOEFF1       (0x00000390)
+
+#define BP_DCP_CSCCOEFF1_RSVD1 26
+#define BM_DCP_CSCCOEFF1_RSVD1 0xFC000000
+#define BF_DCP_CSCCOEFF1_RSVD1(v) \
+               (((v) << 26) & BM_DCP_CSCCOEFF1_RSVD1)
+#define BP_DCP_CSCCOEFF1_C1    16
+#define BM_DCP_CSCCOEFF1_C1    0x03FF0000
+#define BF_DCP_CSCCOEFF1_C1(v)  \
+               (((v) << 16) & BM_DCP_CSCCOEFF1_C1)
+#define BP_DCP_CSCCOEFF1_RSVD0 10
+#define BM_DCP_CSCCOEFF1_RSVD0 0x0000FC00
+#define BF_DCP_CSCCOEFF1_RSVD0(v)  \
+               (((v) << 10) & BM_DCP_CSCCOEFF1_RSVD0)
+#define BP_DCP_CSCCOEFF1_C4    0
+#define BM_DCP_CSCCOEFF1_C4    0x000003FF
+#define BF_DCP_CSCCOEFF1_C4(v)  \
+               (((v) << 0) & BM_DCP_CSCCOEFF1_C4)
+
+#define HW_DCP_CSCCOEFF2       (0x000003a0)
+
+#define BP_DCP_CSCCOEFF2_RSVD1 26
+#define BM_DCP_CSCCOEFF2_RSVD1 0xFC000000
+#define BF_DCP_CSCCOEFF2_RSVD1(v) \
+               (((v) << 26) & BM_DCP_CSCCOEFF2_RSVD1)
+#define BP_DCP_CSCCOEFF2_C2    16
+#define BM_DCP_CSCCOEFF2_C2    0x03FF0000
+#define BF_DCP_CSCCOEFF2_C2(v)  \
+               (((v) << 16) & BM_DCP_CSCCOEFF2_C2)
+#define BP_DCP_CSCCOEFF2_RSVD0 10
+#define BM_DCP_CSCCOEFF2_RSVD0 0x0000FC00
+#define BF_DCP_CSCCOEFF2_RSVD0(v)  \
+               (((v) << 10) & BM_DCP_CSCCOEFF2_RSVD0)
+#define BP_DCP_CSCCOEFF2_C3    0
+#define BM_DCP_CSCCOEFF2_C3    0x000003FF
+#define BF_DCP_CSCCOEFF2_C3(v)  \
+               (((v) << 0) & BM_DCP_CSCCOEFF2_C3)
+
+#define HW_DCP_CSCCLIP (0x000003d0)
+
+#define BP_DCP_CSCCLIP_RSVD1   24
+#define BM_DCP_CSCCLIP_RSVD1   0xFF000000
+#define BF_DCP_CSCCLIP_RSVD1(v) \
+               (((v) << 24) & BM_DCP_CSCCLIP_RSVD1)
+#define BP_DCP_CSCCLIP_HEIGHT  12
+#define BM_DCP_CSCCLIP_HEIGHT  0x00FFF000
+#define BF_DCP_CSCCLIP_HEIGHT(v)  \
+               (((v) << 12) & BM_DCP_CSCCLIP_HEIGHT)
+#define BP_DCP_CSCCLIP_WIDTH   0
+#define BM_DCP_CSCCLIP_WIDTH   0x00000FFF
+#define BF_DCP_CSCCLIP_WIDTH(v)  \
+               (((v) << 0) & BM_DCP_CSCCLIP_WIDTH)
+
+#define HW_DCP_CSCXSCALE       (0x000003e0)
+
+#define BP_DCP_CSCXSCALE_RSVD1 26
+#define BM_DCP_CSCXSCALE_RSVD1 0xFC000000
+#define BF_DCP_CSCXSCALE_RSVD1(v) \
+               (((v) << 26) & BM_DCP_CSCXSCALE_RSVD1)
+#define BP_DCP_CSCXSCALE_INT   24
+#define BM_DCP_CSCXSCALE_INT   0x03000000
+#define BF_DCP_CSCXSCALE_INT(v)  \
+               (((v) << 24) & BM_DCP_CSCXSCALE_INT)
+#define BP_DCP_CSCXSCALE_FRAC  12
+#define BM_DCP_CSCXSCALE_FRAC  0x00FFF000
+#define BF_DCP_CSCXSCALE_FRAC(v)  \
+               (((v) << 12) & BM_DCP_CSCXSCALE_FRAC)
+#define BP_DCP_CSCXSCALE_WIDTH 0
+#define BM_DCP_CSCXSCALE_WIDTH 0x00000FFF
+#define BF_DCP_CSCXSCALE_WIDTH(v)  \
+               (((v) << 0) & BM_DCP_CSCXSCALE_WIDTH)
+
+#define HW_DCP_CSCYSCALE       (0x000003f0)
+
+#define BP_DCP_CSCYSCALE_RSVD1 26
+#define BM_DCP_CSCYSCALE_RSVD1 0xFC000000
+#define BF_DCP_CSCYSCALE_RSVD1(v) \
+               (((v) << 26) & BM_DCP_CSCYSCALE_RSVD1)
+#define BP_DCP_CSCYSCALE_INT   24
+#define BM_DCP_CSCYSCALE_INT   0x03000000
+#define BF_DCP_CSCYSCALE_INT(v)  \
+               (((v) << 24) & BM_DCP_CSCYSCALE_INT)
+#define BP_DCP_CSCYSCALE_FRAC  12
+#define BM_DCP_CSCYSCALE_FRAC  0x00FFF000
+#define BF_DCP_CSCYSCALE_FRAC(v)  \
+               (((v) << 12) & BM_DCP_CSCYSCALE_FRAC)
+#define BP_DCP_CSCYSCALE_HEIGHT        0
+#define BM_DCP_CSCYSCALE_HEIGHT        0x00000FFF
+#define BF_DCP_CSCYSCALE_HEIGHT(v)  \
+               (((v) << 0) & BM_DCP_CSCYSCALE_HEIGHT)
+
+#define HW_DCP_DBGSELECT       (0x00000400)
+
+#define BP_DCP_DBGSELECT_RSVD  8
+#define BM_DCP_DBGSELECT_RSVD  0xFFFFFF00
+#define BF_DCP_DBGSELECT_RSVD(v) \
+               (((v) << 8) & BM_DCP_DBGSELECT_RSVD)
+#define BP_DCP_DBGSELECT_INDEX 0
+#define BM_DCP_DBGSELECT_INDEX 0x000000FF
+#define BF_DCP_DBGSELECT_INDEX(v)  \
+               (((v) << 0) & BM_DCP_DBGSELECT_INDEX)
+#define BV_DCP_DBGSELECT_INDEX__CONTROL 0x01
+#define BV_DCP_DBGSELECT_INDEX__OTPKEY0 0x10
+#define BV_DCP_DBGSELECT_INDEX__OTPKEY1 0x11
+#define BV_DCP_DBGSELECT_INDEX__OTPKEY2 0x12
+#define BV_DCP_DBGSELECT_INDEX__OTPKEY3 0x13
+
+#define HW_DCP_DBGDATA (0x00000410)
+
+#define BP_DCP_DBGDATA_DATA    0
+#define BM_DCP_DBGDATA_DATA    0xFFFFFFFF
+#define BF_DCP_DBGDATA_DATA(v) (v)
+
+#define HW_DCP_PAGETABLE       (0x00000420)
+
+#define BP_DCP_PAGETABLE_BASE  2
+#define BM_DCP_PAGETABLE_BASE  0xFFFFFFFC
+#define BF_DCP_PAGETABLE_BASE(v) \
+               (((v) << 2) & BM_DCP_PAGETABLE_BASE)
+#define BM_DCP_PAGETABLE_FLUSH 0x00000002
+#define BM_DCP_PAGETABLE_ENABLE        0x00000001
+
+#define HW_DCP_VERSION (0x00000430)
+
+#define BP_DCP_VERSION_MAJOR   24
+#define BM_DCP_VERSION_MAJOR   0xFF000000
+#define BF_DCP_VERSION_MAJOR(v) \
+               (((v) << 24) & BM_DCP_VERSION_MAJOR)
+#define BP_DCP_VERSION_MINOR   16
+#define BM_DCP_VERSION_MINOR   0x00FF0000
+#define BF_DCP_VERSION_MINOR(v)  \
+               (((v) << 16) & BM_DCP_VERSION_MINOR)
+#define BP_DCP_VERSION_STEP    0
+#define BM_DCP_VERSION_STEP    0x0000FFFF
+#define BF_DCP_VERSION_STEP(v)  \
+               (((v) << 0) & BM_DCP_VERSION_STEP)
+
+#endif
diff --git a/drivers/crypto/dcp_bootstream_ioctl.h b/drivers/crypto/dcp_bootstream_ioctl.h
new file mode 100644 (file)
index 0000000..4fa118f
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Freescale DCP driver for bootstream update. Only handles the OTP KEY
+ * case and can only encrypt/decrypt.
+ *
+ * Author: Pantelis Antoniou <pantelis@embeddedalley.com>
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef DCP_BOOTSTREAM_IOCTL_H
+#define DCP_BOOTSTREAM_IOCTL_H
+
+/* Remember to include the proper _IO definition
+ * file beforehand.
+ * For user space it is <sys/ioctl.h>.
+ */
+
+#define DBS_IOCTL_BASE   'd'
+
+#define DBS_ENC        _IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
+#define DBS_DEC        _IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
+
+#endif
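
(Editorial note, not part of the patch.) A minimal user-space sketch of the bootstream interface. The /dev node name is an assumption about the misc device registered by dcp.c; DBS_ENC encrypts the 16-byte block in place with the OTP key, and DBS_DEC decrypts it.

    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "dcp_bootstream_ioctl.h"

    int main(void)
    {
            uint8_t block[16] = { 0 };              /* one AES block */
            int fd = open("/dev/dcpboot", O_RDWR);  /* node name: assumption */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, DBS_ENC, block) < 0)      /* encrypt in place */
                    perror("DBS_ENC");
            close(fd);
            return 0;
    }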