2 * talitos - Freescale Integrated Security Engine (SEC) device driver
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_platform.h>
36 #include <linux/dma-mapping.h>
38 #include <linux/spinlock.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/slab.h>
42 #include <crypto/algapi.h>
43 #include <crypto/aes.h>
44 #include <crypto/des.h>
45 #include <crypto/sha.h>
46 #include <crypto/md5.h>
47 #include <crypto/aead.h>
48 #include <crypto/authenc.h>
49 #include <crypto/skcipher.h>
50 #include <crypto/hash.h>
51 #include <crypto/internal/hash.h>
52 #include <crypto/scatterwalk.h>
56 #define TALITOS_TIMEOUT 100000
57 #define TALITOS_MAX_DATA_LEN 65535
59 #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
60 #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
61 #define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
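/*
 * For illustration: an ipsec_esp header built from DESC_HDR_TYPE_IPSEC_ESP |
 * DESC_HDR_SEL0_AESU | DESC_HDR_SEL1_MDEUA decodes via these macros into the
 * ipsec_esp descriptor type, the AESU as primary execution unit and MDEU-A as
 * secondary unit; hw_supports() below compares those values against the
 * execution-unit and descriptor-type masks read from the device tree.
 */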
63 /* descriptor pointer entry */
65 __be16 len; /* length */
66 u8 j_extent; /* jump to sg link table and/or extent */
67 u8 eptr; /* extended address */
68 __be32 ptr; /* address */
71 static const struct talitos_ptr zero_entry = {
80 __be32 hdr; /* header high bits */
81 __be32 hdr_lo; /* header low bits */
82 struct talitos_ptr ptr[7]; /* ptr/len pair array */
86 * talitos_request - descriptor submission request
87 * @desc: descriptor pointer (kernel virtual)
88 * @dma_desc: descriptor's physical bus address
89 * @callback: whom to call when descriptor processing is done
90 * @context: caller context (optional)
92 struct talitos_request {
93 struct talitos_desc *desc;
95 void (*callback) (struct device *dev, struct talitos_desc *desc,
96 void *context, int error);
100 /* per-channel fifo management */
101 struct talitos_channel {
103 struct talitos_request *fifo;
105 /* number of requests pending in channel h/w fifo */
106 atomic_t submit_count ____cacheline_aligned;
108 /* request submission (head) lock */
109 spinlock_t head_lock ____cacheline_aligned;
110 /* index to next free descriptor request */
113 /* request release (tail) lock */
114 spinlock_t tail_lock ____cacheline_aligned;
115 /* index to next in-progress/done descriptor request */
119 struct talitos_private {
121 struct platform_device *ofdev;
125 /* SEC version geometry (from device tree node) */
126 unsigned int num_channels;
127 unsigned int chfifo_len;
128 unsigned int exec_units;
129 unsigned int desc_types;
131 /* SEC Compatibility info */
132 unsigned long features;
135 * length of the request fifo:
136 * fifo_len is chfifo_len rounded up to the next power of 2
137 * so we can use bitwise ops to wrap the head/tail indices
139 unsigned int fifo_len;
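/*
 * e.g. a channel fifo length of 24 from the device tree gives fifo_len = 32,
 * so the head/tail indices below can wrap with "(idx + 1) & (fifo_len - 1)"
 * instead of a modulo operation.
 */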
141 struct talitos_channel *chan;
143 /* next channel to be assigned next incoming descriptor */
144 atomic_t last_chan ____cacheline_aligned;
146 /* request callback tasklet */
147 struct tasklet_struct done_task;
149 /* list of registered algorithms */
150 struct list_head alg_list;
157 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
158 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
159 #define TALITOS_FTR_SHA224_HWINIT 0x00000004
161 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
163 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
164 talitos_ptr->eptr = upper_32_bits(dma_addr);
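/*
 * e.g. a 36-bit bus address 0x2_3456_7890 is split into
 * ptr = cpu_to_be32(0x34567890) and eptr = 0x2.
 */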
168 * map virtual single (contiguous) pointer to h/w descriptor pointer
170 static void map_single_talitos_ptr(struct device *dev,
171 struct talitos_ptr *talitos_ptr,
172 unsigned short len, void *data,
173 unsigned char extent,
174 enum dma_data_direction dir)
176 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
178 talitos_ptr->len = cpu_to_be16(len);
179 to_talitos_ptr(talitos_ptr, dma_addr);
180 talitos_ptr->j_extent = extent;
184 * unmap bus single (contiguous) h/w descriptor pointer
186 static void unmap_single_talitos_ptr(struct device *dev,
187 struct talitos_ptr *talitos_ptr,
188 enum dma_data_direction dir)
190 dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
191 be16_to_cpu(talitos_ptr->len), dir);
194 static int reset_channel(struct device *dev, int ch)
196 struct talitos_private *priv = dev_get_drvdata(dev);
197 unsigned int timeout = TALITOS_TIMEOUT;
199 setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
201 while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
206 dev_err(dev, "failed to reset channel %d\n", ch);
210 /* set 36-bit addressing, done writeback enable and done IRQ enable */
211 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
212 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
214 /* and ICCR writeback, if available */
215 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
216 setbits32(priv->reg + TALITOS_CCCR_LO(ch),
217 TALITOS_CCCR_LO_IWSE);
222 static int reset_device(struct device *dev)
224 struct talitos_private *priv = dev_get_drvdata(dev);
225 unsigned int timeout = TALITOS_TIMEOUT;
227 setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
229 while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
234 dev_err(dev, "failed to reset device\n");
242 * Reset and initialize the device
244 static int init_device(struct device *dev)
246 struct talitos_private *priv = dev_get_drvdata(dev);
251 * per the errata documentation: certain SEC interrupts
252 * are not fully cleared by writing the MCR:SWR bit once,
253 * so set the bit twice to completely reset the device
255 err = reset_device(dev);
259 err = reset_device(dev);
264 for (ch = 0; ch < priv->num_channels; ch++) {
265 err = reset_channel(dev, ch);
270 /* enable channel done and error interrupts */
271 setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
272 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
274 /* disable integrity check error interrupts (use writeback instead) */
275 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
276 setbits32(priv->reg + TALITOS_MDEUICR_LO,
277 TALITOS_MDEUICR_LO_ICE);
283 * talitos_submit - submits a descriptor to the device for processing
284 * @dev: the SEC device to be used
285 * @ch: the SEC device channel to be used
286 * @desc: the descriptor to be processed by the device
287 * @callback: whom to call when processing is complete
288 * @context: a handle for use by caller (optional)
290 * desc must contain valid dma-mapped (bus physical) address pointers.
291 * callback must check err and feedback in descriptor header
292 * for device processing status.
294 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
295 void (*callback)(struct device *dev,
296 struct talitos_desc *desc,
297 void *context, int error),
300 struct talitos_private *priv = dev_get_drvdata(dev);
301 struct talitos_request *request;
305 /* select done notification */
306 desc->hdr |= DESC_HDR_DONE_NOTIFY;
308 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
310 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
311 /* h/w fifo is full */
312 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
316 head = priv->chan[ch].head;
317 request = &priv->chan[ch].fifo[head];
319 /* map descriptor and save caller data */
320 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
322 request->callback = callback;
323 request->context = context;
325 /* increment fifo head */
326 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
329 request->desc = desc;
333 out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
334 out_be32(priv->reg + TALITOS_FF_LO(ch),
335 lower_32_bits(request->dma_desc));
337 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
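/*
 * A typical caller (e.g. ipsec_esp() or common_nonsnoop() below) fills in a
 * descriptor whose pointers are already dma-mapped and then does roughly:
 *
 *	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
 *	if (ret != -EINPROGRESS)
 *		unmap and free the request;
 *
 * -EINPROGRESS means the descriptor was queued and the callback will run
 * later from the done tasklet; any other return (e.g. when the channel
 * fifo is full) means the caller still owns the request.
 */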
343 * process what was done; notify the callback of any error
345 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
347 struct talitos_private *priv = dev_get_drvdata(dev);
348 struct talitos_request *request, saved_req;
352 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
354 tail = priv->chan[ch].tail;
355 while (priv->chan[ch].fifo[tail].desc) {
356 request = &priv->chan[ch].fifo[tail];
358 /* descriptors with their done bits set don't get the error */
360 if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
368 dma_unmap_single(dev, request->dma_desc,
369 sizeof(struct talitos_desc),
372 /* copy entries so we can call callback outside lock */
373 saved_req.desc = request->desc;
374 saved_req.callback = request->callback;
375 saved_req.context = request->context;
377 /* release request entry in fifo */
379 request->desc = NULL;
381 /* increment fifo tail */
382 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
384 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
386 atomic_dec(&priv->chan[ch].submit_count);
388 saved_req.callback(dev, saved_req.desc, saved_req.context,
390 /* channel may resume processing in single desc error case */
391 if (error && !reset_ch && status == error)
393 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
394 tail = priv->chan[ch].tail;
397 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
401 * process completed requests for channels that have done status
403 static void talitos_done(unsigned long data)
405 struct device *dev = (struct device *)data;
406 struct talitos_private *priv = dev_get_drvdata(dev);
409 for (ch = 0; ch < priv->num_channels; ch++)
410 flush_channel(dev, ch, 0, 0);
412 /* At this point, all completed channels have been processed.
413 * Unmask done interrupts for channels completed later on.
415 setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
416 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
420 * locate current (offending) descriptor
422 static struct talitos_desc *current_desc(struct device *dev, int ch)
424 struct talitos_private *priv = dev_get_drvdata(dev);
425 int tail = priv->chan[ch].tail;
428 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
430 while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
431 tail = (tail + 1) & (priv->fifo_len - 1);
432 if (tail == priv->chan[ch].tail) {
433 dev_err(dev, "couldn't locate current descriptor\n");
438 return priv->chan[ch].fifo[tail].desc;
442 * user diagnostics; report root cause of error based on execution unit status
444 static void report_eu_error(struct device *dev, int ch,
445 struct talitos_desc *desc)
447 struct talitos_private *priv = dev_get_drvdata(dev);
450 switch (desc->hdr & DESC_HDR_SEL0_MASK) {
451 case DESC_HDR_SEL0_AFEU:
452 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
453 in_be32(priv->reg + TALITOS_AFEUISR),
454 in_be32(priv->reg + TALITOS_AFEUISR_LO));
456 case DESC_HDR_SEL0_DEU:
457 dev_err(dev, "DEUISR 0x%08x_%08x\n",
458 in_be32(priv->reg + TALITOS_DEUISR),
459 in_be32(priv->reg + TALITOS_DEUISR_LO));
461 case DESC_HDR_SEL0_MDEUA:
462 case DESC_HDR_SEL0_MDEUB:
463 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
464 in_be32(priv->reg + TALITOS_MDEUISR),
465 in_be32(priv->reg + TALITOS_MDEUISR_LO));
467 case DESC_HDR_SEL0_RNG:
468 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
469 in_be32(priv->reg + TALITOS_RNGUISR),
470 in_be32(priv->reg + TALITOS_RNGUISR_LO));
472 case DESC_HDR_SEL0_PKEU:
473 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
474 in_be32(priv->reg + TALITOS_PKEUISR),
475 in_be32(priv->reg + TALITOS_PKEUISR_LO));
477 case DESC_HDR_SEL0_AESU:
478 dev_err(dev, "AESUISR 0x%08x_%08x\n",
479 in_be32(priv->reg + TALITOS_AESUISR),
480 in_be32(priv->reg + TALITOS_AESUISR_LO));
482 case DESC_HDR_SEL0_CRCU:
483 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
484 in_be32(priv->reg + TALITOS_CRCUISR),
485 in_be32(priv->reg + TALITOS_CRCUISR_LO));
487 case DESC_HDR_SEL0_KEU:
488 dev_err(dev, "KEUISR 0x%08x_%08x\n",
489 in_be32(priv->reg + TALITOS_KEUISR),
490 in_be32(priv->reg + TALITOS_KEUISR_LO));
494 switch (desc->hdr & DESC_HDR_SEL1_MASK) {
495 case DESC_HDR_SEL1_MDEUA:
496 case DESC_HDR_SEL1_MDEUB:
497 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
498 in_be32(priv->reg + TALITOS_MDEUISR),
499 in_be32(priv->reg + TALITOS_MDEUISR_LO));
501 case DESC_HDR_SEL1_CRCU:
502 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
503 in_be32(priv->reg + TALITOS_CRCUISR),
504 in_be32(priv->reg + TALITOS_CRCUISR_LO));
508 for (i = 0; i < 8; i++)
509 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
510 in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
511 in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
515 * recover from error interrupts
517 static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
519 struct device *dev = (struct device *)data;
520 struct talitos_private *priv = dev_get_drvdata(dev);
521 unsigned int timeout = TALITOS_TIMEOUT;
522 int ch, error, reset_dev = 0, reset_ch = 0;
525 for (ch = 0; ch < priv->num_channels; ch++) {
526 /* skip channels without errors */
527 if (!(isr & (1 << (ch * 2 + 1))))
532 v = in_be32(priv->reg + TALITOS_CCPSR(ch));
533 v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
535 if (v_lo & TALITOS_CCPSR_LO_DOF) {
536 dev_err(dev, "double fetch fifo overflow error\n");
540 if (v_lo & TALITOS_CCPSR_LO_SOF) {
541 /* h/w dropped descriptor */
542 dev_err(dev, "single fetch fifo overflow error\n");
545 if (v_lo & TALITOS_CCPSR_LO_MDTE)
546 dev_err(dev, "master data transfer error\n");
547 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
548 dev_err(dev, "s/g data length zero error\n");
549 if (v_lo & TALITOS_CCPSR_LO_FPZ)
550 dev_err(dev, "fetch pointer zero error\n");
551 if (v_lo & TALITOS_CCPSR_LO_IDH)
552 dev_err(dev, "illegal descriptor header error\n");
553 if (v_lo & TALITOS_CCPSR_LO_IEU)
554 dev_err(dev, "invalid execution unit error\n");
555 if (v_lo & TALITOS_CCPSR_LO_EU)
556 report_eu_error(dev, ch, current_desc(dev, ch));
557 if (v_lo & TALITOS_CCPSR_LO_GB)
558 dev_err(dev, "gather boundary error\n");
559 if (v_lo & TALITOS_CCPSR_LO_GRL)
560 dev_err(dev, "gather return/length error\n");
561 if (v_lo & TALITOS_CCPSR_LO_SB)
562 dev_err(dev, "scatter boundary error\n");
563 if (v_lo & TALITOS_CCPSR_LO_SRL)
564 dev_err(dev, "scatter return/length error\n");
566 flush_channel(dev, ch, error, reset_ch);
569 reset_channel(dev, ch);
571 setbits32(priv->reg + TALITOS_CCCR(ch),
573 setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
574 while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
575 TALITOS_CCCR_CONT) && --timeout)
578 dev_err(dev, "failed to restart channel %d\n",
584 if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
585 dev_err(dev, "done overflow, internal time out, or rngu error: "
586 "ISR 0x%08x_%08x\n", isr, isr_lo);
588 /* purge request queues */
589 for (ch = 0; ch < priv->num_channels; ch++)
590 flush_channel(dev, ch, -EIO, 1);
592 /* reset and reinitialize the device */
597 static irqreturn_t talitos_interrupt(int irq, void *data)
599 struct device *dev = data;
600 struct talitos_private *priv = dev_get_drvdata(dev);
603 isr = in_be32(priv->reg + TALITOS_ISR);
604 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
605 /* Acknowledge interrupt */
606 out_be32(priv->reg + TALITOS_ICR, isr);
607 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
609 if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
610 talitos_error((unsigned long)data, isr, isr_lo);
612 if (likely(isr & TALITOS_ISR_CHDONE)) {
613 /* mask further done interrupts. */
614 clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
615 /* done_task will unmask done interrupts at exit */
616 tasklet_schedule(&priv->done_task);
619 return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
625 static int talitos_rng_data_present(struct hwrng *rng, int wait)
627 struct device *dev = (struct device *)rng->priv;
628 struct talitos_private *priv = dev_get_drvdata(dev);
632 for (i = 0; i < 20; i++) {
633 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
634 TALITOS_RNGUSR_LO_OFL;
643 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
645 struct device *dev = (struct device *)rng->priv;
646 struct talitos_private *priv = dev_get_drvdata(dev);
648 /* rng fifo requires 64-bit accesses */
649 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
650 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
655 static int talitos_rng_init(struct hwrng *rng)
657 struct device *dev = (struct device *)rng->priv;
658 struct talitos_private *priv = dev_get_drvdata(dev);
659 unsigned int timeout = TALITOS_TIMEOUT;
661 setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
662 while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
666 dev_err(dev, "failed to reset rng hw\n");
670 /* start generating */
671 setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
676 static int talitos_register_rng(struct device *dev)
678 struct talitos_private *priv = dev_get_drvdata(dev);
680 priv->rng.name = dev_driver_string(dev),
681 priv->rng.init = talitos_rng_init,
682 priv->rng.data_present = talitos_rng_data_present,
683 priv->rng.data_read = talitos_rng_data_read,
684 priv->rng.priv = (unsigned long)dev;
686 return hwrng_register(&priv->rng);
689 static void talitos_unregister_rng(struct device *dev)
691 struct talitos_private *priv = dev_get_drvdata(dev);
693 hwrng_unregister(&priv->rng);
699 #define TALITOS_CRA_PRIORITY 3000
700 #define TALITOS_MAX_KEY_SIZE 64
701 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
703 #define MD5_BLOCK_SIZE 64
708 __be32 desc_hdr_template;
709 u8 key[TALITOS_MAX_KEY_SIZE];
710 u8 iv[TALITOS_MAX_IV_LENGTH];
712 unsigned int enckeylen;
713 unsigned int authkeylen;
714 unsigned int authsize;
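/*
 * For the AEAD algorithms, key[] holds the authentication key followed
 * immediately by the encryption key: e.g. a 20-byte HMAC-SHA1 key plus a
 * 16-byte AES key occupies key[0..35] with authkeylen = 20 and
 * enckeylen = 16 (see aead_setkey() and ipsec_esp() ptr[0]/ptr[3]).
 */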
717 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
718 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
720 struct talitos_ahash_req_ctx {
721 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
722 unsigned int hw_context_size;
723 u8 buf[HASH_MAX_BLOCK_SIZE];
724 u8 bufnext[HASH_MAX_BLOCK_SIZE];
728 unsigned int to_hash_later;
730 struct scatterlist bufsl[2];
731 struct scatterlist *psrc;
734 static int aead_setauthsize(struct crypto_aead *authenc,
735 unsigned int authsize)
737 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
739 ctx->authsize = authsize;
744 static int aead_setkey(struct crypto_aead *authenc,
745 const u8 *key, unsigned int keylen)
747 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
748 struct rtattr *rta = (void *)key;
749 struct crypto_authenc_key_param *param;
750 unsigned int authkeylen;
751 unsigned int enckeylen;
753 if (!RTA_OK(rta, keylen))
756 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
759 if (RTA_PAYLOAD(rta) < sizeof(*param))
762 param = RTA_DATA(rta);
763 enckeylen = be32_to_cpu(param->enckeylen);
765 key += RTA_ALIGN(rta->rta_len);
766 keylen -= RTA_ALIGN(rta->rta_len);
768 if (keylen < enckeylen)
771 authkeylen = keylen - enckeylen;
773 if (keylen > TALITOS_MAX_KEY_SIZE)
776 memcpy(&ctx->key, key, keylen);
778 ctx->keylen = keylen;
779 ctx->enckeylen = enckeylen;
780 ctx->authkeylen = authkeylen;
785 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
790 * talitos_edesc - s/w-extended descriptor
791 * @src_nents: number of segments in input scatterlist
792 * @dst_nents: number of segments in output scatterlist
793 * @dma_len: length of dma mapped link_tbl space
794 * @dma_link_tbl: bus physical address of link_tbl
795 * @desc: h/w descriptor
796 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
798 * if decrypting (with authcheck), or either one of src_nents or dst_nents
799 * is greater than 1, an integrity check value is concatenated to the end
802 struct talitos_edesc {
808 dma_addr_t dma_link_tbl;
809 struct talitos_desc desc;
810 struct talitos_ptr link_tbl[0];
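/*
 * e.g. with a 3-segment source, a 2-segment destination and a 12-byte ICV,
 * talitos_edesc_alloc() below reserves room for (3 + 2 + 2) link table
 * entries plus the ICV bytes after the base struct, and dma-maps that tail
 * region in one piece (dma_link_tbl/dma_len).
 */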
813 static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
814 unsigned int nents, enum dma_data_direction dir,
817 if (unlikely(chained))
819 dma_map_sg(dev, sg, 1, dir);
820 sg = scatterwalk_sg_next(sg);
823 dma_map_sg(dev, sg, nents, dir);
827 static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
828 enum dma_data_direction dir)
831 dma_unmap_sg(dev, sg, 1, dir);
832 sg = scatterwalk_sg_next(sg);
836 static void talitos_sg_unmap(struct device *dev,
837 struct talitos_edesc *edesc,
838 struct scatterlist *src,
839 struct scatterlist *dst)
841 unsigned int src_nents = edesc->src_nents ? : 1;
842 unsigned int dst_nents = edesc->dst_nents ? : 1;
845 if (edesc->src_is_chained)
846 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
848 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
851 if (edesc->dst_is_chained)
852 talitos_unmap_sg_chain(dev, dst,
855 dma_unmap_sg(dev, dst, dst_nents,
859 if (edesc->src_is_chained)
860 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
862 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
865 static void ipsec_esp_unmap(struct device *dev,
866 struct talitos_edesc *edesc,
867 struct aead_request *areq)
869 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
870 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
871 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
872 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
874 dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
876 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
879 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
884 * ipsec_esp descriptor callbacks
886 static void ipsec_esp_encrypt_done(struct device *dev,
887 struct talitos_desc *desc, void *context,
890 struct aead_request *areq = context;
891 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
892 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
893 struct talitos_edesc *edesc;
894 struct scatterlist *sg;
897 edesc = container_of(desc, struct talitos_edesc, desc);
899 ipsec_esp_unmap(dev, edesc, areq);
901 /* copy the generated ICV to dst */
902 if (edesc->dma_len) {
903 icvdata = &edesc->link_tbl[edesc->src_nents +
904 edesc->dst_nents + 2];
905 sg = sg_last(areq->dst, edesc->dst_nents);
906 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
907 icvdata, ctx->authsize);
912 aead_request_complete(areq, err);
915 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
916 struct talitos_desc *desc,
917 void *context, int err)
919 struct aead_request *req = context;
920 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
921 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
922 struct talitos_edesc *edesc;
923 struct scatterlist *sg;
926 edesc = container_of(desc, struct talitos_edesc, desc);
928 ipsec_esp_unmap(dev, edesc, req);
933 icvdata = &edesc->link_tbl[edesc->src_nents +
934 edesc->dst_nents + 2];
936 icvdata = &edesc->link_tbl[0];
938 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
939 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
940 ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
945 aead_request_complete(req, err);
948 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
949 struct talitos_desc *desc,
950 void *context, int err)
952 struct aead_request *req = context;
953 struct talitos_edesc *edesc;
955 edesc = container_of(desc, struct talitos_edesc, desc);
957 ipsec_esp_unmap(dev, edesc, req);
959 /* check ICV auth status */
960 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
961 DESC_HDR_LO_ICCR1_PASS))
966 aead_request_complete(req, err);
970 * convert scatterlist to SEC h/w link table format
971 * stop at cryptlen bytes
973 static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
974 int cryptlen, struct talitos_ptr *link_tbl_ptr)
979 to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
980 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
981 link_tbl_ptr->j_extent = 0;
983 cryptlen -= sg_dma_len(sg);
984 sg = scatterwalk_sg_next(sg);
987 /* trim the last entry (or two) so the total length equals cryptlen */
989 while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
990 /* Empty this entry, and move to previous one */
991 cryptlen += be16_to_cpu(link_tbl_ptr->len);
992 link_tbl_ptr->len = 0;
996 link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
999 /* tag end of link table */
1000 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
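/*
 * e.g. with three 1500-byte segments and cryptlen == 4000, the first two
 * link table entries keep their 1500-byte lengths and the last entry is
 * trimmed from 1500 to 1000 bytes before being tagged with
 * DESC_PTR_LNKTBL_RETURN.
 */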
1006 * fill in and submit ipsec_esp descriptor
1008 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1010 void (*callback) (struct device *dev,
1011 struct talitos_desc *desc,
1012 void *context, int error))
1014 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1015 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1016 struct device *dev = ctx->dev;
1017 struct talitos_desc *desc = &edesc->desc;
1018 unsigned int cryptlen = areq->cryptlen;
1019 unsigned int authsize = ctx->authsize;
1020 unsigned int ivsize = crypto_aead_ivsize(aead);
1022 int sg_link_tbl_len;
1025 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1028 map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
1029 sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
1031 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
1035 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1036 (char *)&ctx->key + ctx->authkeylen, 0,
1041 * map and adjust cipher len to aead request cryptlen.
1042 * extent is the number of HMAC bytes appended to the ciphertext,
1043 * typically 12 for ipsec
1045 desc->ptr[4].len = cpu_to_be16(cryptlen);
1046 desc->ptr[4].j_extent = authsize;
1048 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1049 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1051 edesc->src_is_chained);
1053 if (sg_count == 1) {
1054 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1056 sg_link_tbl_len = cryptlen;
1058 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1059 sg_link_tbl_len = cryptlen + authsize;
1061 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1062 &edesc->link_tbl[0]);
1064 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1065 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1066 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1070 /* Only one segment now, so no link tbl needed */
1071 to_talitos_ptr(&desc->ptr[4],
1072 sg_dma_address(areq->src));
1077 desc->ptr[5].len = cpu_to_be16(cryptlen);
1078 desc->ptr[5].j_extent = authsize;
1080 if (areq->src != areq->dst)
1081 sg_count = talitos_map_sg(dev, areq->dst,
1082 edesc->dst_nents ? : 1,
1084 edesc->dst_is_chained);
1086 if (sg_count == 1) {
1087 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1089 struct talitos_ptr *link_tbl_ptr =
1090 &edesc->link_tbl[edesc->src_nents + 1];
1092 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1093 (edesc->src_nents + 1) *
1094 sizeof(struct talitos_ptr));
1095 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1098 /* Add an entry to the link table for ICV data */
1099 link_tbl_ptr += sg_count - 1;
1100 link_tbl_ptr->j_extent = 0;
1103 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1104 link_tbl_ptr->len = cpu_to_be16(authsize);
1106 /* icv data follows link tables */
1107 to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1108 (edesc->src_nents + edesc->dst_nents + 2) *
1109 sizeof(struct talitos_ptr));
1110 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1111 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1112 edesc->dma_len, DMA_BIDIRECTIONAL);
1116 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1119 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1120 if (ret != -EINPROGRESS) {
1121 ipsec_esp_unmap(dev, edesc, areq);
1128 * derive number of elements in scatterlist
1130 static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1132 struct scatterlist *sg = sg_list;
1136 while (nbytes > 0) {
1138 nbytes -= sg->length;
1139 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1141 sg = scatterwalk_sg_next(sg);
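/*
 * A zero-length entry that is not the last one is the chain marker left by
 * scatterwalk_sg_chain() (as in ahash_process_req() below), so the list is
 * flagged as chained and the map/unmap helpers walk it one entry at a time.
 */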
1148 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
1150 * @nents: Number of SG entries
1151 * @buf: Where to copy to
1152 * @buflen: The number of bytes to copy
1153 * @skip: The number of bytes to skip before copying.
1154 * Note: skip + buflen should equal SG total size.
1156 * Returns the number of copied bytes.
1159 static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
1160 void *buf, size_t buflen, unsigned int skip)
1162 unsigned int offset = 0;
1163 unsigned int boffset = 0;
1164 struct sg_mapping_iter miter;
1165 unsigned long flags;
1166 unsigned int sg_flags = SG_MITER_ATOMIC;
1167 size_t total_buffer = buflen + skip;
1169 sg_flags |= SG_MITER_FROM_SG;
1171 sg_miter_start(&miter, sgl, nents, sg_flags);
1173 local_irq_save(flags);
1175 while (sg_miter_next(&miter) && offset < total_buffer) {
1177 unsigned int ignore;
1179 if ((offset + miter.length) > skip) {
1180 if (offset < skip) {
1181 /* Copy part of this segment */
1182 ignore = skip - offset;
1183 len = miter.length - ignore;
1184 if (boffset + len > buflen)
1185 len = buflen - boffset;
1186 memcpy(buf + boffset, miter.addr + ignore, len);
1188 /* Copy all of this segment (up to buflen) */
1190 if (boffset + len > buflen)
1191 len = buflen - boffset;
1192 memcpy(buf + boffset, miter.addr, len);
1196 offset += miter.length;
1199 sg_miter_stop(&miter);
1201 local_irq_restore(flags);
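/*
 * e.g. for an 80-byte scatterlist with buflen == 16 and skip == 64, the
 * final 16 bytes of the scatterlist end up in buf; ahash_process_req()
 * below uses this to stash the trailing partial block for the next
 * hash operation.
 */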
1206 * allocate and map the extended descriptor
1208 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1209 struct scatterlist *src,
1210 struct scatterlist *dst,
1212 unsigned int cryptlen,
1213 unsigned int authsize,
1217 struct talitos_edesc *edesc;
1218 int src_nents, dst_nents, alloc_len, dma_len;
1219 int src_chained, dst_chained = 0;
1220 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1223 if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1224 dev_err(dev, "length exceeds h/w max limit\n");
1225 return ERR_PTR(-EINVAL);
1228 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1229 src_nents = (src_nents == 1) ? 0 : src_nents;
1235 dst_nents = src_nents;
1237 dst_nents = sg_count(dst, cryptlen + authsize,
1239 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1244 * allocate space for base edesc plus the link tables,
1245 * allowing for two separate entries for ICV and generated ICV (+ 2),
1246 * and the ICV data itself
1248 alloc_len = sizeof(struct talitos_edesc);
1249 if (src_nents || dst_nents) {
1250 dma_len = (src_nents + dst_nents + 2) *
1251 sizeof(struct talitos_ptr) + authsize;
1252 alloc_len += dma_len;
1255 alloc_len += icv_stashing ? authsize : 0;
1258 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1260 dev_err(dev, "could not allocate edescriptor\n");
1261 return ERR_PTR(-ENOMEM);
1264 edesc->src_nents = src_nents;
1265 edesc->dst_nents = dst_nents;
1266 edesc->src_is_chained = src_chained;
1267 edesc->dst_is_chained = dst_chained;
1268 edesc->dma_len = dma_len;
1270 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1277 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1280 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1281 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1283 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1284 areq->cryptlen, ctx->authsize, icv_stashing,
1288 static int aead_encrypt(struct aead_request *req)
1290 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1291 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1292 struct talitos_edesc *edesc;
1294 /* allocate extended descriptor */
1295 edesc = aead_edesc_alloc(req, 0);
1297 return PTR_ERR(edesc);
1300 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1302 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1305 static int aead_decrypt(struct aead_request *req)
1307 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1308 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1309 unsigned int authsize = ctx->authsize;
1310 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1311 struct talitos_edesc *edesc;
1312 struct scatterlist *sg;
1315 req->cryptlen -= authsize;
1317 /* allocate extended descriptor */
1318 edesc = aead_edesc_alloc(req, 1);
1320 return PTR_ERR(edesc);
1322 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1323 ((!edesc->src_nents && !edesc->dst_nents) ||
1324 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1326 /* decrypt and check the ICV */
1327 edesc->desc.hdr = ctx->desc_hdr_template |
1328 DESC_HDR_DIR_INBOUND |
1329 DESC_HDR_MODE1_MDEU_CICV;
1331 /* reset integrity check result bits */
1332 edesc->desc.hdr_lo = 0;
1334 return ipsec_esp(edesc, req, NULL, 0,
1335 ipsec_esp_decrypt_hwauth_done);
1339 /* Have to check the ICV with software */
1340 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1342 /* stash incoming ICV for later comparison with the ICV generated by the h/w */
1344 icvdata = &edesc->link_tbl[edesc->src_nents +
1345 edesc->dst_nents + 2];
1347 icvdata = &edesc->link_tbl[0];
1349 sg = sg_last(req->src, edesc->src_nents ? : 1);
1351 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1354 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
1357 static int aead_givencrypt(struct aead_givcrypt_request *req)
1359 struct aead_request *areq = &req->areq;
1360 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1361 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1362 struct talitos_edesc *edesc;
1364 /* allocate extended descriptor */
1365 edesc = aead_edesc_alloc(areq, 0);
1367 return PTR_ERR(edesc);
1370 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1372 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1373 /* avoid consecutive packets going out with same IV */
1374 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1376 return ipsec_esp(edesc, areq, req->giv, req->seq,
1377 ipsec_esp_encrypt_done);
1380 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1381 const u8 *key, unsigned int keylen)
1383 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1384 struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
1386 if (keylen > TALITOS_MAX_KEY_SIZE)
1389 if (keylen < alg->min_keysize || keylen > alg->max_keysize)
1392 memcpy(&ctx->key, key, keylen);
1393 ctx->keylen = keylen;
1398 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1402 static void common_nonsnoop_unmap(struct device *dev,
1403 struct talitos_edesc *edesc,
1404 struct ablkcipher_request *areq)
1406 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1407 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1408 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1410 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1413 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1417 static void ablkcipher_done(struct device *dev,
1418 struct talitos_desc *desc, void *context,
1421 struct ablkcipher_request *areq = context;
1422 struct talitos_edesc *edesc;
1424 edesc = container_of(desc, struct talitos_edesc, desc);
1426 common_nonsnoop_unmap(dev, edesc, areq);
1430 areq->base.complete(&areq->base, err);
1433 static int common_nonsnoop(struct talitos_edesc *edesc,
1434 struct ablkcipher_request *areq,
1436 void (*callback) (struct device *dev,
1437 struct talitos_desc *desc,
1438 void *context, int error))
1440 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1441 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1442 struct device *dev = ctx->dev;
1443 struct talitos_desc *desc = &edesc->desc;
1444 unsigned int cryptlen = areq->nbytes;
1445 unsigned int ivsize;
1448 /* first DWORD empty */
1449 desc->ptr[0].len = 0;
1450 to_talitos_ptr(&desc->ptr[0], 0);
1451 desc->ptr[0].j_extent = 0;
1454 ivsize = crypto_ablkcipher_ivsize(cipher);
1455 map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
1459 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1460 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1465 desc->ptr[3].len = cpu_to_be16(cryptlen);
1466 desc->ptr[3].j_extent = 0;
1468 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1469 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1471 edesc->src_is_chained);
1473 if (sg_count == 1) {
1474 to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1476 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1477 &edesc->link_tbl[0]);
1479 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1480 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1481 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1485 /* Only one segment now, so no link tbl needed */
1486 to_talitos_ptr(&desc->ptr[3],
1487 sg_dma_address(areq->src));
1492 desc->ptr[4].len = cpu_to_be16(cryptlen);
1493 desc->ptr[4].j_extent = 0;
1495 if (areq->src != areq->dst)
1496 sg_count = talitos_map_sg(dev, areq->dst,
1497 edesc->dst_nents ? : 1,
1499 edesc->dst_is_chained);
1501 if (sg_count == 1) {
1502 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1504 struct talitos_ptr *link_tbl_ptr =
1505 &edesc->link_tbl[edesc->src_nents + 1];
1507 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1508 (edesc->src_nents + 1) *
1509 sizeof(struct talitos_ptr));
1510 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1511 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1513 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1514 edesc->dma_len, DMA_BIDIRECTIONAL);
1518 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1521 /* last DWORD empty */
1522 desc->ptr[6].len = 0;
1523 to_talitos_ptr(&desc->ptr[6], 0);
1524 desc->ptr[6].j_extent = 0;
1526 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1527 if (ret != -EINPROGRESS) {
1528 common_nonsnoop_unmap(dev, edesc, areq);
1534 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1537 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1538 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1540 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1541 areq->nbytes, 0, 0, areq->base.flags);
1544 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1546 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1547 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1548 struct talitos_edesc *edesc;
1550 /* allocate extended descriptor */
1551 edesc = ablkcipher_edesc_alloc(areq);
1553 return PTR_ERR(edesc);
1556 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1558 return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1561 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1563 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1564 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1565 struct talitos_edesc *edesc;
1567 /* allocate extended descriptor */
1568 edesc = ablkcipher_edesc_alloc(areq);
1570 return PTR_ERR(edesc);
1572 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1574 return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1577 static void common_nonsnoop_hash_unmap(struct device *dev,
1578 struct talitos_edesc *edesc,
1579 struct ahash_request *areq)
1581 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1583 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1585 /* When a hash context was mapped in via ptr[1], it must be unmapped. */
1586 if (edesc->desc.ptr[1].len)
1587 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1590 if (edesc->desc.ptr[2].len)
1591 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1594 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1597 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1602 static void ahash_done(struct device *dev,
1603 struct talitos_desc *desc, void *context,
1606 struct ahash_request *areq = context;
1607 struct talitos_edesc *edesc =
1608 container_of(desc, struct talitos_edesc, desc);
1609 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1611 if (!req_ctx->last && req_ctx->to_hash_later) {
1612 /* Position any partial block for next update/final/finup */
1613 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1614 req_ctx->nbuf = req_ctx->to_hash_later;
1616 common_nonsnoop_hash_unmap(dev, edesc, areq);
1620 areq->base.complete(&areq->base, err);
1623 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1624 struct ahash_request *areq, unsigned int length,
1625 void (*callback) (struct device *dev,
1626 struct talitos_desc *desc,
1627 void *context, int error))
1629 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1630 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1631 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1632 struct device *dev = ctx->dev;
1633 struct talitos_desc *desc = &edesc->desc;
1636 /* first DWORD empty */
1637 desc->ptr[0] = zero_entry;
1639 /* hash context in */
1640 if (!req_ctx->first || req_ctx->swinit) {
1641 map_single_talitos_ptr(dev, &desc->ptr[1],
1642 req_ctx->hw_context_size,
1643 (char *)req_ctx->hw_context, 0,
1645 req_ctx->swinit = 0;
1647 desc->ptr[1] = zero_entry;
1648 /* Indicate next op is not the first. */
1654 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1655 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1657 desc->ptr[2] = zero_entry;
1662 desc->ptr[3].len = cpu_to_be16(length);
1663 desc->ptr[3].j_extent = 0;
1665 sg_count = talitos_map_sg(dev, req_ctx->psrc,
1666 edesc->src_nents ? : 1,
1668 edesc->src_is_chained);
1670 if (sg_count == 1) {
1671 to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1673 sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1674 &edesc->link_tbl[0]);
1676 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1677 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1678 dma_sync_single_for_device(ctx->dev,
1679 edesc->dma_link_tbl,
1683 /* Only one segment now, so no link tbl needed */
1684 to_talitos_ptr(&desc->ptr[3],
1685 sg_dma_address(req_ctx->psrc));
1689 /* fifth DWORD empty */
1690 desc->ptr[4] = zero_entry;
1692 /* hash/HMAC out -or- hash context out */
1694 map_single_talitos_ptr(dev, &desc->ptr[5],
1695 crypto_ahash_digestsize(tfm),
1696 areq->result, 0, DMA_FROM_DEVICE);
1698 map_single_talitos_ptr(dev, &desc->ptr[5],
1699 req_ctx->hw_context_size,
1700 req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1702 /* last DWORD empty */
1703 desc->ptr[6] = zero_entry;
1705 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1706 if (ret != -EINPROGRESS) {
1707 common_nonsnoop_hash_unmap(dev, edesc, areq);
1713 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1714 unsigned int nbytes)
1716 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1717 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1718 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1720 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
1721 nbytes, 0, 0, areq->base.flags);
1724 static int ahash_init(struct ahash_request *areq)
1726 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1727 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1729 /* Initialize the context */
1731 req_ctx->first = 1; /* first indicates h/w must init its context */
1732 req_ctx->swinit = 0; /* assume h/w init of context */
1733 req_ctx->hw_context_size =
1734 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1735 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1736 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1742 * on h/w without explicit sha224 support, we initialize h/w context
1743 * manually with sha224 constants, and tell it to run sha256.
1745 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1747 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1750 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1752 req_ctx->hw_context[0] = SHA224_H0;
1753 req_ctx->hw_context[1] = SHA224_H1;
1754 req_ctx->hw_context[2] = SHA224_H2;
1755 req_ctx->hw_context[3] = SHA224_H3;
1756 req_ctx->hw_context[4] = SHA224_H4;
1757 req_ctx->hw_context[5] = SHA224_H5;
1758 req_ctx->hw_context[6] = SHA224_H6;
1759 req_ctx->hw_context[7] = SHA224_H7;
1761 /* init 64-bit count */
1762 req_ctx->hw_context[8] = 0;
1763 req_ctx->hw_context[9] = 0;
1768 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1770 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1771 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1772 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1773 struct talitos_edesc *edesc;
1774 unsigned int blocksize =
1775 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1776 unsigned int nbytes_to_hash;
1777 unsigned int to_hash_later;
1781 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1782 /* Buffer up to one whole block */
1783 sg_copy_to_buffer(areq->src,
1784 sg_count(areq->src, nbytes, &chained),
1785 req_ctx->buf + req_ctx->nbuf, nbytes);
1786 req_ctx->nbuf += nbytes;
1790 /* At least (blocksize + 1) bytes are available to hash */
1791 nbytes_to_hash = nbytes + req_ctx->nbuf;
1792 to_hash_later = nbytes_to_hash & (blocksize - 1);
1796 else if (to_hash_later)
1797 /* There is a partial block. Hash the full block(s) now */
1798 nbytes_to_hash -= to_hash_later;
1800 /* Keep one block buffered */
1801 nbytes_to_hash -= blocksize;
1802 to_hash_later = blocksize;
1805 /* Chain in any previously buffered data */
1806 if (req_ctx->nbuf) {
1807 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1808 sg_init_table(req_ctx->bufsl, nsg);
1809 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1811 scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1812 req_ctx->psrc = req_ctx->bufsl;
1814 req_ctx->psrc = areq->src;
1816 if (to_hash_later) {
1817 int nents = sg_count(areq->src, nbytes, &chained);
1818 sg_copy_end_to_buffer(areq->src, nents,
1821 nbytes - to_hash_later);
1823 req_ctx->to_hash_later = to_hash_later;
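/*
 * e.g. for sha256 (64-byte blocks) with 10 bytes already buffered and a
 * 100-byte update: nbytes_to_hash = 110 and to_hash_later = 110 % 64 = 46,
 * so 64 bytes are hashed now and the remaining 46 are saved for the next
 * update/final/finup.
 */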
1825 /* Allocate extended descriptor */
1826 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1828 return PTR_ERR(edesc);
1830 edesc->desc.hdr = ctx->desc_hdr_template;
1832 /* On last one, request SEC to pad; otherwise continue */
1834 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1836 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1838 /* request SEC to INIT hash. */
1839 if (req_ctx->first && !req_ctx->swinit)
1840 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1842 /* When the tfm context has a keylen, it's an HMAC.
1843 * A first or last (i.e. not middle) descriptor must request HMAC.
1845 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1846 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1848 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1852 static int ahash_update(struct ahash_request *areq)
1854 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1858 return ahash_process_req(areq, areq->nbytes);
1861 static int ahash_final(struct ahash_request *areq)
1863 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1867 return ahash_process_req(areq, 0);
1870 static int ahash_finup(struct ahash_request *areq)
1872 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1876 return ahash_process_req(areq, areq->nbytes);
1879 static int ahash_digest(struct ahash_request *areq)
1881 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1882 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1887 return ahash_process_req(areq, areq->nbytes);
1890 struct talitos_alg_template {
1893 struct crypto_alg crypto;
1894 struct ahash_alg hash;
1896 __be32 desc_hdr_template;
1899 static struct talitos_alg_template driver_algs[] = {
1900 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1901 { .type = CRYPTO_ALG_TYPE_AEAD,
1903 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1904 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1905 .cra_blocksize = AES_BLOCK_SIZE,
1906 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1907 .cra_type = &crypto_aead_type,
1909 .setkey = aead_setkey,
1910 .setauthsize = aead_setauthsize,
1911 .encrypt = aead_encrypt,
1912 .decrypt = aead_decrypt,
1913 .givencrypt = aead_givencrypt,
1914 .geniv = "<built-in>",
1915 .ivsize = AES_BLOCK_SIZE,
1916 .maxauthsize = SHA1_DIGEST_SIZE,
1919 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1920 DESC_HDR_SEL0_AESU |
1921 DESC_HDR_MODE0_AESU_CBC |
1922 DESC_HDR_SEL1_MDEUA |
1923 DESC_HDR_MODE1_MDEU_INIT |
1924 DESC_HDR_MODE1_MDEU_PAD |
1925 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1927 { .type = CRYPTO_ALG_TYPE_AEAD,
1929 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1930 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1931 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1932 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1933 .cra_type = &crypto_aead_type,
1935 .setkey = aead_setkey,
1936 .setauthsize = aead_setauthsize,
1937 .encrypt = aead_encrypt,
1938 .decrypt = aead_decrypt,
1939 .givencrypt = aead_givencrypt,
1940 .geniv = "<built-in>",
1941 .ivsize = DES3_EDE_BLOCK_SIZE,
1942 .maxauthsize = SHA1_DIGEST_SIZE,
1945 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1947 DESC_HDR_MODE0_DEU_CBC |
1948 DESC_HDR_MODE0_DEU_3DES |
1949 DESC_HDR_SEL1_MDEUA |
1950 DESC_HDR_MODE1_MDEU_INIT |
1951 DESC_HDR_MODE1_MDEU_PAD |
1952 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1954 { .type = CRYPTO_ALG_TYPE_AEAD,
1956 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1957 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1958 .cra_blocksize = AES_BLOCK_SIZE,
1959 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1960 .cra_type = &crypto_aead_type,
1962 .setkey = aead_setkey,
1963 .setauthsize = aead_setauthsize,
1964 .encrypt = aead_encrypt,
1965 .decrypt = aead_decrypt,
1966 .givencrypt = aead_givencrypt,
1967 .geniv = "<built-in>",
1968 .ivsize = AES_BLOCK_SIZE,
1969 .maxauthsize = SHA256_DIGEST_SIZE,
1972 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1973 DESC_HDR_SEL0_AESU |
1974 DESC_HDR_MODE0_AESU_CBC |
1975 DESC_HDR_SEL1_MDEUA |
1976 DESC_HDR_MODE1_MDEU_INIT |
1977 DESC_HDR_MODE1_MDEU_PAD |
1978 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1980 { .type = CRYPTO_ALG_TYPE_AEAD,
1982 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1983 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1984 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1985 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1986 .cra_type = &crypto_aead_type,
1988 .setkey = aead_setkey,
1989 .setauthsize = aead_setauthsize,
1990 .encrypt = aead_encrypt,
1991 .decrypt = aead_decrypt,
1992 .givencrypt = aead_givencrypt,
1993 .geniv = "<built-in>",
1994 .ivsize = DES3_EDE_BLOCK_SIZE,
1995 .maxauthsize = SHA256_DIGEST_SIZE,
1998 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2000 DESC_HDR_MODE0_DEU_CBC |
2001 DESC_HDR_MODE0_DEU_3DES |
2002 DESC_HDR_SEL1_MDEUA |
2003 DESC_HDR_MODE1_MDEU_INIT |
2004 DESC_HDR_MODE1_MDEU_PAD |
2005 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2007 { .type = CRYPTO_ALG_TYPE_AEAD,
2009 .cra_name = "authenc(hmac(md5),cbc(aes))",
2010 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2011 .cra_blocksize = AES_BLOCK_SIZE,
2012 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2013 .cra_type = &crypto_aead_type,
2015 .setkey = aead_setkey,
2016 .setauthsize = aead_setauthsize,
2017 .encrypt = aead_encrypt,
2018 .decrypt = aead_decrypt,
2019 .givencrypt = aead_givencrypt,
2020 .geniv = "<built-in>",
2021 .ivsize = AES_BLOCK_SIZE,
2022 .maxauthsize = MD5_DIGEST_SIZE,
2025 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2026 DESC_HDR_SEL0_AESU |
2027 DESC_HDR_MODE0_AESU_CBC |
2028 DESC_HDR_SEL1_MDEUA |
2029 DESC_HDR_MODE1_MDEU_INIT |
2030 DESC_HDR_MODE1_MDEU_PAD |
2031 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2033 { .type = CRYPTO_ALG_TYPE_AEAD,
2035 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2036 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2037 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2038 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2039 .cra_type = &crypto_aead_type,
2041 .setkey = aead_setkey,
2042 .setauthsize = aead_setauthsize,
2043 .encrypt = aead_encrypt,
2044 .decrypt = aead_decrypt,
2045 .givencrypt = aead_givencrypt,
2046 .geniv = "<built-in>",
2047 .ivsize = DES3_EDE_BLOCK_SIZE,
2048 .maxauthsize = MD5_DIGEST_SIZE,
2051 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2053 DESC_HDR_MODE0_DEU_CBC |
2054 DESC_HDR_MODE0_DEU_3DES |
2055 DESC_HDR_SEL1_MDEUA |
2056 DESC_HDR_MODE1_MDEU_INIT |
2057 DESC_HDR_MODE1_MDEU_PAD |
2058 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2060 /* ABLKCIPHER algorithms. */
2061 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2063 .cra_name = "cbc(aes)",
2064 .cra_driver_name = "cbc-aes-talitos",
2065 .cra_blocksize = AES_BLOCK_SIZE,
2066 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2068 .cra_type = &crypto_ablkcipher_type,
2070 .setkey = ablkcipher_setkey,
2071 .encrypt = ablkcipher_encrypt,
2072 .decrypt = ablkcipher_decrypt,
2074 .min_keysize = AES_MIN_KEY_SIZE,
2075 .max_keysize = AES_MAX_KEY_SIZE,
2076 .ivsize = AES_BLOCK_SIZE,
2079 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2080 DESC_HDR_SEL0_AESU |
2081 DESC_HDR_MODE0_AESU_CBC,
2083 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2085 .cra_name = "cbc(des3_ede)",
2086 .cra_driver_name = "cbc-3des-talitos",
2087 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2088 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2090 .cra_type = &crypto_ablkcipher_type,
2092 .setkey = ablkcipher_setkey,
2093 .encrypt = ablkcipher_encrypt,
2094 .decrypt = ablkcipher_decrypt,
2096 .min_keysize = DES3_EDE_KEY_SIZE,
2097 .max_keysize = DES3_EDE_KEY_SIZE,
2098 .ivsize = DES3_EDE_BLOCK_SIZE,
2101 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2103 DESC_HDR_MODE0_DEU_CBC |
2104 DESC_HDR_MODE0_DEU_3DES,
2106 /* AHASH algorithms. */
2107 { .type = CRYPTO_ALG_TYPE_AHASH,
2110 .update = ahash_update,
2111 .final = ahash_final,
2112 .finup = ahash_finup,
2113 .digest = ahash_digest,
2114 .halg.digestsize = MD5_DIGEST_SIZE,
2117 .cra_driver_name = "md5-talitos",
2118 .cra_blocksize = MD5_BLOCK_SIZE,
2119 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2121 .cra_type = &crypto_ahash_type
2124 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2125 DESC_HDR_SEL0_MDEUA |
2126 DESC_HDR_MODE0_MDEU_MD5,
2128 { .type = CRYPTO_ALG_TYPE_AHASH,
2131 .update = ahash_update,
2132 .final = ahash_final,
2133 .finup = ahash_finup,
2134 .digest = ahash_digest,
2135 .halg.digestsize = SHA1_DIGEST_SIZE,
2138 .cra_driver_name = "sha1-talitos",
2139 .cra_blocksize = SHA1_BLOCK_SIZE,
2140 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2142 .cra_type = &crypto_ahash_type
2145 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2146 DESC_HDR_SEL0_MDEUA |
2147 DESC_HDR_MODE0_MDEU_SHA1,
2149 { .type = CRYPTO_ALG_TYPE_AHASH,
2152 .update = ahash_update,
2153 .final = ahash_final,
2154 .finup = ahash_finup,
2155 .digest = ahash_digest,
2156 .halg.digestsize = SHA224_DIGEST_SIZE,
2158 .cra_name = "sha224",
2159 .cra_driver_name = "sha224-talitos",
2160 .cra_blocksize = SHA224_BLOCK_SIZE,
2161 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2163 .cra_type = &crypto_ahash_type
2166 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2167 DESC_HDR_SEL0_MDEUA |
2168 DESC_HDR_MODE0_MDEU_SHA224,
2170 { .type = CRYPTO_ALG_TYPE_AHASH,
2173 .update = ahash_update,
2174 .final = ahash_final,
2175 .finup = ahash_finup,
2176 .digest = ahash_digest,
2177 .halg.digestsize = SHA256_DIGEST_SIZE,
2179 .cra_name = "sha256",
2180 .cra_driver_name = "sha256-talitos",
2181 .cra_blocksize = SHA256_BLOCK_SIZE,
2182 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2184 .cra_type = &crypto_ahash_type
2187 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2188 DESC_HDR_SEL0_MDEUA |
2189 DESC_HDR_MODE0_MDEU_SHA256,
2191 { .type = CRYPTO_ALG_TYPE_AHASH,
2194 .update = ahash_update,
2195 .final = ahash_final,
2196 .finup = ahash_finup,
2197 .digest = ahash_digest,
2198 .halg.digestsize = SHA384_DIGEST_SIZE,
2200 .cra_name = "sha384",
2201 .cra_driver_name = "sha384-talitos",
2202 .cra_blocksize = SHA384_BLOCK_SIZE,
2203 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2205 .cra_type = &crypto_ahash_type
2208 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2209 DESC_HDR_SEL0_MDEUB |
2210 DESC_HDR_MODE0_MDEUB_SHA384,
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
				.cra_type = &crypto_ahash_type
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};

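/*
 * Each driver_algs[] template is instantiated once per SEC instance at probe
 * time; talitos_crypto_alg pairs the copied template with the device that
 * registered it, so the tfm init hooks below can find their way back to it.
 */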
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

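/*
 * Common tfm initialization: bind the tfm to the device whose template was
 * registered, assign it a SEC channel (num_channels is validated at probe
 * time to be a power of two, so the mask below is a cheap modulo), and cache
 * the descriptor header template used when building requests.
 */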
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	return 0;
}

static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* reserve room for the per-request hash state */
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

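/*
 * Tear down in roughly the reverse order of probe: unregister the crypto
 * algorithms and the RNG, free the per-channel request fifos, release the
 * interrupt, stop the completion tasklet, and unmap the registers.
 */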
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	if (priv->irq != NO_IRQ) {
		free_irq(priv->irq, dev);
		irq_dispose_mapping(priv->irq);
	}

	tasklet_kill(&priv->done_task);

	iounmap(priv->reg);

	dev_set_drvdata(dev, NULL);

	kfree(priv);

	return 0;
}

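/*
 * Make a device-private copy of an algorithm template and hook up the
 * matching tfm init routine.  When the hardware cannot initialize SHA-224
 * state itself (no TALITOS_FTR_SHA224_HWINIT), fall back to a software init
 * that drives the MDEU in SHA-256 mode.
 */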
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strcmp(alg->cra_name, "sha224")) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);

	t_alg->dev = dev;

	return t_alg;
}

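/*
 * Probe: allocate the driver state, map the interrupt and registers, read
 * the SEC geometry from the device tree, set up the per-channel request
 * fifos, reset the hardware, then register the RNG and every algorithm the
 * hardware supports.  Any failure unwinds through talitos_remove().
 */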
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);

	INIT_LIST_HEAD(&priv->alg_list);

	priv->irq = irq_of_parse_and_map(np, 0);

	if (priv->irq == NO_IRQ) {
		dev_err(dev, "failed to map irq\n");
		err = -EINVAL;
		goto err_out;
	}

	/* get the irq line */
	err = request_irq(priv->irq, talitos_interrupt, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request irq %d\n", priv->irq);
		irq_dispose_mapping(priv->irq);
		priv->irq = NO_IRQ;
		goto err_out;
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

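	/*
	 * The submit counters are biased negative so the submission path can
	 * detect a full channel fifo with a single atomic test instead of
	 * tracking chfifo_len separately.
	 */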
	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));

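	/*
	 * SEC descriptor pointers carry 36-bit physical addresses (a 32-bit
	 * ptr plus an extended-address byte), so request a 36-bit DMA mask.
	 */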
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
				dev_info(dev, "%s\n", name);
			}
		}
	}

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

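/*
 * Match on the oldest supported compatible string; newer SEC revisions are
 * expected to also list "fsl,sec2.0" in their compatible property, and the
 * probe routine above refines feature flags from the more specific
 * "fsl,sec2.1"/"fsl,sec3.0" entries.
 */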
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.owner = THIS_MODULE,
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

static int __init talitos_init(void)
{
	return platform_driver_register(&talitos_driver);
}
module_init(talitos_init);

static void __exit talitos_exit(void)
{
	platform_driver_unregister(&talitos_driver);
}
module_exit(talitos_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");