/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr)
{
	ptr->j_extent = 0;
}
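/*
 * Note: a talitos_ptr carries a 36-bit bus address split across two fields.
 * to_talitos_ptr() stores the lower 32 bits big-endian in ptr->ptr and the
 * upper bits in ptr->eptr (meaningful once the channel's extended addressing
 * is enabled in reset_channel() below), while to_talitos_ptr_extent_clear()
 * zeroes the extent so the pointer describes a plain contiguous buffer
 * rather than a link table.
 */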
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned short len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	ptr->len = cpu_to_be16(len);
	to_talitos_ptr(ptr, dma_addr);
	to_talitos_ptr_extent_clear(ptr);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 be16_to_cpu(ptr->len), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
123 static int reset_device(struct device *dev)
125 struct talitos_private *priv = dev_get_drvdata(dev);
126 unsigned int timeout = TALITOS_TIMEOUT;
127 u32 mcr = TALITOS_MCR_SWR;
129 setbits32(priv->reg + TALITOS_MCR, mcr);
131 while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
136 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
137 setbits32(priv->reg + TALITOS_MCR, mcr);
141 dev_err(dev, "failed to reset device\n");
149 * Reset and initialize the device
151 static int init_device(struct device *dev)
153 struct talitos_private *priv = dev_get_drvdata(dev);
158 * errata documentation: warning: certain SEC interrupts
159 * are not fully cleared by writing the MCR:SWR bit,
160 * set bit twice to completely reset
162 err = reset_device(dev);
166 err = reset_device(dev);
171 for (ch = 0; ch < priv->num_channels; ch++) {
172 err = reset_channel(dev, ch);
177 /* enable channel done and error interrupts */
178 setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
179 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
181 /* disable integrity check error interrupts (use writeback instead) */
182 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
183 setbits32(priv->reg + TALITOS_MDEUICR_LO,
184 TALITOS_MDEUICR_LO_ICE);
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
201 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
202 void (*callback)(struct device *dev,
203 struct talitos_desc *desc,
204 void *context, int error),
207 struct talitos_private *priv = dev_get_drvdata(dev);
208 struct talitos_request *request;
212 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
214 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
215 /* h/w fifo is full */
216 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
220 head = priv->chan[ch].head;
221 request = &priv->chan[ch].fifo[head];
223 /* map descriptor and save caller data */
224 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
226 request->callback = callback;
227 request->context = context;
229 /* increment fifo head */
230 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
233 request->desc = desc;
237 out_be32(priv->chan[ch].reg + TALITOS_FF,
238 upper_32_bits(request->dma_desc));
239 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
240 lower_32_bits(request->dma_desc));
242 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
246 EXPORT_SYMBOL(talitos_submit);
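/*
 * Minimal usage sketch (not part of the driver): a client with a dma-mapped
 * descriptor submits it and treats -EINPROGRESS as success, finishing its
 * request from the completion callback.  my_done(), my_request and
 * handle_submit_failure() are hypothetical names.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_request *req = context;
 *
 *		complete_my_request(req, error);
 *	}
 *
 *	ret = talitos_submit(dev, ch, desc, my_done, req);
 *	if (ret != -EINPROGRESS)
 *		handle_submit_failure(ret);	// e.g. channel fifo full
 */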
249 * process what was done, notify callback of error if not
251 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
253 struct talitos_private *priv = dev_get_drvdata(dev);
254 struct talitos_request *request, saved_req;
258 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
260 tail = priv->chan[ch].tail;
261 while (priv->chan[ch].fifo[tail].desc) {
262 request = &priv->chan[ch].fifo[tail];
264 /* descriptors with their done bits set don't get the error */
266 if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
274 dma_unmap_single(dev, request->dma_desc,
275 sizeof(struct talitos_desc),
278 /* copy entries so we can call callback outside lock */
279 saved_req.desc = request->desc;
280 saved_req.callback = request->callback;
281 saved_req.context = request->context;
283 /* release request entry in fifo */
285 request->desc = NULL;
287 /* increment fifo tail */
288 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
290 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
292 atomic_dec(&priv->chan[ch].submit_count);
294 saved_req.callback(dev, saved_req.desc, saved_req.context,
296 /* channel may resume processing in single desc error case */
297 if (error && !reset_ch && status == error)
299 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
300 tail = priv->chan[ch].tail;
303 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
307 * process completed requests for channels that have done status
309 #define DEF_TALITOS_DONE(name, ch_done_mask) \
310 static void talitos_done_##name(unsigned long data) \
312 struct device *dev = (struct device *)data; \
313 struct talitos_private *priv = dev_get_drvdata(dev); \
314 unsigned long flags; \
316 if (ch_done_mask & 1) \
317 flush_channel(dev, 0, 0, 0); \
318 if (priv->num_channels == 1) \
320 if (ch_done_mask & (1 << 2)) \
321 flush_channel(dev, 1, 0, 0); \
322 if (ch_done_mask & (1 << 4)) \
323 flush_channel(dev, 2, 0, 0); \
324 if (ch_done_mask & (1 << 6)) \
325 flush_channel(dev, 3, 0, 0); \
328 /* At this point, all completed channels have been processed */ \
329 /* Unmask done interrupts for channels completed later on. */ \
330 spin_lock_irqsave(&priv->reg_lock, flags); \
331 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
332 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
333 spin_unlock_irqrestore(&priv->reg_lock, flags); \
335 DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
336 DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
337 DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
340 * locate current (offending) descriptor
342 static u32 current_desc_hdr(struct device *dev, int ch)
344 struct talitos_private *priv = dev_get_drvdata(dev);
348 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
349 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
352 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
356 tail = priv->chan[ch].tail;
359 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
360 iter = (iter + 1) & (priv->fifo_len - 1);
362 dev_err(dev, "couldn't locate current descriptor\n");
367 return priv->chan[ch].fifo[iter].desc->hdr;
371 * user diagnostics; report root cause of error based on execution unit status
373 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
375 struct talitos_private *priv = dev_get_drvdata(dev);
379 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
381 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
382 case DESC_HDR_SEL0_AFEU:
383 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
384 in_be32(priv->reg + TALITOS_AFEUISR),
385 in_be32(priv->reg + TALITOS_AFEUISR_LO));
387 case DESC_HDR_SEL0_DEU:
388 dev_err(dev, "DEUISR 0x%08x_%08x\n",
389 in_be32(priv->reg + TALITOS_DEUISR),
390 in_be32(priv->reg + TALITOS_DEUISR_LO));
392 case DESC_HDR_SEL0_MDEUA:
393 case DESC_HDR_SEL0_MDEUB:
394 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
395 in_be32(priv->reg + TALITOS_MDEUISR),
396 in_be32(priv->reg + TALITOS_MDEUISR_LO));
398 case DESC_HDR_SEL0_RNG:
399 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
400 in_be32(priv->reg + TALITOS_RNGUISR),
401 in_be32(priv->reg + TALITOS_RNGUISR_LO));
403 case DESC_HDR_SEL0_PKEU:
404 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
405 in_be32(priv->reg + TALITOS_PKEUISR),
406 in_be32(priv->reg + TALITOS_PKEUISR_LO));
408 case DESC_HDR_SEL0_AESU:
409 dev_err(dev, "AESUISR 0x%08x_%08x\n",
410 in_be32(priv->reg + TALITOS_AESUISR),
411 in_be32(priv->reg + TALITOS_AESUISR_LO));
413 case DESC_HDR_SEL0_CRCU:
414 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
415 in_be32(priv->reg + TALITOS_CRCUISR),
416 in_be32(priv->reg + TALITOS_CRCUISR_LO));
418 case DESC_HDR_SEL0_KEU:
419 dev_err(dev, "KEUISR 0x%08x_%08x\n",
420 in_be32(priv->reg + TALITOS_KEUISR),
421 in_be32(priv->reg + TALITOS_KEUISR_LO));
425 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
426 case DESC_HDR_SEL1_MDEUA:
427 case DESC_HDR_SEL1_MDEUB:
428 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
429 in_be32(priv->reg + TALITOS_MDEUISR),
430 in_be32(priv->reg + TALITOS_MDEUISR_LO));
432 case DESC_HDR_SEL1_CRCU:
433 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
434 in_be32(priv->reg + TALITOS_CRCUISR),
435 in_be32(priv->reg + TALITOS_CRCUISR_LO));
439 for (i = 0; i < 8; i++)
440 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
441 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
442 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
446 * recover from error interrupts
448 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
450 struct talitos_private *priv = dev_get_drvdata(dev);
451 unsigned int timeout = TALITOS_TIMEOUT;
452 int ch, error, reset_dev = 0, reset_ch = 0;
455 for (ch = 0; ch < priv->num_channels; ch++) {
456 /* skip channels without errors */
457 if (!(isr & (1 << (ch * 2 + 1))))
462 v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
463 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
465 if (v_lo & TALITOS_CCPSR_LO_DOF) {
466 dev_err(dev, "double fetch fifo overflow error\n");
470 if (v_lo & TALITOS_CCPSR_LO_SOF) {
471 /* h/w dropped descriptor */
472 dev_err(dev, "single fetch fifo overflow error\n");
475 if (v_lo & TALITOS_CCPSR_LO_MDTE)
476 dev_err(dev, "master data transfer error\n");
477 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
478 dev_err(dev, "s/g data length zero error\n");
479 if (v_lo & TALITOS_CCPSR_LO_FPZ)
480 dev_err(dev, "fetch pointer zero error\n");
481 if (v_lo & TALITOS_CCPSR_LO_IDH)
482 dev_err(dev, "illegal descriptor header error\n");
483 if (v_lo & TALITOS_CCPSR_LO_IEU)
484 dev_err(dev, "invalid execution unit error\n");
485 if (v_lo & TALITOS_CCPSR_LO_EU)
486 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
487 if (v_lo & TALITOS_CCPSR_LO_GB)
488 dev_err(dev, "gather boundary error\n");
489 if (v_lo & TALITOS_CCPSR_LO_GRL)
490 dev_err(dev, "gather return/length error\n");
491 if (v_lo & TALITOS_CCPSR_LO_SB)
492 dev_err(dev, "scatter boundary error\n");
493 if (v_lo & TALITOS_CCPSR_LO_SRL)
494 dev_err(dev, "scatter return/length error\n");
496 flush_channel(dev, ch, error, reset_ch);
499 reset_channel(dev, ch);
501 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
503 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
504 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
505 TALITOS_CCCR_CONT) && --timeout)
508 dev_err(dev, "failed to restart channel %d\n",
514 if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
515 dev_err(dev, "done overflow, internal time out, or rngu error: "
516 "ISR 0x%08x_%08x\n", isr, isr_lo);
518 /* purge request queues */
519 for (ch = 0; ch < priv->num_channels; ch++)
520 flush_channel(dev, ch, -EIO, 1);
522 /* reset and reinitialize the device */
527 #define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
528 static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
530 struct device *dev = data; \
531 struct talitos_private *priv = dev_get_drvdata(dev); \
533 unsigned long flags; \
535 spin_lock_irqsave(&priv->reg_lock, flags); \
536 isr = in_be32(priv->reg + TALITOS_ISR); \
537 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
538 /* Acknowledge interrupt */ \
539 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
540 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
542 if (unlikely(isr & ch_err_mask || isr_lo)) { \
543 spin_unlock_irqrestore(&priv->reg_lock, flags); \
544 talitos_error(dev, isr & ch_err_mask, isr_lo); \
547 if (likely(isr & ch_done_mask)) { \
548 /* mask further done interrupts. */ \
549 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
550 /* done_task will unmask done interrupts at exit */ \
551 tasklet_schedule(&priv->done_task[tlet]); \
553 spin_unlock_irqrestore(&priv->reg_lock, flags); \
556 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
559 DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
560 DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
561 DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
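/*
 * Interrupt handling summary: each talitos_interrupt_* handler above reads
 * and acknowledges ISR/ISR_LO, hands error bits straight to talitos_error(),
 * and for done bits masks further done interrupts and schedules the matching
 * talitos_done_* tasklet, which drains the affected channels through
 * flush_channel() and then unmasks the done interrupts it disabled.  The
 * 4ch variants cover all channels from one handler; the ch0_2/ch1_3 variants
 * split channels 0/2 and 1/3 across separate handler/tasklet pairs.
 */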
566 static int talitos_rng_data_present(struct hwrng *rng, int wait)
568 struct device *dev = (struct device *)rng->priv;
569 struct talitos_private *priv = dev_get_drvdata(dev);
573 for (i = 0; i < 20; i++) {
574 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
575 TALITOS_RNGUSR_LO_OFL;
584 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
586 struct device *dev = (struct device *)rng->priv;
587 struct talitos_private *priv = dev_get_drvdata(dev);
589 /* rng fifo requires 64-bit accesses */
590 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
591 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
596 static int talitos_rng_init(struct hwrng *rng)
598 struct device *dev = (struct device *)rng->priv;
599 struct talitos_private *priv = dev_get_drvdata(dev);
600 unsigned int timeout = TALITOS_TIMEOUT;
602 setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
603 while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
607 dev_err(dev, "failed to reset rng hw\n");
611 /* start generating */
612 setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
617 static int talitos_register_rng(struct device *dev)
619 struct talitos_private *priv = dev_get_drvdata(dev);
621 priv->rng.name = dev_driver_string(dev),
622 priv->rng.init = talitos_rng_init,
623 priv->rng.data_present = talitos_rng_data_present,
624 priv->rng.data_read = talitos_rng_data_read,
625 priv->rng.priv = (unsigned long)dev;
627 return hwrng_register(&priv->rng);
630 static void talitos_unregister_rng(struct device *dev)
632 struct talitos_private *priv = dev_get_drvdata(dev);
634 hwrng_unregister(&priv->rng);
640 #define TALITOS_CRA_PRIORITY 3000
641 #define TALITOS_MAX_KEY_SIZE 96
642 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
647 __be32 desc_hdr_template;
648 u8 key[TALITOS_MAX_KEY_SIZE];
649 u8 iv[TALITOS_MAX_IV_LENGTH];
651 unsigned int enckeylen;
652 unsigned int authkeylen;
653 unsigned int authsize;
656 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
657 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
659 struct talitos_ahash_req_ctx {
660 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
661 unsigned int hw_context_size;
662 u8 buf[HASH_MAX_BLOCK_SIZE];
663 u8 bufnext[HASH_MAX_BLOCK_SIZE];
667 unsigned int to_hash_later;
669 struct scatterlist bufsl[2];
670 struct scatterlist *psrc;
673 static int aead_setauthsize(struct crypto_aead *authenc,
674 unsigned int authsize)
676 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
678 ctx->authsize = authsize;
683 static int aead_setkey(struct crypto_aead *authenc,
684 const u8 *key, unsigned int keylen)
686 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
687 struct crypto_authenc_keys keys;
689 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
692 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
695 memcpy(ctx->key, keys.authkey, keys.authkeylen);
696 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
698 ctx->keylen = keys.authkeylen + keys.enckeylen;
699 ctx->enckeylen = keys.enckeylen;
700 ctx->authkeylen = keys.authkeylen;
705 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
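/*
 * Layout note: a talitos_edesc is one kmalloc'd (GFP_DMA) allocation.  The
 * h/w descriptor lives inside it and, when any scatterlist has more than one
 * segment, the variable-length link_tbl[] array follows, with extra room
 * after the link table entries for stashed/generated ICV data (see
 * talitos_edesc_alloc() below).
 */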
741 static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
742 unsigned int nents, enum dma_data_direction dir,
745 if (unlikely(chained))
747 dma_map_sg(dev, sg, 1, dir);
751 dma_map_sg(dev, sg, nents, dir);
755 static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
756 enum dma_data_direction dir)
759 dma_unmap_sg(dev, sg, 1, dir);
764 static void talitos_sg_unmap(struct device *dev,
765 struct talitos_edesc *edesc,
766 struct scatterlist *src,
767 struct scatterlist *dst)
769 unsigned int src_nents = edesc->src_nents ? : 1;
770 unsigned int dst_nents = edesc->dst_nents ? : 1;
773 if (edesc->src_chained)
774 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
776 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
779 if (edesc->dst_chained)
780 talitos_unmap_sg_chain(dev, dst,
783 dma_unmap_sg(dev, dst, dst_nents,
787 if (edesc->src_chained)
788 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
790 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
793 static void ipsec_esp_unmap(struct device *dev,
794 struct talitos_edesc *edesc,
795 struct aead_request *areq)
797 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
798 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
799 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
800 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
802 if (edesc->assoc_chained)
803 talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
804 else if (areq->assoclen)
805 /* assoc_nents counts also for IV in non-contiguous cases */
806 dma_unmap_sg(dev, areq->assoc,
807 edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
810 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
813 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
818 * ipsec_esp descriptor callbacks
820 static void ipsec_esp_encrypt_done(struct device *dev,
821 struct talitos_desc *desc, void *context,
824 struct aead_request *areq = context;
825 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
826 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
827 struct talitos_edesc *edesc;
828 struct scatterlist *sg;
831 edesc = container_of(desc, struct talitos_edesc, desc);
833 ipsec_esp_unmap(dev, edesc, areq);
835 /* copy the generated ICV to dst */
836 if (edesc->dst_nents) {
837 icvdata = &edesc->link_tbl[edesc->src_nents +
838 edesc->dst_nents + 2 +
840 sg = sg_last(areq->dst, edesc->dst_nents);
841 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
842 icvdata, ctx->authsize);
847 aead_request_complete(areq, err);
850 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
851 struct talitos_desc *desc,
852 void *context, int err)
854 struct aead_request *req = context;
855 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
856 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
857 struct talitos_edesc *edesc;
858 struct scatterlist *sg;
861 edesc = container_of(desc, struct talitos_edesc, desc);
863 ipsec_esp_unmap(dev, edesc, req);
868 icvdata = &edesc->link_tbl[edesc->src_nents +
869 edesc->dst_nents + 2 +
872 icvdata = &edesc->link_tbl[0];
874 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
875 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
876 ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
881 aead_request_complete(req, err);
884 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
885 struct talitos_desc *desc,
886 void *context, int err)
888 struct aead_request *req = context;
889 struct talitos_edesc *edesc;
891 edesc = container_of(desc, struct talitos_edesc, desc);
893 ipsec_esp_unmap(dev, edesc, req);
895 /* check ICV auth status */
896 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
897 DESC_HDR_LO_ICCR1_PASS))
902 aead_request_complete(req, err);
906 * convert scatterlist to SEC h/w link table format
907 * stop at cryptlen bytes
909 static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
910 int cryptlen, struct talitos_ptr *link_tbl_ptr)
915 to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
916 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
917 link_tbl_ptr->j_extent = 0;
919 cryptlen -= sg_dma_len(sg);
923 /* adjust (decrease) last one (or two) entry's len to cryptlen */
925 while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
926 /* Empty this entry, and move to previous one */
927 cryptlen += be16_to_cpu(link_tbl_ptr->len);
928 link_tbl_ptr->len = 0;
932 be16_add_cpu(&link_tbl_ptr->len, cryptlen);
934 /* tag end of link table */
935 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
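/*
 * Worked example (for illustration only): with cryptlen = 100 and a
 * two-entry scatterlist of 64 + 64 bytes, the loop above emits two link
 * table entries and leaves cryptlen = -28; the fixup then trims the last
 * entry's len from 64 down to 36 so the table describes exactly 100 bytes,
 * and that entry is tagged DESC_PTR_LNKTBL_RETURN to mark the end.
 */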
941 * fill in and submit ipsec_esp descriptor
943 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
944 u64 seq, void (*callback) (struct device *dev,
945 struct talitos_desc *desc,
946 void *context, int error))
948 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
949 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
950 struct device *dev = ctx->dev;
951 struct talitos_desc *desc = &edesc->desc;
952 unsigned int cryptlen = areq->cryptlen;
953 unsigned int authsize = ctx->authsize;
954 unsigned int ivsize = crypto_aead_ivsize(aead);
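	/*
	 * The single-pass IPSEC_ESP descriptor built below uses all seven
	 * pointer dwords: ptr[0] = HMAC key, ptr[1] = associated data plus
	 * IV (via a link table when non-contiguous), ptr[2] = IV in,
	 * ptr[3] = cipher key, ptr[4] = cipher data in (the extent carries
	 * the ICV length), ptr[5] = cipher data out plus ICV, ptr[6] = IV out.
	 */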
959 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
963 desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
964 if (edesc->assoc_nents) {
965 int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
966 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
968 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
969 sizeof(struct talitos_ptr));
970 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
972 /* assoc_nents - 1 entries for assoc, 1 for IV */
973 sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
974 areq->assoclen, tbl_ptr);
976 /* add IV to link table */
977 tbl_ptr += sg_count - 1;
978 tbl_ptr->j_extent = 0;
980 to_talitos_ptr(tbl_ptr, edesc->iv_dma);
981 tbl_ptr->len = cpu_to_be16(ivsize);
982 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
984 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
985 edesc->dma_len, DMA_BIDIRECTIONAL);
988 to_talitos_ptr(&desc->ptr[1],
989 sg_dma_address(areq->assoc));
991 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
992 desc->ptr[1].j_extent = 0;
996 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
997 desc->ptr[2].len = cpu_to_be16(ivsize);
998 desc->ptr[2].j_extent = 0;
999 /* Sync needed for the aead_givencrypt case */
1000 dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1003 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1004 (char *)&ctx->key + ctx->authkeylen,
1009 * map and adjust cipher len to aead request cryptlen.
1010 * extent is bytes of HMAC postpended to ciphertext,
1011 * typically 12 for ipsec
1013 desc->ptr[4].len = cpu_to_be16(cryptlen);
1014 desc->ptr[4].j_extent = authsize;
1016 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1017 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1019 edesc->src_chained);
1021 if (sg_count == 1) {
1022 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1024 sg_link_tbl_len = cryptlen;
1026 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1027 sg_link_tbl_len = cryptlen + authsize;
1029 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1030 &edesc->link_tbl[0]);
1032 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1033 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1034 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1038 /* Only one segment now, so no link tbl needed */
1039 to_talitos_ptr(&desc->ptr[4],
1040 sg_dma_address(areq->src));
1045 desc->ptr[5].len = cpu_to_be16(cryptlen);
1046 desc->ptr[5].j_extent = authsize;
1048 if (areq->src != areq->dst)
1049 sg_count = talitos_map_sg(dev, areq->dst,
1050 edesc->dst_nents ? : 1,
1051 DMA_FROM_DEVICE, edesc->dst_chained);
1053 if (sg_count == 1) {
1054 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1056 int tbl_off = edesc->src_nents + 1;
1057 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1059 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1060 tbl_off * sizeof(struct talitos_ptr));
1061 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1064 /* Add an entry to the link table for ICV data */
1065 tbl_ptr += sg_count - 1;
1066 tbl_ptr->j_extent = 0;
1068 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1069 tbl_ptr->len = cpu_to_be16(authsize);
1071 /* icv data follows link tables */
1072 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1073 (tbl_off + edesc->dst_nents + 1 +
1074 edesc->assoc_nents) *
1075 sizeof(struct talitos_ptr));
1076 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1077 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1078 edesc->dma_len, DMA_BIDIRECTIONAL);
1082 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1085 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1086 if (ret != -EINPROGRESS) {
1087 ipsec_esp_unmap(dev, edesc, areq);
1094 * derive number of elements in scatterlist
1096 static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
1098 struct scatterlist *sg = sg_list;
1102 while (nbytes > 0) {
1104 nbytes -= sg->length;
1105 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1114 * allocate and map the extended descriptor
1116 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1117 struct scatterlist *assoc,
1118 struct scatterlist *src,
1119 struct scatterlist *dst,
1121 unsigned int assoclen,
1122 unsigned int cryptlen,
1123 unsigned int authsize,
1124 unsigned int ivsize,
1129 struct talitos_edesc *edesc;
1130 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
1131 bool assoc_chained = false, src_chained = false, dst_chained = false;
1132 dma_addr_t iv_dma = 0;
1133 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1136 if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1137 dev_err(dev, "length exceeds h/w max limit\n");
1138 return ERR_PTR(-EINVAL);
1142 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1146 * Currently it is assumed that iv is provided whenever assoc
1151 assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
1152 talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
1154 assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
1156 if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
1157 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1160 if (!dst || dst == src) {
1161 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1162 src_nents = (src_nents == 1) ? 0 : src_nents;
1163 dst_nents = dst ? src_nents : 0;
1164 } else { /* dst && dst != src*/
1165 src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1167 src_nents = (src_nents == 1) ? 0 : src_nents;
1168 dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1170 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1174 * allocate space for base edesc plus the link tables,
1175 * allowing for two separate entries for ICV and generated ICV (+ 2),
1176 * and the ICV data itself
1178 alloc_len = sizeof(struct talitos_edesc);
1179 if (assoc_nents || src_nents || dst_nents) {
1180 dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
1181 sizeof(struct talitos_ptr) + authsize;
1182 alloc_len += dma_len;
1185 alloc_len += icv_stashing ? authsize : 0;
1188 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1191 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1193 dma_unmap_sg(dev, assoc,
1194 assoc_nents ? assoc_nents - 1 : 1,
1198 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1200 dev_err(dev, "could not allocate edescriptor\n");
1201 return ERR_PTR(-ENOMEM);
1204 edesc->assoc_nents = assoc_nents;
1205 edesc->src_nents = src_nents;
1206 edesc->dst_nents = dst_nents;
1207 edesc->assoc_chained = assoc_chained;
1208 edesc->src_chained = src_chained;
1209 edesc->dst_chained = dst_chained;
1210 edesc->iv_dma = iv_dma;
1211 edesc->dma_len = dma_len;
1213 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1220 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1221 int icv_stashing, bool encrypt)
1223 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1224 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1225 unsigned int ivsize = crypto_aead_ivsize(authenc);
1227 return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1228 iv, areq->assoclen, areq->cryptlen,
1229 ctx->authsize, ivsize, icv_stashing,
1230 areq->base.flags, encrypt);
1233 static int aead_encrypt(struct aead_request *req)
1235 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1236 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1237 struct talitos_edesc *edesc;
1239 /* allocate extended descriptor */
1240 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1242 return PTR_ERR(edesc);
1245 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1247 return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
1250 static int aead_decrypt(struct aead_request *req)
1252 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1253 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1254 unsigned int authsize = ctx->authsize;
1255 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1256 struct talitos_edesc *edesc;
1257 struct scatterlist *sg;
1260 req->cryptlen -= authsize;
1262 /* allocate extended descriptor */
1263 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1265 return PTR_ERR(edesc);
1267 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1268 ((!edesc->src_nents && !edesc->dst_nents) ||
1269 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1271 /* decrypt and check the ICV */
1272 edesc->desc.hdr = ctx->desc_hdr_template |
1273 DESC_HDR_DIR_INBOUND |
1274 DESC_HDR_MODE1_MDEU_CICV;
1276 /* reset integrity check result bits */
1277 edesc->desc.hdr_lo = 0;
1279 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
1282 /* Have to check the ICV with software */
1283 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1285 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1287 icvdata = &edesc->link_tbl[edesc->src_nents +
1288 edesc->dst_nents + 2 +
1289 edesc->assoc_nents];
1291 icvdata = &edesc->link_tbl[0];
1293 sg = sg_last(req->src, edesc->src_nents ? : 1);
1295 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1298 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
1301 static int aead_givencrypt(struct aead_givcrypt_request *req)
1303 struct aead_request *areq = &req->areq;
1304 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1305 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1306 struct talitos_edesc *edesc;
1308 /* allocate extended descriptor */
1309 edesc = aead_edesc_alloc(areq, req->giv, 0, true);
1311 return PTR_ERR(edesc);
1314 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1316 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1317 /* avoid consecutive packets going out with same IV */
1318 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1320 return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
1323 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1324 const u8 *key, unsigned int keylen)
1326 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1328 memcpy(&ctx->key, key, keylen);
1329 ctx->keylen = keylen;
1334 static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1335 struct scatterlist *dst, unsigned int len,
1336 struct talitos_edesc *edesc)
1338 talitos_sg_unmap(dev, edesc, src, dst);
1341 static void common_nonsnoop_unmap(struct device *dev,
1342 struct talitos_edesc *edesc,
1343 struct ablkcipher_request *areq)
1345 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1347 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
1348 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1349 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1352 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1356 static void ablkcipher_done(struct device *dev,
1357 struct talitos_desc *desc, void *context,
1360 struct ablkcipher_request *areq = context;
1361 struct talitos_edesc *edesc;
1363 edesc = container_of(desc, struct talitos_edesc, desc);
1365 common_nonsnoop_unmap(dev, edesc, areq);
1369 areq->base.complete(&areq->base, err);
1372 int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1373 unsigned int len, struct talitos_edesc *edesc,
1374 enum dma_data_direction dir, struct talitos_ptr *ptr)
1378 ptr->len = cpu_to_be16(len);
1379 to_talitos_ptr_extent_clear(ptr);
1381 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
1382 edesc->src_chained);
1384 if (sg_count == 1) {
1385 to_talitos_ptr(ptr, sg_dma_address(src));
1387 sg_count = sg_to_link_tbl(src, sg_count, len,
1388 &edesc->link_tbl[0]);
1390 to_talitos_ptr(ptr, edesc->dma_link_tbl);
1391 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1392 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1396 /* Only one segment now, so no link tbl needed */
1397 to_talitos_ptr(ptr, sg_dma_address(src));
1403 void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1404 unsigned int len, struct talitos_edesc *edesc,
1405 enum dma_data_direction dir,
1406 struct talitos_ptr *ptr, int sg_count)
1408 ptr->len = cpu_to_be16(len);
1409 to_talitos_ptr_extent_clear(ptr);
1411 if (dir != DMA_NONE)
1412 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1413 dir, edesc->dst_chained);
1415 if (sg_count == 1) {
1416 to_talitos_ptr(ptr, sg_dma_address(dst));
1418 struct talitos_ptr *link_tbl_ptr =
1419 &edesc->link_tbl[edesc->src_nents + 1];
1421 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1422 (edesc->src_nents + 1) *
1423 sizeof(struct talitos_ptr));
1424 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1425 sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1426 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1427 edesc->dma_len, DMA_BIDIRECTIONAL);
1431 static int common_nonsnoop(struct talitos_edesc *edesc,
1432 struct ablkcipher_request *areq,
1433 void (*callback) (struct device *dev,
1434 struct talitos_desc *desc,
1435 void *context, int error))
1437 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1438 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1439 struct device *dev = ctx->dev;
1440 struct talitos_desc *desc = &edesc->desc;
1441 unsigned int cryptlen = areq->nbytes;
1442 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
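	/*
	 * Descriptor layout for the plain (non-snooping) cipher path:
	 * ptr[0] and ptr[6] stay empty, ptr[1] = IV in, ptr[2] = cipher key,
	 * ptr[3] = data in, ptr[4] = data out, ptr[5] = IV out.
	 */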
1445 /* first DWORD empty */
1446 desc->ptr[0] = zero_entry;
1449 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
1450 desc->ptr[1].len = cpu_to_be16(ivsize);
1451 to_talitos_ptr_extent_clear(&desc->ptr[1]);
1454 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1455 (char *)&ctx->key, DMA_TO_DEVICE);
1460 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1461 (areq->src == areq->dst) ?
1462 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1466 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1467 (areq->src == areq->dst) ? DMA_NONE
1469 &desc->ptr[4], sg_count);
1472 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1475 /* last DWORD empty */
1476 desc->ptr[6] = zero_entry;
1478 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1479 if (ret != -EINPROGRESS) {
1480 common_nonsnoop_unmap(dev, edesc, areq);
1486 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1489 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1490 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1491 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1493 return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1494 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1495 areq->base.flags, encrypt);
1498 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1500 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1501 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1502 struct talitos_edesc *edesc;
1504 /* allocate extended descriptor */
1505 edesc = ablkcipher_edesc_alloc(areq, true);
1507 return PTR_ERR(edesc);
1510 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1512 return common_nonsnoop(edesc, areq, ablkcipher_done);
1515 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1517 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1518 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1519 struct talitos_edesc *edesc;
1521 /* allocate extended descriptor */
1522 edesc = ablkcipher_edesc_alloc(areq, false);
1524 return PTR_ERR(edesc);
1526 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1528 return common_nonsnoop(edesc, areq, ablkcipher_done);
1531 static void common_nonsnoop_hash_unmap(struct device *dev,
1532 struct talitos_edesc *edesc,
1533 struct ahash_request *areq)
1535 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1537 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1539 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1541 /* When using hashctx-in, must unmap it. */
1542 if (edesc->desc.ptr[1].len)
1543 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1546 if (edesc->desc.ptr[2].len)
1547 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1551 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1556 static void ahash_done(struct device *dev,
1557 struct talitos_desc *desc, void *context,
1560 struct ahash_request *areq = context;
1561 struct talitos_edesc *edesc =
1562 container_of(desc, struct talitos_edesc, desc);
1563 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1565 if (!req_ctx->last && req_ctx->to_hash_later) {
1566 /* Position any partial block for next update/final/finup */
1567 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1568 req_ctx->nbuf = req_ctx->to_hash_later;
1570 common_nonsnoop_hash_unmap(dev, edesc, areq);
1574 areq->base.complete(&areq->base, err);
1577 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1578 struct ahash_request *areq, unsigned int length,
1579 void (*callback) (struct device *dev,
1580 struct talitos_desc *desc,
1581 void *context, int error))
1583 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1584 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1585 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1586 struct device *dev = ctx->dev;
1587 struct talitos_desc *desc = &edesc->desc;
1590 /* first DWORD empty */
1591 desc->ptr[0] = zero_entry;
1593 /* hash context in */
1594 if (!req_ctx->first || req_ctx->swinit) {
1595 map_single_talitos_ptr(dev, &desc->ptr[1],
1596 req_ctx->hw_context_size,
1597 (char *)req_ctx->hw_context,
1599 req_ctx->swinit = 0;
1601 desc->ptr[1] = zero_entry;
1602 /* Indicate next op is not the first. */
1608 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1609 (char *)&ctx->key, DMA_TO_DEVICE);
1611 desc->ptr[2] = zero_entry;
1616 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1617 DMA_TO_DEVICE, &desc->ptr[3]);
1619 /* fifth DWORD empty */
1620 desc->ptr[4] = zero_entry;
1622 /* hash/HMAC out -or- hash context out */
1624 map_single_talitos_ptr(dev, &desc->ptr[5],
1625 crypto_ahash_digestsize(tfm),
1626 areq->result, DMA_FROM_DEVICE);
1628 map_single_talitos_ptr(dev, &desc->ptr[5],
1629 req_ctx->hw_context_size,
1630 req_ctx->hw_context, DMA_FROM_DEVICE);
1632 /* last DWORD empty */
1633 desc->ptr[6] = zero_entry;
1635 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1636 if (ret != -EINPROGRESS) {
1637 common_nonsnoop_hash_unmap(dev, edesc, areq);
1643 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1644 unsigned int nbytes)
1646 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1647 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1648 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1650 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
1651 nbytes, 0, 0, 0, areq->base.flags, false);
1654 static int ahash_init(struct ahash_request *areq)
1656 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1657 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1659 /* Initialize the context */
1661 req_ctx->first = 1; /* first indicates h/w must init its context */
1662 req_ctx->swinit = 0; /* assume h/w init of context */
1663 req_ctx->hw_context_size =
1664 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1665 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1666 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1672 * on h/w without explicit sha224 support, we initialize h/w context
1673 * manually with sha224 constants, and tell it to run sha256.
1675 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1677 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1680 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1682 req_ctx->hw_context[0] = SHA224_H0;
1683 req_ctx->hw_context[1] = SHA224_H1;
1684 req_ctx->hw_context[2] = SHA224_H2;
1685 req_ctx->hw_context[3] = SHA224_H3;
1686 req_ctx->hw_context[4] = SHA224_H4;
1687 req_ctx->hw_context[5] = SHA224_H5;
1688 req_ctx->hw_context[6] = SHA224_H6;
1689 req_ctx->hw_context[7] = SHA224_H7;
1691 /* init 64-bit count */
1692 req_ctx->hw_context[8] = 0;
1693 req_ctx->hw_context[9] = 0;
1698 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1700 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1701 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1702 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1703 struct talitos_edesc *edesc;
1704 unsigned int blocksize =
1705 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1706 unsigned int nbytes_to_hash;
1707 unsigned int to_hash_later;
1711 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1712 /* Buffer up to one whole block */
1713 sg_copy_to_buffer(areq->src,
1714 sg_count(areq->src, nbytes, &chained),
1715 req_ctx->buf + req_ctx->nbuf, nbytes);
1716 req_ctx->nbuf += nbytes;
1720 /* At least (blocksize + 1) bytes are available to hash */
1721 nbytes_to_hash = nbytes + req_ctx->nbuf;
1722 to_hash_later = nbytes_to_hash & (blocksize - 1);
1726 else if (to_hash_later)
1727 /* There is a partial block. Hash the full block(s) now */
1728 nbytes_to_hash -= to_hash_later;
1730 /* Keep one block buffered */
1731 nbytes_to_hash -= blocksize;
1732 to_hash_later = blocksize;
1735 /* Chain in any previously buffered data */
1736 if (req_ctx->nbuf) {
1737 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1738 sg_init_table(req_ctx->bufsl, nsg);
1739 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1741 scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1742 req_ctx->psrc = req_ctx->bufsl;
1744 req_ctx->psrc = areq->src;
1746 if (to_hash_later) {
1747 int nents = sg_count(areq->src, nbytes, &chained);
1748 sg_pcopy_to_buffer(areq->src, nents,
1751 nbytes - to_hash_later);
1753 req_ctx->to_hash_later = to_hash_later;
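	/*
	 * Example of the split above (illustrative numbers): with a 64-byte
	 * block size, 10 bytes already buffered and a 120-byte update,
	 * nbytes_to_hash is 130 and to_hash_later = 130 & 63 = 2, so 128
	 * bytes go to the hardware now and the trailing 2 bytes are stashed
	 * in bufnext for the next update/final/finup.
	 */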
1755 /* Allocate extended descriptor */
1756 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1758 return PTR_ERR(edesc);
1760 edesc->desc.hdr = ctx->desc_hdr_template;
1762 /* On last one, request SEC to pad; otherwise continue */
1764 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1766 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1768 /* request SEC to INIT hash. */
1769 if (req_ctx->first && !req_ctx->swinit)
1770 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1772 /* When the tfm context has a keylen, it's an HMAC.
1773 * A first or last (ie. not middle) descriptor must request HMAC.
1775 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1776 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1778 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1782 static int ahash_update(struct ahash_request *areq)
1784 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1788 return ahash_process_req(areq, areq->nbytes);
1791 static int ahash_final(struct ahash_request *areq)
1793 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1797 return ahash_process_req(areq, 0);
1800 static int ahash_finup(struct ahash_request *areq)
1802 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1806 return ahash_process_req(areq, areq->nbytes);
1809 static int ahash_digest(struct ahash_request *areq)
1811 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1812 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1817 return ahash_process_req(areq, areq->nbytes);
1820 struct keyhash_result {
1821 struct completion completion;
1825 static void keyhash_complete(struct crypto_async_request *req, int err)
1827 struct keyhash_result *res = req->data;
1829 if (err == -EINPROGRESS)
1833 complete(&res->completion);
1836 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1839 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1841 struct scatterlist sg[1];
1842 struct ahash_request *req;
1843 struct keyhash_result hresult;
1846 init_completion(&hresult.completion);
1848 req = ahash_request_alloc(tfm, GFP_KERNEL);
1852 /* Keep tfm keylen == 0 during hash of the long key */
1854 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1855 keyhash_complete, &hresult);
1857 sg_init_one(&sg[0], key, keylen);
1859 ahash_request_set_crypt(req, sg, hash, keylen);
1860 ret = crypto_ahash_digest(req);
1866 ret = wait_for_completion_interruptible(
1867 &hresult.completion);
1874 ahash_request_free(req);
1879 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1880 unsigned int keylen)
1882 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1883 unsigned int blocksize =
1884 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1885 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1886 unsigned int keysize = keylen;
1887 u8 hash[SHA512_DIGEST_SIZE];
1890 if (keylen <= blocksize)
1891 memcpy(ctx->key, key, keysize);
1893 /* Must get the hash of the long key */
1894 ret = keyhash(tfm, key, keylen, hash);
1897 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1901 keysize = digestsize;
1902 memcpy(ctx->key, hash, digestsize);
1905 ctx->keylen = keysize;
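/*
 * Keys longer than the algorithm block size cannot be used directly as an
 * HMAC key, so ahash_setkey() first runs them through keyhash() above and
 * stores the digest instead; shorter keys are copied as-is.  ctx->keylen is
 * what the hash descriptor builder later maps into ptr[2].
 */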
1911 struct talitos_alg_template {
1914 struct crypto_alg crypto;
1915 struct ahash_alg hash;
1917 __be32 desc_hdr_template;
1920 static struct talitos_alg_template driver_algs[] = {
1921 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1922 { .type = CRYPTO_ALG_TYPE_AEAD,
1924 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1925 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1926 .cra_blocksize = AES_BLOCK_SIZE,
1927 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1929 .ivsize = AES_BLOCK_SIZE,
1930 .maxauthsize = SHA1_DIGEST_SIZE,
1933 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1934 DESC_HDR_SEL0_AESU |
1935 DESC_HDR_MODE0_AESU_CBC |
1936 DESC_HDR_SEL1_MDEUA |
1937 DESC_HDR_MODE1_MDEU_INIT |
1938 DESC_HDR_MODE1_MDEU_PAD |
1939 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1941 { .type = CRYPTO_ALG_TYPE_AEAD,
1943 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1944 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1945 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1946 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1948 .ivsize = DES3_EDE_BLOCK_SIZE,
1949 .maxauthsize = SHA1_DIGEST_SIZE,
1952 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1954 DESC_HDR_MODE0_DEU_CBC |
1955 DESC_HDR_MODE0_DEU_3DES |
1956 DESC_HDR_SEL1_MDEUA |
1957 DESC_HDR_MODE1_MDEU_INIT |
1958 DESC_HDR_MODE1_MDEU_PAD |
1959 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1961 { .type = CRYPTO_ALG_TYPE_AEAD,
1963 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1964 .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
1965 .cra_blocksize = AES_BLOCK_SIZE,
1966 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1968 .ivsize = AES_BLOCK_SIZE,
1969 .maxauthsize = SHA224_DIGEST_SIZE,
1972 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1973 DESC_HDR_SEL0_AESU |
1974 DESC_HDR_MODE0_AESU_CBC |
1975 DESC_HDR_SEL1_MDEUA |
1976 DESC_HDR_MODE1_MDEU_INIT |
1977 DESC_HDR_MODE1_MDEU_PAD |
1978 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1980 { .type = CRYPTO_ALG_TYPE_AEAD,
1982 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
1983 .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
1984 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1985 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1987 .ivsize = DES3_EDE_BLOCK_SIZE,
1988 .maxauthsize = SHA224_DIGEST_SIZE,
1991 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1993 DESC_HDR_MODE0_DEU_CBC |
1994 DESC_HDR_MODE0_DEU_3DES |
1995 DESC_HDR_SEL1_MDEUA |
1996 DESC_HDR_MODE1_MDEU_INIT |
1997 DESC_HDR_MODE1_MDEU_PAD |
1998 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2000 { .type = CRYPTO_ALG_TYPE_AEAD,
2002 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2003 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2004 .cra_blocksize = AES_BLOCK_SIZE,
2005 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2007 .ivsize = AES_BLOCK_SIZE,
2008 .maxauthsize = SHA256_DIGEST_SIZE,
2011 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2012 DESC_HDR_SEL0_AESU |
2013 DESC_HDR_MODE0_AESU_CBC |
2014 DESC_HDR_SEL1_MDEUA |
2015 DESC_HDR_MODE1_MDEU_INIT |
2016 DESC_HDR_MODE1_MDEU_PAD |
2017 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2019 { .type = CRYPTO_ALG_TYPE_AEAD,
2021 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2022 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2023 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2024 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2026 .ivsize = DES3_EDE_BLOCK_SIZE,
2027 .maxauthsize = SHA256_DIGEST_SIZE,
2030 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2032 DESC_HDR_MODE0_DEU_CBC |
2033 DESC_HDR_MODE0_DEU_3DES |
2034 DESC_HDR_SEL1_MDEUA |
2035 DESC_HDR_MODE1_MDEU_INIT |
2036 DESC_HDR_MODE1_MDEU_PAD |
2037 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2039 { .type = CRYPTO_ALG_TYPE_AEAD,
2041 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2042 .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2043 .cra_blocksize = AES_BLOCK_SIZE,
2044 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2046 .ivsize = AES_BLOCK_SIZE,
2047 .maxauthsize = SHA384_DIGEST_SIZE,
2050 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2051 DESC_HDR_SEL0_AESU |
2052 DESC_HDR_MODE0_AESU_CBC |
2053 DESC_HDR_SEL1_MDEUB |
2054 DESC_HDR_MODE1_MDEU_INIT |
2055 DESC_HDR_MODE1_MDEU_PAD |
2056 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2058 { .type = CRYPTO_ALG_TYPE_AEAD,
2060 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2061 .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2062 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2063 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2065 .ivsize = DES3_EDE_BLOCK_SIZE,
2066 .maxauthsize = SHA384_DIGEST_SIZE,
2069 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2071 DESC_HDR_MODE0_DEU_CBC |
2072 DESC_HDR_MODE0_DEU_3DES |
2073 DESC_HDR_SEL1_MDEUB |
2074 DESC_HDR_MODE1_MDEU_INIT |
2075 DESC_HDR_MODE1_MDEU_PAD |
2076 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2078 { .type = CRYPTO_ALG_TYPE_AEAD,
2080 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2081 .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2082 .cra_blocksize = AES_BLOCK_SIZE,
2083 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2085 .ivsize = AES_BLOCK_SIZE,
2086 .maxauthsize = SHA512_DIGEST_SIZE,
2089 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2090 DESC_HDR_SEL0_AESU |
2091 DESC_HDR_MODE0_AESU_CBC |
2092 DESC_HDR_SEL1_MDEUB |
2093 DESC_HDR_MODE1_MDEU_INIT |
2094 DESC_HDR_MODE1_MDEU_PAD |
2095 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2097 { .type = CRYPTO_ALG_TYPE_AEAD,
2099 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2100 .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2101 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2102 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2104 .ivsize = DES3_EDE_BLOCK_SIZE,
2105 .maxauthsize = SHA512_DIGEST_SIZE,
2108 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2110 DESC_HDR_MODE0_DEU_CBC |
2111 DESC_HDR_MODE0_DEU_3DES |
2112 DESC_HDR_SEL1_MDEUB |
2113 DESC_HDR_MODE1_MDEU_INIT |
2114 DESC_HDR_MODE1_MDEU_PAD |
2115 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2117 { .type = CRYPTO_ALG_TYPE_AEAD,
2119 .cra_name = "authenc(hmac(md5),cbc(aes))",
2120 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2121 .cra_blocksize = AES_BLOCK_SIZE,
2122 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2124 .ivsize = AES_BLOCK_SIZE,
2125 .maxauthsize = MD5_DIGEST_SIZE,
2128 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2129 DESC_HDR_SEL0_AESU |
2130 DESC_HDR_MODE0_AESU_CBC |
2131 DESC_HDR_SEL1_MDEUA |
2132 DESC_HDR_MODE1_MDEU_INIT |
2133 DESC_HDR_MODE1_MDEU_PAD |
2134 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2136 { .type = CRYPTO_ALG_TYPE_AEAD,
2138 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2139 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2140 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2141 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2143 .ivsize = DES3_EDE_BLOCK_SIZE,
2144 .maxauthsize = MD5_DIGEST_SIZE,
2147 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2149 DESC_HDR_MODE0_DEU_CBC |
2150 DESC_HDR_MODE0_DEU_3DES |
2151 DESC_HDR_SEL1_MDEUA |
2152 DESC_HDR_MODE1_MDEU_INIT |
2153 DESC_HDR_MODE1_MDEU_PAD |
2154 DESC_HDR_MODE1_MDEU_MD5_HMAC,
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
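/*
 * Usage sketch (illustrative only, not part of the driver): once an entry
 * from driver_algs is registered at probe time, kernel users reach it by
 * cra_name through the generic crypto API, and the crypto core selects the
 * talitos implementation according to its registered priority.  Error
 * handling is trimmed for brevity.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...submit ahash_requests as usual...
 *	crypto_free_ahash(tfm);
 */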
/* per-device instance data wrapping one registered algorithm template */
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
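/*
 * Worked example (illustrative): num_channels is validated as a power of
 * two in talitos_probe(), so the mask above behaves like a modulo.  With
 * four channels, successive atomic_inc_return() values 5, 6, 7, 8 map to
 * channels 5 & 3 = 1, 6 & 3 = 2, 7 & 3 = 3 and 8 & 3 = 0, spreading new
 * tfms evenly across the SEC channels.
 */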
static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
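/*
 * Illustrative example (values hypothetical): for a template whose
 * DESC_TYPE() is 1 and whose PRIMARY_EU() is 2, with no secondary EU, the
 * check reduces to ((1 << 1) & priv->desc_types) &&
 * ((1 << 2) & priv->exec_units); both the descriptor type and the
 * execution unit must be advertised by the "fsl,descriptor-types-mask" and
 * "fsl,exec-units-mask" device tree properties parsed in talitos_probe().
 */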
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);
	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);
	kfree(priv);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		/* free the allocation before failing this error path */
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}
	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}
	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}
	return err;
}
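/*
 * Interrupt wiring summary (derived from the code above and the tasklet
 * setup in talitos_probe()): nodes exposing a single interrupt get one
 * handler and one done tasklet covering all four channels
 * (talitos_interrupt_4ch/talitos_done_4ch); nodes exposing two interrupts
 * put the even channels (0/2) on the primary line and the odd channels
 * (1/3) on the secondary line, each with its own done tasklet.
 */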
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
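/*
 * Device tree sketch (illustrative; the values below are examples only):
 * talitos_probe() binds against "fsl,sec2.0"-compatible nodes and consumes
 * the reg/interrupts resources plus the four "fsl," properties validated
 * above.
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <11 8>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0x7e>;
 *		fsl,descriptor-types-mask = <0x01010ebf>;
 *	};
 */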