/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}
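/*
 * Worked example (illustrative, not from the original source): on SEC2+, a
 * 36-bit bus address such as 0x3_1234_5678 is stored as
 * eptr = upper_32_bits(addr) = 0x3 and
 * ptr = cpu_to_be32(lower_32_bits(addr)) = cpu_to_be32(0x12345678).
 * SEC1 descriptor pointers have no eptr field, hence the is_sec1 guards in
 * the helpers above.
 */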
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned short len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
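/*
 * Usage sketch (illustrative only, not part of the driver): a client maps
 * its descriptor pointers, submits on a channel, and completes its own
 * request from the done callback.  my_req, my_done, my_submit and
 * complete_my_request are hypothetical names.
 */
#if 0
static void my_done(struct device *dev, struct talitos_desc *desc,
		    void *context, int error)
{
	struct my_req *req = context;	/* handle passed to talitos_submit */

	/* error reflects the DESC_HDR_DONE/feedback check in flush_channel */
	complete_my_request(req, error);
}

static int my_submit(struct device *dev, int ch, struct talitos_desc *desc,
		     struct my_req *req)
{
	int ret = talitos_submit(dev, ch, desc, my_done, req);

	/* -EAGAIN means the channel fifo is full; only -EINPROGRESS means
	 * the descriptor was accepted */
	return (ret == -EINPROGRESS) ? 0 : ret;
}
#endif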
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
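/*
 * Note on the done-path handshake (a summary inferred from the surrounding
 * code): the interrupt handlers further below mask a channel's done
 * interrupt in TALITOS_IMR before scheduling the done tasklet, and the
 * talitos1/2_done_* tasklets above unmask it again once the fifo has been
 * flushed.  On SEC1 the IMR sense is inverted, which is why masking uses
 * setbits32 there and clrbits32 on SEC2+.
 */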
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v, v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
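/*
 * Note (an assumption based on the comment in talitos_rng_data_read): the
 * two back-to-back 32-bit reads form the 64-bit fifo access the RNGU
 * requires; the first (high) word is read and discarded, and only the low
 * word is handed back to the hwrng core, so each call consumes one 64-bit
 * sample.
 */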
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
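/*
 * Layout sketch (illustrative, not from the original source): for
 * src_nents = 3, dst_nents = 2 and assoc_nents = 0 with a 12-byte ICV,
 * talitos_edesc_alloc() reserves
 *   (3 + 2 + 2) * sizeof(struct talitos_ptr) + 12
 * bytes after the h/w descriptor: link table entries for both directions,
 * two extra entries for the ICV pointers, and the stashed ICV bytes
 * themselves.
 */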
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (sg && n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
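/*
 * Worked example (illustrative): two 64-byte segments and cryptlen = 96.
 * The copy loop leaves cryptlen at -32; the trim loop does not empty the
 * last entry (64 > 32), be16_add_cpu() then shortens it from 64 to 32
 * bytes, and the entry is tagged with DESC_PTR_LNKTBL_RETURN to end the
 * table.
 */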
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc), 0);
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src), 0);
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
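/*
 * Recap of the pointer map built above:
 *   ptr[0] HMAC key,  ptr[1] HMAC data (assoc + IV),  ptr[2] cipher IV,
 *   ptr[3] cipher key,  ptr[4] cipher in,  ptr[5] cipher out (+ ICV),
 *   ptr[6] IV out.
 */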
/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoclen) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	if (!dst || dst == src) {
		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
			  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoclen)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	talitos_sg_unmap(dev, edesc, src, dst);
}
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);

	sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
	} else {
		sg_count = sg_to_link_tbl(src, sg_count, len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		}
	}
	return sg_count;
}
void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);

	if (dir != DMA_NONE)
		sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
					  dir, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
	} else {
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		to_talitos_ptr(ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + 1) *
			       sizeof(struct talitos_ptr), 0);
		ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
		sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}
}
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
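/*
 * Worked example (illustrative): SHA-256 has a 64-byte block size.  With
 * req_ctx->nbuf = 10 buffered bytes and an update of nbytes = 200,
 * nbytes_to_hash = 210 and to_hash_later = 210 & 63 = 18, so 192 bytes are
 * hashed now and 18 are stashed in bufnext for the next
 * update/final/finup.
 */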
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}
struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}
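/*
 * Example (illustrative): hmac(sha256) with a 100-byte key exceeds the
 * 64-byte block size, so per HMAC (RFC 2104) the key is first digested
 * down to the 32-byte digest size by keyhash() and that digest becomes
 * ctx->key.
 */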
struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
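
/*
 * Reading a driver_algs template (a sketch of how the driver consumes the
 * header, not a full register description): the DESC_HDR_TYPE_* field is
 * what DESC_TYPE() extracts in hw_supports() below; DESC_HDR_SEL0_* and
 * DESC_HDR_SEL1_* select the primary and secondary execution units
 * (PRIMARY_EU() and SECONDARY_EU()); DESC_HDR_MODE0_* and DESC_HDR_MODE1_*
 * configure those units, e.g. CBC on the AESU plus init+pad HMAC on the
 * MDEU for the authenc entries.
 */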

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
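
/*
 * Illustrative arithmetic (hypothetical numbers): with num_channels = 4 the
 * mask above is 3, so successive tfm initializations land on channels
 * 1, 2, 3, 0, 1, ... as last_chan increments.  The round-robin only works
 * because talitos_probe() rejects channel counts that are not powers of 2.
 */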

static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
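
/*
 * Worked example (mask values are illustrative, not from any particular
 * board): given fsl,exec-units-mask = <0xfe> and fsl,descriptor-types-mask
 * = <0x12b0ebf>, a template is accepted only if the bit for its DESC_TYPE()
 * is set in the descriptor-types mask and the bits for its primary (and,
 * for IPSEC_ESP templates, secondary) execution units are set in the
 * exec-units mask.
 */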

static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
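
/*
 * Sample device tree node this driver binds against (a sketch with
 * representative property values; real boards define their own):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <29 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */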

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");